/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_indata.c,v 1.36 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * Take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);

	if (calc == 0) {
		/* out of space */
		return (calc);
	}
	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it would otherwise be 0: SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
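
/*
 * Worked example of the arithmetic above (illustrative numbers only, not
 * tuned defaults): with sctp_sbspace() reporting 60000 bytes, 4000 bytes
 * held on the reassembly queue, 2000 bytes on the stream queues and a
 * my_rwnd_control_len of 500, the advertised window becomes
 * 60000 - 4000 - 2000 - 500 = 53500.  Only if the result after the
 * control-length subtraction were still below my_rwnd_control_len would
 * it be clamped to 1, to keep the peer out of silly-window territory.
 */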


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}

	ret = sctp_get_mbuf_for_msg(len,
	    0, M_DONTWAIT, 1, MT_DATA);

	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	SCTP_BUF_LEN(ret) = cmh->cmsg_len;
	return (ret);
}
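
/*
 * Receiver-side sketch of how the control data built above is consumed in
 * userland (the "fd" and msghdr setup are assumptions; the cmsg walk is
 * the standard recvmsg() idiom, not anything specific to this file):
 *
 *	struct msghdr msg;		set up with iov and control buffer
 *	struct cmsghdr *c;
 *
 *	if (recvmsg(fd, &msg, 0) >= 0)
 *		for (c = CMSG_FIRSTHDR(&msg); c != NULL;
 *		    c = CMSG_NXTHDR(&msg, c))
 *			if (c->cmsg_level == IPPROTO_SCTP &&
 *			    c->cmsg_type == SCTP_SNDRCV) {
 *				struct sctp_sndrcvinfo *s =
 *				    (struct sctp_sndrcvinfo *)CMSG_DATA(c);
 *				then read s->sinfo_stream, s->sinfo_ssn,
 *				s->sinfo_ppid, s->sinfo_tsn, etc.
 *			}
 */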


char *
sctp_build_ctl_cchunk(struct sctp_inpcb *inp,
    int *control_len,
    struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	char *buf;
	int len;
	int use_extended = 0;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
		use_extended = 1;
		len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
	} else {
		len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	}
	SCTP_MALLOC(buf, char *, len, SCTP_M_CMSG);
	if (buf == NULL) {
		/* No space */
		return (buf);
	}
	/* We need a CMSG header followed by the struct */
	cmh = (struct cmsghdr *)buf;
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	if (use_extended) {
		cmh->cmsg_type = SCTP_EXTRCV;
		cmh->cmsg_len = len;
		memcpy(outinfo, sinfo, len);
	} else {
		cmh->cmsg_type = SCTP_SNDRCV;
		cmh->cmsg_len = len;
		*outinfo = *sinfo;
	}
	*control_len = len;
	return (buf);
}


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;

	/* EY if any out-of-order delivered, then tag it nr on nr_map */
	uint32_t nr_tsn, nr_gap;

	struct sctp_queued_to_read *control, *ctl, *ctlat;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->reasmqueue);
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream,
			 * and not marked unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it, we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * EY this is the chunk that should be tagged nr gapped:
		 * calculate the gap and such, then tag this TSN nr
		 * (chk->rec.data.TSN_seq).
		 */
		/*
		 * EY!-TODO- this tsn should be tagged nr only if it is
		 * out-of-order; the if statement should be modified.
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {

			nr_tsn = chk->rec.data.TSN_seq;
			if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
				nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
			} else {
				nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
			}
			if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
				/*
				 * EY The 1st case should never happen, as
				 * that check is done in
				 * sctp_process_a_data_chunk; the 2nd should
				 * never happen, because nr_mapping_array is
				 * always expanded when mapping_array is
				 * expanded.
				 */
			} else {
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				if (nr_tsn > asoc->highest_tsn_inside_nr_map)
					asoc->highest_tsn_inside_nr_map = nr_tsn;
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			ctl = TAILQ_FIRST(&strm->inqueue);
			if (ctl && (nxt_todel == ctl->sinfo_ssn)) {
				while (ctl != NULL) {
					/* Deliver more if we can. */
					if (nxt_todel == ctl->sinfo_ssn) {
						ctlat = TAILQ_NEXT(ctl, next);
						TAILQ_REMOVE(&strm->inqueue, ctl, next);
						asoc->size_on_all_streams -= ctl->length;
						sctp_ucount_decr(asoc->cnt_on_all_streams);
						strm->last_sequence_delivered++;
						/*
						 * EY will be used to
						 * calculate nr-gap
						 */
						nr_tsn = ctl->sinfo_tsn;
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    ctl,
						    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
						/*
						 * EY - now something is
						 * delivered, calculate
						 * nr_gap and tag this tsn
						 * NR
						 */
						if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {

							if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
								nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
							} else {
								nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
							}
							if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
							    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
								/*
								 * EY The 1st case should never happen,
								 * as that check is done in
								 * sctp_process_a_data_chunk; the 2nd
								 * should never happen, because
								 * nr_mapping_array is always expanded
								 * when mapping_array is expanded.
								 */
							} else {
								SCTP_TCB_LOCK_ASSERT(stcb);
								SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
								if (nr_tsn > asoc->highest_tsn_inside_nr_map)
									asoc->highest_tsn_inside_nr_map = nr_tsn;
							}
						}
						ctl = ctlat;
					} else {
						break;
					}
					nxt_todel = strm->last_sequence_delivered + 1;
				}
			}
			break;
		}
		/* sa_ignore FREED_MEMORY */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk);
}
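
/*
 * Illustrative walk-through of the loop above (TSN/SSN values are made
 * up): a message fragmented as TSN 100 (FIRST), 101 (MIDDLE), 102 (LAST)
 * with tsn_last_delivered == 99.  Pass 1 sees the FIRST fragment, builds
 * a readq entry and saves it as control_pdapi.  Pass 2 appends TSN 101 to
 * that entry.  Pass 3 appends TSN 102 and, seeing LAST_FRAG, clears
 * fragmented_delivery_inprogress and (for ordered data) bumps the
 * stream's last_sequence_delivered, after which any queued in-order SSNs
 * on that stream are drained to the socket buffer.
 */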

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 *
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	/* EY - will be used to calculate nr-gap for a tsn */
	uint32_t nr_tsn, nr_gap;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (compare_with_wrap(strm->last_sequence_delivered,
	    control->sinfo_ssn, MAX_SEQ) ||
	    (strm->last_sequence_delivered == control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

		*abort_flag = 1;
		return;

	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it can be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;
		/* EY will be used to calculate nr-gap */
		nr_tsn = control->sinfo_tsn;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);

		/*
		 * EY this is the chunk that should be tagged nr gapped:
		 * calculate the gap and such, then tag this TSN nr
		 * (chk->rec.data.TSN_seq).
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {

			if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
				nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
			} else {
				nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
			}
			if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
			    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
				/*
				 * EY The 1st case should never happen, as
				 * that check is done in
				 * sctp_process_a_data_chunk; the 2nd should
				 * never happen, because nr_mapping_array is
				 * always expanded when mapping_array is
				 * expanded.
				 */
			} else {
				SCTP_TCB_LOCK_ASSERT(stcb);
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
				if (nr_tsn > asoc->highest_tsn_inside_nr_map)
					asoc->highest_tsn_inside_nr_map = nr_tsn;
			}
		}
		control = TAILQ_FIRST(&strm->inqueue);
		while (control != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				at = TAILQ_NEXT(control, next);
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				/* EY will be used to calculate nr-gap */
				nr_tsn = control->sinfo_tsn;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
				/*
				 * EY this is the chunk that should be
				 * tagged nr gapped: calculate the gap and
				 * such, then tag this TSN nr
				 * (chk->rec.data.TSN_seq).
				 */
				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {

					if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
						nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
					} else {
						nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
					}
					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
						/*
						 * EY The 1st case should
						 * never happen, as that
						 * check is done in
						 * sctp_process_a_data_chunk;
						 * the 2nd should never
						 * happen, because
						 * nr_mapping_array is
						 * always expanded when
						 * mapping_array is expanded.
						 */
					} else {
						SCTP_TCB_LOCK_ASSERT(stcb);
						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
						if (nr_tsn > asoc->highest_tsn_inside_nr_map)
							asoc->highest_tsn_inside_nr_map = nr_tsn;
					}
				}
				control = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if ((compare_with_wrap(asoc->cumulative_tsn,
		    control->sinfo_tsn, MAX_TSN)) ||
		    (control->sinfo_tsn == asoc->cumulative_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (compare_with_wrap(at->sinfo_ssn,
				    control->sinfo_ssn, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * sequence number.  I guess I will
					 * just free this new guy; should we
					 * abort too? FIX ME MAYBE?  Or it
					 * COULD be that the SSNs have
					 * wrapped.  Maybe I should compare
					 * to TSN somehow... sigh.  For now
					 * just blow away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom)
						sctp_free_remote_addr(control->whoFrom);
					control->whoFrom = NULL;
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
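
/*
 * A note on compare_with_wrap(a, b, MAX_SEQ) as used above: it is a
 * serial-number style comparison, "a is newer than b" across the 16-bit
 * wrap.  For example, compare_with_wrap(2, 65534, MAX_SEQ) is true, since
 * SSN 2 is 4 ahead of 65534 modulo 2^16; that is also why the duplicate
 * test at the top of sctp_queue_data_to_stream() can misfire when a peer
 * runs through 64k SSNs on one stream, as the FIX-ME comment observes.
 */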

/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue, and you get a 1
 * back if all of the message is ready or a 0 back if it is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
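
/*
 * Example (sizes assumed): with TSN 10 (FIRST, 400 bytes), 11 (MIDDLE,
 * 400) and 12 (LAST, 200) queued, *t_size ends up 1000 and the return is
 * 1.  If TSN 12 has not arrived yet, the walk runs off the sequential
 * run, *t_size is 800 and the return is 0; the caller may still start
 * partial delivery once that 800 crosses the endpoint's
 * partial_delivery_point.
 */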

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep, the first one is here and it's ok to
			 * deliver, but should we?
			 */
			if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (tsize >= stcb->sctp_ep->partial_delivery_point))) {

				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN, just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
	u_char last_flags;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery in progress;
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/*
			 * Gak, he sent me a duplicate TSN.  I guess I will
			 * just free this new guy; should we abort too? FIX
			 * ME MAYBE?  Or it COULD be that the SSNs have
			 * wrapped.  Maybe I should compare to TSN
			 * somehow... sigh.  For now just blow away the
			 * chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			return;
		} else {
			last_flags = at->rec.data.rcv_flags;
			last_tsn = at->rec.data.TSN_seq;
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct stream seq
					 * here, they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct stream seq
					 * here, they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);

					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
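
/*
 * For reference, every abort path above builds the same error cause: an
 * operational-error mbuf laid out as
 *
 *	struct sctp_paramhdr	param_type = SCTP_CAUSE_PROTOCOL_VIOLATION,
 *				param_length = header + 12 bytes
 *	uint32_t		SCTP_FROM_SCTP_INDATA + SCTP_LOC_n
 *				(which check fired)
 *	uint32_t		TSN of the offending chunk
 *	uint32_t		(stream number << 16) | stream sequence
 */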

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this, but that is doubtful. It is too bad I must worry about evil
 * crackers sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last; it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since it's a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk, but does
			 * it need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
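
/*
 * Example of what the adjacency rules above catch: if the queue holds TSN
 * 57 flagged MIDDLE, an arriving self-contained chunk with TSN 58 would
 * have to be part of that message (57's successor must be a MIDDLE or
 * LAST), so this returns 1 and the caller can flag the unfragmented chunk
 * as a protocol violation.  Had TSN 57 been flagged LAST, TSN 58 could
 * legally start a new message and this returns 0.
 */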
1557 
1558 
1559 static int
1560 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1561     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1562     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1563     int *break_flag, int last_chunk)
1564 {
1565 	/* Process a data chunk */
1566 	/* struct sctp_tmit_chunk *chk; */
1567 	struct sctp_tmit_chunk *chk;
1568 	uint32_t tsn, gap;
1569 
1570 	/* EY - for nr_sack */
1571 	uint32_t nr_gap;
1572 	struct mbuf *dmbuf;
1573 	int indx, the_len;
1574 	int need_reasm_check = 0;
1575 	uint16_t strmno, strmseq;
1576 	struct mbuf *oper;
1577 	struct sctp_queued_to_read *control;
1578 	int ordered;
1579 	uint32_t protocol_id;
1580 	uint8_t chunk_flags;
1581 	struct sctp_stream_reset_list *liste;
1582 
1583 	chk = NULL;
1584 	tsn = ntohl(ch->dp.tsn);
1585 	chunk_flags = ch->ch.chunk_flags;
1586 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1587 		asoc->send_sack = 1;
1588 	}
1589 	protocol_id = ch->dp.protocol_id;
1590 	ordered = ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0);
1591 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1592 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1593 	}
1594 	if (stcb == NULL) {
1595 		return (0);
1596 	}
1597 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1598 	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1599 	    asoc->cumulative_tsn == tsn) {
1600 		/* It is a duplicate */
1601 		SCTP_STAT_INCR(sctps_recvdupdata);
1602 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1603 			/* Record a dup for the next outbound sack */
1604 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1605 			asoc->numduptsns++;
1606 		}
1607 		asoc->send_sack = 1;
1608 		return (0);
1609 	}
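	/*
	 * Note (per RFC 4960, Section 3.3.4): the TSNs recorded in dup_tsns
	 * above are echoed back in the Duplicate TSN list of the next
	 * outbound SACK, which lets the peer tell lost SACKs apart from
	 * spurious retransmissions.
	 */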
1610 	/* Calculate the number of TSNs between the base and this TSN */
1611 	if (tsn >= asoc->mapping_array_base_tsn) {
1612 		gap = tsn - asoc->mapping_array_base_tsn;
1613 	} else {
1614 		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1615 	}
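	/*
	 * Illustrative arithmetic for the wrap branch above (made-up
	 * values): with mapping_array_base_tsn = 0xfffffffe and an arriving
	 * tsn = 1, gap = (0xffffffff - 0xfffffffe) + 1 + 1 = 3; the TSNs
	 * 0xfffffffe, 0xffffffff, 0x0 and 0x1 occupy bit offsets 0 through
	 * 3 of the mapping array.
	 */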
1616 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1617 		/* Can't hold the bit in the mapping at max array, toss it */
1618 		return (0);
1619 	}
1620 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1621 		SCTP_TCB_LOCK_ASSERT(stcb);
1622 		if (sctp_expand_mapping_array(asoc, gap)) {
1623 			/* Can't expand, drop it */
1624 			return (0);
1625 		}
1626 	}
1627 	/* EY - for nr_sack */
1628 	nr_gap = gap;
1629 
1630 	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1631 		*high_tsn = tsn;
1632 	}
1633 	/* See if we have received this one already */
1634 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1635 		SCTP_STAT_INCR(sctps_recvdupdata);
1636 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1637 			/* Record a dup for the next outbound sack */
1638 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1639 			asoc->numduptsns++;
1640 		}
1641 		asoc->send_sack = 1;
1642 		return (0);
1643 	}
1644 	/*
1645 	 * Check to see about the GONE flag; duplicates would have caused a
1646 	 * sack to be sent up above
1647 	 */
1648 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1649 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1650 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1651 	    ) {
1652 		/*
1653 		 * wait a minute, this guy is gone, there is no longer a
1654 		 * receiver. Send peer an ABORT!
1655 		 */
1656 		struct mbuf *op_err;
1657 
1658 		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1659 		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err, SCTP_SO_NOT_LOCKED);
1660 		*abort_flag = 1;
1661 		return (0);
1662 	}
1663 	/*
1664 	 * Now before going further we see if there is room. If NOT then we
1665 	 * MAY let one through only IF this TSN is the one we are waiting
1666 	 * for on a partial delivery API.
1667 	 */
1668 
1669 	/* now do the tests */
1670 	if (((asoc->cnt_on_all_streams +
1671 	    asoc->cnt_on_reasm_queue +
1672 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1673 	    (((int)asoc->my_rwnd) <= 0)) {
1674 		/*
1675 		 * When we have NO room in the rwnd we check to make sure
1676 		 * the reader is doing its job...
1677 		 */
1678 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1679 			/* some to read, wake-up */
1680 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1681 			struct socket *so;
1682 
1683 			so = SCTP_INP_SO(stcb->sctp_ep);
1684 			atomic_add_int(&stcb->asoc.refcnt, 1);
1685 			SCTP_TCB_UNLOCK(stcb);
1686 			SCTP_SOCKET_LOCK(so, 1);
1687 			SCTP_TCB_LOCK(stcb);
1688 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1689 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1690 				/* assoc was freed while we were unlocked */
1691 				SCTP_SOCKET_UNLOCK(so, 1);
1692 				return (0);
1693 			}
1694 #endif
1695 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1696 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1697 			SCTP_SOCKET_UNLOCK(so, 1);
1698 #endif
1699 		}
1700 		/* now is it in the mapping array of what we have accepted? */
1701 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1702 			/* Nope, not in the valid range; dump it */
1703 			sctp_set_rwnd(stcb, asoc);
1704 			if ((asoc->cnt_on_all_streams +
1705 			    asoc->cnt_on_reasm_queue +
1706 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1707 				SCTP_STAT_INCR(sctps_datadropchklmt);
1708 			} else {
1709 				SCTP_STAT_INCR(sctps_datadroprwnd);
1710 			}
1711 			indx = *break_flag;
1712 			*break_flag = 1;
1713 			return (0);
1714 		}
1715 	}
1716 	strmno = ntohs(ch->dp.stream_id);
1717 	if (strmno >= asoc->streamincnt) {
1718 		struct sctp_paramhdr *phdr;
1719 		struct mbuf *mb;
1720 
1721 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1722 		    0, M_DONTWAIT, 1, MT_DATA);
1723 		if (mb != NULL) {
1724 			/* add some space up front so prepend will work well */
1725 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1726 			phdr = mtod(mb, struct sctp_paramhdr *);
1727 			/*
1728 			 * Error causes are just params; this one has two
1729 			 * back-to-back paramhdrs, one with the error type
1730 			 * and size, the other with the stream id and a rsvd
1731 			 */
1732 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1733 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1734 			phdr->param_length =
1735 			    htons(sizeof(struct sctp_paramhdr) * 2);
1736 			phdr++;
1737 			/* We insert the stream in the type field */
1738 			phdr->param_type = ch->dp.stream_id;
1739 			/* And set the length to 0 for the rsvd field */
1740 			phdr->param_length = 0;
1741 			sctp_queue_op_err(stcb, mb);
1742 		}
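		/*
		 * Sketch of the resulting 8-byte error cause (layout per
		 * RFC 4960, cause code 1 = Invalid Stream Identifier):
		 *
		 *   +-------------------+-------------------+
		 *   | type = 0x0001     | length = 8        |
		 *   +-------------------+-------------------+
		 *   | stream id (as rx) | reserved = 0      |
		 *   +-------------------+-------------------+
		 */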
1743 		SCTP_STAT_INCR(sctps_badsid);
1744 		SCTP_TCB_LOCK_ASSERT(stcb);
1745 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1746 		/* EY set this tsn present in  nr_sack's nr_mapping_array */
1747 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1748 			SCTP_TCB_LOCK_ASSERT(stcb);
1749 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1750 		}
1751 		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1752 			/* we have a new high score */
1753 			asoc->highest_tsn_inside_map = tsn;
1754 			/* EY nr_sack version of the above */
1755 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
1756 				asoc->highest_tsn_inside_nr_map = tsn;
1757 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1758 				sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1759 			}
1760 		}
1761 		if (tsn == (asoc->cumulative_tsn + 1)) {
1762 			/* Update cum-ack */
1763 			asoc->cumulative_tsn = tsn;
1764 		}
1765 		return (0);
1766 	}
1767 	/*
1768 	 * Before we continue let's validate that we are not being fooled by
1769 	 * an evil attacker. We can only have 4k chunks based on our TSN
1770 	 * spread allowed by the mapping array (512 * 8 bits), so there is no
1771 	 * way our stream sequence numbers could have wrapped. We of course
1772 	 * only validate the FIRST fragment, so the bit must be set.
1773 	 */
1774 	strmseq = ntohs(ch->dp.stream_sequence);
1775 #ifdef SCTP_ASOCLOG_OF_TSNS
1776 	SCTP_TCB_LOCK_ASSERT(stcb);
1777 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1778 		asoc->tsn_in_at = 0;
1779 		asoc->tsn_in_wrapped = 1;
1780 	}
1781 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1782 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1783 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1784 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1785 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1786 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1787 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1788 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1789 	asoc->tsn_in_at++;
1790 #endif
1791 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1792 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1793 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1794 	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1795 	    strmseq, MAX_SEQ) ||
1796 	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1797 		/* The incoming sseq is behind where we last delivered? */
1798 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1799 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1800 		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1801 		    0, M_DONTWAIT, 1, MT_DATA);
1802 		if (oper) {
1803 			struct sctp_paramhdr *ph;
1804 			uint32_t *ippp;
1805 
1806 			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1807 			    (3 * sizeof(uint32_t));
1808 			ph = mtod(oper, struct sctp_paramhdr *);
1809 			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1810 			ph->param_length = htons(SCTP_BUF_LEN(oper));
1811 			ippp = (uint32_t *) (ph + 1);
1812 			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1813 			ippp++;
1814 			*ippp = tsn;
1815 			ippp++;
1816 			*ippp = ((strmno << 16) | strmseq);
1817 
1818 		}
1819 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1820 		sctp_abort_an_association(stcb->sctp_ep, stcb,
1821 		    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
1822 		*abort_flag = 1;
1823 		return (0);
1824 	}
1825 	/************************************
1826 	 * From here down we may find ch-> invalid,
1827 	 * so it's a good idea NOT to use it.
1828 	 *************************************/
1829 
1830 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1831 	if (last_chunk == 0) {
1832 		dmbuf = SCTP_M_COPYM(*m,
1833 		    (offset + sizeof(struct sctp_data_chunk)),
1834 		    the_len, M_DONTWAIT);
1835 #ifdef SCTP_MBUF_LOGGING
1836 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1837 			struct mbuf *mat;
1838 
1839 			mat = dmbuf;
1840 			while (mat) {
1841 				if (SCTP_BUF_IS_EXTENDED(mat)) {
1842 					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1843 				}
1844 				mat = SCTP_BUF_NEXT(mat);
1845 			}
1846 		}
1847 #endif
1848 	} else {
1849 		/* We can steal the last chunk */
1850 		int l_len;
1851 
1852 		dmbuf = *m;
1853 		/* lop off the top part */
1854 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1855 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1856 			l_len = SCTP_BUF_LEN(dmbuf);
1857 		} else {
1858 			/*
1859 			 * need to count up the size; hopefully we do not
1860 			 * hit this too often :-0
1861 			 */
1862 			struct mbuf *lat;
1863 
1864 			l_len = 0;
1865 			lat = dmbuf;
1866 			while (lat) {
1867 				l_len += SCTP_BUF_LEN(lat);
1868 				lat = SCTP_BUF_NEXT(lat);
1869 			}
1870 		}
1871 		if (l_len > the_len) {
1872 			/* Trim the extra rounding bytes off the end too */
1873 			m_adj(dmbuf, -(l_len - the_len));
1874 		}
1875 	}
1876 	if (dmbuf == NULL) {
1877 		SCTP_STAT_INCR(sctps_nomem);
1878 		return (0);
1879 	}
1880 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1881 	    asoc->fragmented_delivery_inprogress == 0 &&
1882 	    TAILQ_EMPTY(&asoc->resetHead) &&
1883 	    ((ordered == 0) ||
1884 	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1885 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1886 		/* Candidate for express delivery */
1887 		/*
1888 		 * It's not fragmented, no PD-API is up, nothing is in the
1889 		 * delivery queue, it's un-ordered OR ordered and the next to
1890 		 * deliver AND nothing else is stuck on the stream queue,
1891 		 * and there is room for it in the socket buffer. Let's just
1892 		 * stuff it up the buffer....
1893 		 */
1894 
1895 		/* It would be nice to avoid this copy if we could :< */
1896 		sctp_alloc_a_readq(stcb, control);
1897 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1898 		    protocol_id,
1899 		    stcb->asoc.context,
1900 		    strmno, strmseq,
1901 		    chunk_flags,
1902 		    dmbuf);
1903 		if (control == NULL) {
1904 			goto failed_express_del;
1905 		}
1906 		sctp_add_to_readq(stcb->sctp_ep, stcb, control, &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
1907 
1908 		/*
1909 		 * EY: here I should check if this delivered tsn is
1910 		 * out-of-order; if so, then update the nr_map
1911 		 */
1912 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
1913 			/*
1914 			 * EY check if the mapping_array and nr_mapping
1915 			 * array are consistent
1916 			 */
1917 			if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
1918 				/*
1919 				 * printf("EY-IN
1920 				 * sctp_process_a_data_chunk(5): Something
1921 				 * is wrong the map base tsn" "\nEY-and
1922 				 * nr_map base tsn should be equal.");
1923 				 */
1924 				/* EY debugging block */
1925 			{
1926 				/*
1927 				 * printf("\nEY-Calculating an
1928 				 * nr_gap!!\nmapping_array_size = %d
1929 				 * nr_mapping_array_size = %d"
1930 				 * "\nEY-mapping_array_base = %d
1931 				 * nr_mapping_array_base =
1932 				 * %d\nEY-highest_tsn_inside_map = %d"
1933 				 * "highest_tsn_inside_nr_map = %d\nEY-TSN =
1934 				 * %d nr_gap = %d",asoc->mapping_array_size,
1935 				 * asoc->nr_mapping_array_size,
1936 				 * asoc->mapping_array_base_tsn,
1937 				 * asoc->nr_mapping_array_base_tsn,
1938 				 * asoc->highest_tsn_inside_map,
1939 				 * asoc->highest_tsn_inside_nr_map,tsn,nr_gap
1940 				 * );
1941 				 */
1942 			}
1943 			/* EY - not 100% sure about the lock thing */
1944 			SCTP_TCB_LOCK_ASSERT(stcb);
1945 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
1946 			if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
1947 				asoc->highest_tsn_inside_nr_map = tsn;
1948 		}
1949 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1950 			/* for ordered, bump what we delivered */
1951 			asoc->strmin[strmno].last_sequence_delivered++;
1952 		}
1953 		SCTP_STAT_INCR(sctps_recvexpress);
1954 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1955 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1956 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1957 		}
1958 		control = NULL;
1959 		goto finish_express_del;
1960 	}
1961 failed_express_del:
1962 	/* If we reach here this is a new chunk */
1963 	chk = NULL;
1964 	control = NULL;
1965 	/* Express for fragmented delivery? */
1966 	if ((asoc->fragmented_delivery_inprogress) &&
1967 	    (stcb->asoc.control_pdapi) &&
1968 	    (asoc->str_of_pdapi == strmno) &&
1969 	    (asoc->ssn_of_pdapi == strmseq)
1970 	    ) {
1971 		control = stcb->asoc.control_pdapi;
1972 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1973 			/* Can't be another first? */
1974 			goto failed_pdapi_express_del;
1975 		}
1976 		if (tsn == (control->sinfo_tsn + 1)) {
1977 			/* Yep, we can add it on */
1978 			int end = 0;
1979 			uint32_t cumack;
1980 
1981 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1982 				end = 1;
1983 			}
1984 			cumack = asoc->cumulative_tsn;
1985 			if ((cumack + 1) == tsn)
1986 				cumack = tsn;
1987 
1988 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1989 			    tsn,
1990 			    &stcb->sctp_socket->so_rcv)) {
1991 				SCTP_PRINTF("Append fails end:%d\n", end);
1992 				goto failed_pdapi_express_del;
1993 			}
1994 			/*
1995 			 * EY: It was appended to the read queue in the prev
1996 			 * if-block; here I should check if this delivered
1997 			 * tsn is out-of-order and, if so, update the nr_map
1998 			 */
1999 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2000 				/* EY debugging block */
2001 				{
2002 					/*
2003 					 * printf("\nEY-Calculating an
2004 					 * nr_gap!!\nEY-mapping_array_size =
2005 					 * %d nr_mapping_array_size = %d"
2006 					 * "\nEY-mapping_array_base = %d
2007 					 * nr_mapping_array_base =
2008 					 * %d\nEY-highest_tsn_inside_map =
2009 					 * %d" "highest_tsn_inside_nr_map =
2010 					 * %d\nEY-TSN = %d nr_gap =
2011 					 * %d",asoc->mapping_array_size,
2012 					 * asoc->nr_mapping_array_size,
2013 					 * asoc->mapping_array_base_tsn,
2014 					 * asoc->nr_mapping_array_base_tsn,
2015 					 * asoc->highest_tsn_inside_map,
2016 					 * asoc->highest_tsn_inside_nr_map,ts
2017 					 * n,nr_gap);
2018 					 */
2019 				}
2020 				/* EY - not 100% sure about the lock thing */
2021 				SCTP_TCB_LOCK_ASSERT(stcb);
2022 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2023 				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2024 					asoc->highest_tsn_inside_nr_map = tsn;
2025 			}
2026 			SCTP_STAT_INCR(sctps_recvexpressm);
2027 			control->sinfo_tsn = tsn;
2028 			asoc->tsn_last_delivered = tsn;
2029 			asoc->fragment_flags = chunk_flags;
2030 			asoc->tsn_of_pdapi_last_delivered = tsn;
2031 			asoc->last_flags_delivered = chunk_flags;
2032 			asoc->last_strm_seq_delivered = strmseq;
2033 			asoc->last_strm_no_delivered = strmno;
2034 			if (end) {
2035 				/* clean up the flags and such */
2036 				asoc->fragmented_delivery_inprogress = 0;
2037 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2038 					asoc->strmin[strmno].last_sequence_delivered++;
2039 				}
2040 				stcb->asoc.control_pdapi = NULL;
2041 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
2042 					/*
2043 					 * There could be another message
2044 					 * ready
2045 					 */
2046 					need_reasm_check = 1;
2047 				}
2048 			}
2049 			control = NULL;
2050 			goto finish_express_del;
2051 		}
2052 	}
2053 failed_pdapi_express_del:
2054 	control = NULL;
2055 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2056 		sctp_alloc_a_chunk(stcb, chk);
2057 		if (chk == NULL) {
2058 			/* No memory so we drop the chunk */
2059 			SCTP_STAT_INCR(sctps_nomem);
2060 			if (last_chunk == 0) {
2061 				/* we copied it, free the copy */
2062 				sctp_m_freem(dmbuf);
2063 			}
2064 			return (0);
2065 		}
2066 		chk->rec.data.TSN_seq = tsn;
2067 		chk->no_fr_allowed = 0;
2068 		chk->rec.data.stream_seq = strmseq;
2069 		chk->rec.data.stream_number = strmno;
2070 		chk->rec.data.payloadtype = protocol_id;
2071 		chk->rec.data.context = stcb->asoc.context;
2072 		chk->rec.data.doing_fast_retransmit = 0;
2073 		chk->rec.data.rcv_flags = chunk_flags;
2074 		chk->asoc = asoc;
2075 		chk->send_size = the_len;
2076 		chk->whoTo = net;
2077 		atomic_add_int(&net->ref_count, 1);
2078 		chk->data = dmbuf;
2079 	} else {
2080 		sctp_alloc_a_readq(stcb, control);
2081 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2082 		    protocol_id,
2083 		    stcb->asoc.context,
2084 		    strmno, strmseq,
2085 		    chunk_flags,
2086 		    dmbuf);
2087 		if (control == NULL) {
2088 			/* No memory so we drop the chunk */
2089 			SCTP_STAT_INCR(sctps_nomem);
2090 			if (last_chunk == 0) {
2091 				/* we copied it, free the copy */
2092 				sctp_m_freem(dmbuf);
2093 			}
2094 			return (0);
2095 		}
2096 		control->length = the_len;
2097 	}
2098 
2099 	/* Mark it as received */
2100 	/* Now queue it where it belongs */
2101 	if (control != NULL) {
2102 		/* First a sanity check */
2103 		if (asoc->fragmented_delivery_inprogress) {
2104 			/*
2105 			 * Ok, we have a fragmented delivery in progress; if
2106 			 * this chunk is next to deliver OR belongs in our
2107 			 * view to the reassembly queue, the peer is evil or
2108 			 * broken.
2109 			 */
2110 			uint32_t estimate_tsn;
2111 
2112 			estimate_tsn = asoc->tsn_last_delivered + 1;
2113 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2114 			    (estimate_tsn == control->sinfo_tsn)) {
2115 				/* Evil/Broken peer */
2116 				sctp_m_freem(control->data);
2117 				control->data = NULL;
2118 				if (control->whoFrom) {
2119 					sctp_free_remote_addr(control->whoFrom);
2120 					control->whoFrom = NULL;
2121 				}
2122 				sctp_free_a_readq(stcb, control);
2123 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2124 				    0, M_DONTWAIT, 1, MT_DATA);
2125 				if (oper) {
2126 					struct sctp_paramhdr *ph;
2127 					uint32_t *ippp;
2128 
2129 					SCTP_BUF_LEN(oper) =
2130 					    sizeof(struct sctp_paramhdr) +
2131 					    (3 * sizeof(uint32_t));
2132 					ph = mtod(oper, struct sctp_paramhdr *);
2133 					ph->param_type =
2134 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2135 					ph->param_length = htons(SCTP_BUF_LEN(oper));
2136 					ippp = (uint32_t *) (ph + 1);
2137 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
2138 					ippp++;
2139 					*ippp = tsn;
2140 					ippp++;
2141 					*ippp = ((strmno << 16) | strmseq);
2142 				}
2143 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
2144 				sctp_abort_an_association(stcb->sctp_ep, stcb,
2145 				    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2146 
2147 				*abort_flag = 1;
2148 				return (0);
2149 			} else {
2150 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2151 					sctp_m_freem(control->data);
2152 					control->data = NULL;
2153 					if (control->whoFrom) {
2154 						sctp_free_remote_addr(control->whoFrom);
2155 						control->whoFrom = NULL;
2156 					}
2157 					sctp_free_a_readq(stcb, control);
2158 
2159 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2160 					    0, M_DONTWAIT, 1, MT_DATA);
2161 					if (oper) {
2162 						struct sctp_paramhdr *ph;
2163 						uint32_t *ippp;
2164 
2165 						SCTP_BUF_LEN(oper) =
2166 						    sizeof(struct sctp_paramhdr) +
2167 						    (3 * sizeof(uint32_t));
2168 						ph = mtod(oper,
2169 						    struct sctp_paramhdr *);
2170 						ph->param_type =
2171 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2172 						ph->param_length =
2173 						    htons(SCTP_BUF_LEN(oper));
2174 						ippp = (uint32_t *) (ph + 1);
2175 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
2176 						ippp++;
2177 						*ippp = tsn;
2178 						ippp++;
2179 						*ippp = ((strmno << 16) | strmseq);
2180 					}
2181 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2182 					sctp_abort_an_association(stcb->sctp_ep,
2183 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2184 
2185 					*abort_flag = 1;
2186 					return (0);
2187 				}
2188 			}
2189 		} else {
2190 			/* No PDAPI running */
2191 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2192 				/*
2193 				 * Reassembly queue is NOT empty; validate
2194 				 * that this tsn does not need to be in the
2195 				 * reassembly queue. If it does, then our
2196 				 * peer is broken or evil.
2197 				 */
2198 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
2199 					sctp_m_freem(control->data);
2200 					control->data = NULL;
2201 					if (control->whoFrom) {
2202 						sctp_free_remote_addr(control->whoFrom);
2203 						control->whoFrom = NULL;
2204 					}
2205 					sctp_free_a_readq(stcb, control);
2206 					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
2207 					    0, M_DONTWAIT, 1, MT_DATA);
2208 					if (oper) {
2209 						struct sctp_paramhdr *ph;
2210 						uint32_t *ippp;
2211 
2212 						SCTP_BUF_LEN(oper) =
2213 						    sizeof(struct sctp_paramhdr) +
2214 						    (3 * sizeof(uint32_t));
2215 						ph = mtod(oper,
2216 						    struct sctp_paramhdr *);
2217 						ph->param_type =
2218 						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2219 						ph->param_length =
2220 						    htons(SCTP_BUF_LEN(oper));
2221 						ippp = (uint32_t *) (ph + 1);
2222 						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2223 						ippp++;
2224 						*ippp = tsn;
2225 						ippp++;
2226 						*ippp = ((strmno << 16) | strmseq);
2227 					}
2228 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2229 					sctp_abort_an_association(stcb->sctp_ep,
2230 					    stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
2231 
2232 					*abort_flag = 1;
2233 					return (0);
2234 				}
2235 			}
2236 		}
2237 		/* ok, if we reach here we have passed the sanity checks */
2238 		if (chunk_flags & SCTP_DATA_UNORDERED) {
2239 			/* queue directly into socket buffer */
2240 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2241 			    control,
2242 			    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
2243 
2244 			/*
2245 			 * EY: It was added to the read queue in the prev
2246 			 * if-block; here I should check if this delivered
2247 			 * tsn is out-of-order and, if so, update the nr_map
2248 			 */
2249 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2250 				/*
2251 				 * EY check if the mapping_array and
2252 				 * nr_mapping array are consistent
2253 				 */
2254 				if (asoc->mapping_array_base_tsn != asoc->nr_mapping_array_base_tsn)
2255 					/*
2256 					 * printf("EY-IN
2257 					 * sctp_process_a_data_chunk(6):
2258 					 * Something is wrong the map base
2259 					 * tsn" "\nEY-and nr_map base tsn
2260 					 * should be equal.");
2261 					 */
2262 					/*
2263 					 * EY - not 100% sure about the lock
2264 					 * thing; I think we don't need the
2265 					 * one below
2266 					 */
2267 					/* SCTP_TCB_LOCK_ASSERT(stcb); */
2268 				{
2269 					/*
2270 					 * printf("\nEY-Calculating an
2271 					 * nr_gap!!\nEY-mapping_array_size =
2272 					 * %d nr_mapping_array_size = %d"
2273 					 * "\nEY-mapping_array_base = %d
2274 					 * nr_mapping_array_base =
2275 					 * %d\nEY-highest_tsn_inside_map =
2276 					 * %d" "highest_tsn_inside_nr_map =
2277 					 * %d\nEY-TSN = %d nr_gap =
2278 					 * %d",asoc->mapping_array_size,
2279 					 * asoc->nr_mapping_array_size,
2280 					 * asoc->mapping_array_base_tsn,
2281 					 * asoc->nr_mapping_array_base_tsn,
2282 					 * asoc->highest_tsn_inside_map,
2283 					 * asoc->highest_tsn_inside_nr_map,ts
2284 					 * n,nr_gap);
2285 					 */
2286 				}
2287 				SCTP_TCB_LOCK_ASSERT(stcb);
2288 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
2289 				if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
2290 					asoc->highest_tsn_inside_nr_map = tsn;
2291 			}
2292 		} else {
2293 			/*
2294 			 * Special check for when streams are resetting. We
2295 			 * could be smarter about this and check the actual
2296 			 * stream to see if it is not being reset... that
2297 			 * way we would not create a HOLB (head-of-line
2298 			 * block) amongst streams being reset and those not.
2299 			 *
2300 			 * We take complete messages that have a stream reset
2301 			 * intervening (aka the TSN is after where our
2302 			 * cum-ack needs to be) off and put them on a
2303 			 * pending_reply_queue. The reassembly ones we do
2304 			 * not have to worry about since they are all sorted
2305 			 * and processed by TSN order. It is only the
2306 			 * singletons I must worry about.
2307 			 */
2308 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2309 			    ((compare_with_wrap(tsn, liste->tsn, MAX_TSN)))
2310 			    ) {
2311 				/*
2312 				 * yep, it's past where we need to reset... go
2313 				 * ahead and queue it.
2314 				 */
2315 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2316 					/* first one on */
2317 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2318 				} else {
2319 					struct sctp_queued_to_read *ctlOn;
2320 					unsigned char inserted = 0;
2321 
2322 					ctlOn = TAILQ_FIRST(&asoc->pending_reply_queue);
2323 					while (ctlOn) {
2324 						if (compare_with_wrap(control->sinfo_tsn,
2325 						    ctlOn->sinfo_tsn, MAX_TSN)) {
2326 							ctlOn = TAILQ_NEXT(ctlOn, next);
2327 						} else {
2328 							/* found it */
2329 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
2330 							inserted = 1;
2331 							break;
2332 						}
2333 					}
2334 					if (inserted == 0) {
2335 						/*
2336 						 * not inserted in the loop,
2337 						 * so it has the largest TSN
2338 						 * and goes at the end
2339 						 */
2340 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2341 					}
2342 				}
2343 			} else {
2344 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2345 				if (*abort_flag) {
2346 					return (0);
2347 				}
2348 			}
2349 		}
2350 	} else {
2351 		/* Into the re-assembly queue */
2352 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2353 		if (*abort_flag) {
2354 			/*
2355 			 * the assoc is now gone and chk was put onto the
2356 			 * reasm queue, which has all been freed.
2357 			 */
2358 			*m = NULL;
2359 			return (0);
2360 		}
2361 	}
2362 finish_express_del:
2363 	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2364 		/* we have a new high score */
2365 		asoc->highest_tsn_inside_map = tsn;
2366 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2367 			sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2368 		}
2369 	}
2370 	if (tsn == (asoc->cumulative_tsn + 1)) {
2371 		/* Update cum-ack */
2372 		asoc->cumulative_tsn = tsn;
2373 	}
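	/*
	 * Example of the cum-ack bump above (illustrative numbers): with
	 * cumulative_tsn = 999, an arriving tsn of 1000 advances the cum-ack
	 * directly; a tsn of 1002 would not, and is instead left to the
	 * mapping array until sctp_sack_check() slides things forward.
	 */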
2374 	if (last_chunk) {
2375 		*m = NULL;
2376 	}
2377 	if (ordered) {
2378 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2379 	} else {
2380 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2381 	}
2382 	SCTP_STAT_INCR(sctps_recvdata);
2383 	/* Set it present please */
2384 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2385 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2386 	}
2387 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2388 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2389 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2390 	}
2391 	SCTP_TCB_LOCK_ASSERT(stcb);
2392 	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2393 	/* check the special flag for stream resets */
2394 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2395 	    ((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
2396 	    (asoc->cumulative_tsn == liste->tsn))
2397 	    ) {
2398 		/*
2399 		 * we have finished working through the backlogged TSNs; now
2400 		 * it is time to reset streams. 1: call the reset function.
2401 		 * 2: free pending_reply space. 3: distribute any chunks in
2402 		 * the pending_reply_queue.
2403 		 */
2404 		struct sctp_queued_to_read *ctl;
2405 
2406 		sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2407 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2408 		SCTP_FREE(liste, SCTP_M_STRESET);
2409 		/* sa_ignore FREED_MEMORY */
2410 		liste = TAILQ_FIRST(&asoc->resetHead);
2411 		ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2412 		if (ctl && (liste == NULL)) {
2413 			/* All can be removed */
2414 			while (ctl) {
2415 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2416 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2417 				if (*abort_flag) {
2418 					return (0);
2419 				}
2420 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2421 			}
2422 		} else if (ctl) {
2423 			/* more than one in queue */
2424 			while (!compare_with_wrap(ctl->sinfo_tsn, liste->tsn, MAX_TSN)) {
2425 				/*
2426 				 * if ctl->sinfo_tsn is <= liste->tsn we can
2427 				 * process it which is the NOT of
2428 				 * ctl->sinfo_tsn > liste->tsn
2429 				 */
2430 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2431 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2432 				if (*abort_flag) {
2433 					return (0);
2434 				}
2435 				ctl = TAILQ_FIRST(&asoc->pending_reply_queue);
2436 			}
2437 		}
2438 		/*
2439 		 * Now service re-assembly to pick up anything that has been
2440 		 * held on the reassembly queue.
2441 		 */
2442 		sctp_deliver_reasm_check(stcb, asoc);
2443 		need_reasm_check = 0;
2444 	}
2445 	if (need_reasm_check) {
2446 		/* Another one waits? */
2447 		sctp_deliver_reasm_check(stcb, asoc);
2448 	}
2449 	return (1);
2450 }
2451 
2452 int8_t sctp_map_lookup_tab[256] = {
2453 	-1, 0, -1, 1, -1, 0, -1, 2,
2454 	-1, 0, -1, 1, -1, 0, -1, 3,
2455 	-1, 0, -1, 1, -1, 0, -1, 2,
2456 	-1, 0, -1, 1, -1, 0, -1, 4,
2457 	-1, 0, -1, 1, -1, 0, -1, 2,
2458 	-1, 0, -1, 1, -1, 0, -1, 3,
2459 	-1, 0, -1, 1, -1, 0, -1, 2,
2460 	-1, 0, -1, 1, -1, 0, -1, 5,
2461 	-1, 0, -1, 1, -1, 0, -1, 2,
2462 	-1, 0, -1, 1, -1, 0, -1, 3,
2463 	-1, 0, -1, 1, -1, 0, -1, 2,
2464 	-1, 0, -1, 1, -1, 0, -1, 4,
2465 	-1, 0, -1, 1, -1, 0, -1, 2,
2466 	-1, 0, -1, 1, -1, 0, -1, 3,
2467 	-1, 0, -1, 1, -1, 0, -1, 2,
2468 	-1, 0, -1, 1, -1, 0, -1, 6,
2469 	-1, 0, -1, 1, -1, 0, -1, 2,
2470 	-1, 0, -1, 1, -1, 0, -1, 3,
2471 	-1, 0, -1, 1, -1, 0, -1, 2,
2472 	-1, 0, -1, 1, -1, 0, -1, 4,
2473 	-1, 0, -1, 1, -1, 0, -1, 2,
2474 	-1, 0, -1, 1, -1, 0, -1, 3,
2475 	-1, 0, -1, 1, -1, 0, -1, 2,
2476 	-1, 0, -1, 1, -1, 0, -1, 5,
2477 	-1, 0, -1, 1, -1, 0, -1, 2,
2478 	-1, 0, -1, 1, -1, 0, -1, 3,
2479 	-1, 0, -1, 1, -1, 0, -1, 2,
2480 	-1, 0, -1, 1, -1, 0, -1, 4,
2481 	-1, 0, -1, 1, -1, 0, -1, 2,
2482 	-1, 0, -1, 1, -1, 0, -1, 3,
2483 	-1, 0, -1, 1, -1, 0, -1, 2,
2484 	-1, 0, -1, 1, -1, 0, -1, 7,
2485 };
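/*
 * sctp_map_lookup_tab[b] is one less than the number of consecutive 1 bits
 * in b counted from bit 0 (so -1 when bit 0 is clear); the at++ done in
 * sctp_sack_check() compensates for the embedded -1. An illustrative helper
 * (not used by the stack) would be:
 *
 *	static int consecutive(uint8_t b) { return (sctp_map_lookup_tab[b] + 1); }
 *
 * consecutive(0x07) == 3, consecutive(0x0f) == 4, consecutive(0x00) == 0;
 * a byte of 0xff is handled separately and contributes a full 8.
 */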
2486 
2487 
2488 void
2489 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2490 {
2491 	/*
2492 	 * Now we also need to check the mapping array in a couple of ways.
2493 	 * 1) Did we move the cum-ack point? 2) Can we slide the array down?
2494 	 */
2495 	struct sctp_association *asoc;
2496 	int at;
2497 	int last_all_ones = 0;
2498 	int slide_from, slide_end, lgap, distance;
2499 
2500 	/* EY nr_mapping array variables */
2501 	int nr_at;
2502 	int nr_last_all_ones = 0;
2503 	int nr_slide_from, nr_slide_end, nr_lgap, nr_distance;
2504 
2505 	uint32_t old_cumack, old_base, old_highest;
2506 	unsigned char aux_array[64];
2507 
2508 	/*
2509 	 * EY! Don't think this is required but I am imitating the code for
2510 	 * the map just to make sure
2511 	 */
2512 	unsigned char nr_aux_array[64];
2513 
2514 	asoc = &stcb->asoc;
2515 	at = 0;
2516 
2517 	old_cumack = asoc->cumulative_tsn;
2518 	old_base = asoc->mapping_array_base_tsn;
2519 	old_highest = asoc->highest_tsn_inside_map;
2520 	if (asoc->mapping_array_size < 64)
2521 		memcpy(aux_array, asoc->mapping_array,
2522 		    asoc->mapping_array_size);
2523 	else
2524 		memcpy(aux_array, asoc->mapping_array, 64);
2525 	/* EY do the same for nr_mapping_array */
2526 	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2527 
2528 		if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
2529 			/*
2530 			 * printf("\nEY-IN sack_check method: \nEY-" "The
2531 			 * size of map and nr_map are inconsistent")
2532 			 */ ;
2533 		}
2534 		if (asoc->nr_mapping_array_base_tsn != asoc->mapping_array_base_tsn) {
2535 			/*
2536 			 * printf("\nEY-IN sack_check method VERY CRUCIAL
2537 			 * error: \nEY-" "The base tsns of map and nr_map
2538 			 * are inconsistent")
2539 			 */ ;
2540 		}
2541 		/* EY! just imitating the above code */
2542 		if (asoc->nr_mapping_array_size < 64)
2543 			memcpy(nr_aux_array, asoc->nr_mapping_array,
2544 			    asoc->nr_mapping_array_size);
2545 		else
2546 			memcpy(nr_aux_array, asoc->nr_mapping_array, 64);
2547 	}
2548 	/*
2549 	 * We could probably improve this a small bit by calculating the
2550 	 * offset of the current cum-ack as the starting point.
2551 	 */
2552 	at = 0;
2553 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2554 
2555 		if (asoc->mapping_array[slide_from] == 0xff) {
2556 			at += 8;
2557 			last_all_ones = 1;
2558 		} else {
2559 			/* there is a 0 bit */
2560 			at += sctp_map_lookup_tab[asoc->mapping_array[slide_from]];
2561 			last_all_ones = 0;
2562 			break;
2563 		}
2564 	}
2565 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - last_all_ones);
2566 	/* at is one off, since in the table an embedded -1 is present */
2567 	at++;
2568 
2569 	if (compare_with_wrap(asoc->cumulative_tsn,
2570 	    asoc->highest_tsn_inside_map,
2571 	    MAX_TSN)) {
2572 #ifdef INVARIANTS
2573 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2574 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2575 #else
2576 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2577 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2578 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2579 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2580 		}
2581 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2582 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2583 #endif
2584 	}
2585 	if ((asoc->cumulative_tsn == asoc->highest_tsn_inside_map) && (at >= 8)) {
2586 		/* The complete array was completed by a single FR */
2587 		/* highest becomes the cum-ack */
2588 		int clr;
2589 
2590 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2591 		/* clear the array */
2592 		clr = (at >> 3) + 1;
2593 		if (clr > asoc->mapping_array_size) {
2594 			clr = asoc->mapping_array_size;
2595 		}
2596 		memset(asoc->mapping_array, 0, clr);
2597 		/* base becomes one ahead of the cum-ack */
2598 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2599 
2600 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2601 
2602 			if (clr > asoc->nr_mapping_array_size)
2603 				clr = asoc->nr_mapping_array_size;
2604 
2605 			memset(asoc->nr_mapping_array, 0, clr);
2606 			/* base becomes one ahead of the cum-ack */
2607 			asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2608 			asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2609 		}
2610 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2611 			sctp_log_map(old_base, old_cumack, old_highest,
2612 			    SCTP_MAP_PREPARE_SLIDE);
2613 			sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2614 			    asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2615 		}
2616 	} else if (at >= 8) {
2617 		/* we can slide the mapping array down */
2618 		/* slide_from holds where we hit the first NON 0xff byte */
2619 
2620 		/*
2621 		 * now calculate the ceiling of the move using our highest
2622 		 * TSN value
2623 		 */
2624 		if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2625 			lgap = asoc->highest_tsn_inside_map -
2626 			    asoc->mapping_array_base_tsn;
2627 		} else {
2628 			lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2629 			    asoc->highest_tsn_inside_map + 1;
2630 		}
2631 		slide_end = lgap >> 3;
2632 		if (slide_end < slide_from) {
2633 #ifdef INVARIANTS
2634 			panic("impossible slide");
2635 #else
2636 			printf("impossible slide?\n");
2637 			return;
2638 #endif
2639 		}
2640 		if (slide_end > asoc->mapping_array_size) {
2641 #ifdef INVARIANTS
2642 			panic("would overrun buffer");
2643 #else
2644 			printf("Gak, would have overrun map end:%d slide_end:%d\n",
2645 			    asoc->mapping_array_size, slide_end);
2646 			slide_end = asoc->mapping_array_size;
2647 #endif
2648 		}
2649 		distance = (slide_end - slide_from) + 1;
2650 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2651 			sctp_log_map(old_base, old_cumack, old_highest,
2652 			    SCTP_MAP_PREPARE_SLIDE);
2653 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2654 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2655 		}
2656 		if (distance + slide_from > asoc->mapping_array_size ||
2657 		    distance < 0) {
2658 			/*
2659 			 * Here we do NOT slide forward the array so that
2660 			 * hopefully when more data comes in to fill it up
2661 			 * we will be able to slide it forward. Really I
2662 			 * don't think this should happen :-0
2663 			 */
2664 
2665 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2666 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2667 				    (uint32_t) asoc->mapping_array_size,
2668 				    SCTP_MAP_SLIDE_NONE);
2669 			}
2670 		} else {
2671 			int ii;
2672 
2673 			for (ii = 0; ii < distance; ii++) {
2674 				asoc->mapping_array[ii] =
2675 				    asoc->mapping_array[slide_from + ii];
2676 			}
2677 			for (ii = distance; ii <= slide_end; ii++) {
2678 				asoc->mapping_array[ii] = 0;
2679 			}
2680 			asoc->mapping_array_base_tsn += (slide_from << 3);
2681 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2682 				sctp_log_map(asoc->mapping_array_base_tsn,
2683 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2684 				    SCTP_MAP_SLIDE_RESULT);
2685 			}
2686 		}
2687 	}
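	/*
	 * Illustrative slide (made-up state): if the first two bytes of the
	 * mapping array are 0xff, slide_from lands on index 2 with at >= 16,
	 * so bytes [slide_from .. slide_end] are copied down to index 0 and
	 * mapping_array_base_tsn advances by slide_from << 3 = 16 TSNs.
	 */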
2688 	/*
2689 	 * EY if doing nr_sacks then slide the nr_mapping_array accordingly
2690 	 * please
2691 	 */
2692 	if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
2693 
2694 		nr_at = 0;
2695 		for (nr_slide_from = 0; nr_slide_from < stcb->asoc.nr_mapping_array_size; nr_slide_from++) {
2696 
2697 			if (asoc->nr_mapping_array[nr_slide_from] == 0xff) {
2698 				nr_at += 8;
2699 				nr_last_all_ones = 1;
2700 			} else {
2701 				/* there is a 0 bit */
2702 				nr_at += sctp_map_lookup_tab[asoc->nr_mapping_array[nr_slide_from]];
2703 				nr_last_all_ones = 0;
2704 				break;
2705 			}
2706 		}
2707 
2708 		nr_at++;
2709 
2710 		if (compare_with_wrap(asoc->cumulative_tsn,
2711 		    asoc->highest_tsn_inside_nr_map, MAX_TSN) && (nr_at >= 8)) {
2712 			/* The complete array was completed by a single FR */
2713 			/* highest becomes the cum-ack */
2714 			int clr;
2715 
2716 			clr = (nr_at >> 3) + 1;
2717 
2718 			if (clr > asoc->nr_mapping_array_size)
2719 				clr = asoc->nr_mapping_array_size;
2720 
2721 			memset(asoc->nr_mapping_array, 0, clr);
2722 			/* base becomes one ahead of the cum-ack */
2723 			asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2724 			asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2725 
2726 		} else if (nr_at >= 8) {
2727 			/* we can slide the mapping array down */
2728 			/* Calculate the new byte position we can move down */
2729 
2730 			/*
2731 			 * now calculate the ceiling of the move using our
2732 			 * highest TSN value
2733 			 */
2734 			if (asoc->highest_tsn_inside_nr_map >= asoc->nr_mapping_array_base_tsn) {
2735 				nr_lgap = asoc->highest_tsn_inside_nr_map -
2736 				    asoc->nr_mapping_array_base_tsn;
2737 			} else {
2738 				nr_lgap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) +
2739 				    asoc->highest_tsn_inside_nr_map + 1;
2740 			}
2741 			nr_slide_end = nr_lgap >> 3;
2742 			if (nr_slide_end < nr_slide_from) {
2743 #ifdef INVARIANTS
2744 				panic("impossible slide");
2745 #else
2746 				printf("impossible slide?\n");
2747 				return;
2748 #endif
2749 			}
2750 			if (nr_slide_end > asoc->nr_mapping_array_size) {
2751 #ifdef INVARIANTS
2752 				panic("would overrun buffer");
2753 #else
2754 				printf("Gak, would have overrun map end:%d nr_slide_end:%d\n",
2755 				    asoc->nr_mapping_array_size, nr_slide_end);
2756 				nr_slide_end = asoc->nr_mapping_array_size;
2757 #endif
2758 			}
2759 			nr_distance = (nr_slide_end - nr_slide_from) + 1;
2760 
2761 			if (nr_distance + nr_slide_from > asoc->nr_mapping_array_size ||
2762 			    nr_distance < 0) {
2763 				/*
2764 				 * Here we do NOT slide forward the array so
2765 				 * that hopefully when more data comes in to
2766 				 * fill it up we will be able to slide it
2767 				 * forward. Really I don't think this should
2768 				 * happen :-0
2769 				 */
2770 				;
2771 			} else {
2772 				int ii;
2773 
2774 				for (ii = 0; ii < nr_distance; ii++) {
2775 					asoc->nr_mapping_array[ii] =
2776 					    asoc->nr_mapping_array[nr_slide_from + ii];
2777 				}
2778 				for (ii = nr_distance; ii <= nr_slide_end; ii++) {
2779 					asoc->nr_mapping_array[ii] = 0;
2780 				}
2781 				asoc->nr_mapping_array_base_tsn += (nr_slide_from << 3);
2782 			}
2783 		}
2784 	}
2785 	/*
2786 	 * Now we need to see if we need to queue a sack or just start the
2787 	 * timer (if allowed).
2788 	 */
2789 	if (ok_to_sack) {
2790 		if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2791 			/*
2792 			 * Ok, special case: in the SHUTDOWN-SENT state we
2793 			 * make sure the SACK timer is off and instead send
2794 			 * a SHUTDOWN and a SACK
2795 			 */
2796 			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2797 				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2798 				    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2799 			}
2800 			sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2801 			/*
2802 			 * EY: if nr_sacks are in use then send an nr-sack,
2803 			 * otherwise a sack
2804 			 */
2805 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
2806 				sctp_send_nr_sack(stcb);
2807 			else
2808 				sctp_send_sack(stcb);
2809 		} else {
2810 			int is_a_gap;
2811 
2812 			/* is there a gap now? */
2813 			is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2814 			    stcb->asoc.cumulative_tsn, MAX_TSN);
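			/*
			 * Illustrative case: a cum-ack of 10 with TSN 12
			 * received but 11 missing leaves
			 * highest_tsn_inside_map (12) ahead of the cum-ack,
			 * so is_a_gap is 1 and a SACK carrying gap-ack
			 * blocks is warranted.
			 */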
2815 
2816 			/*
2817 			 * CMT DAC algorithm: increase number of packets
2818 			 * received since last ack
2819 			 */
2820 			stcb->asoc.cmt_dac_pkts_rcvd++;
2821 
2822 			if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2823 								 * SACK */
2824 			    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2825 								 * longer is one */
2826 			    (stcb->asoc.numduptsns) ||	/* we have dup's */
2827 			    (is_a_gap) ||	/* is still a gap */
2828 			    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2829 			    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2830 			    ) {
2831 
2832 				if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
2833 				    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2834 				    (stcb->asoc.send_sack == 0) &&
2835 				    (stcb->asoc.numduptsns == 0) &&
2836 				    (stcb->asoc.delayed_ack) &&
2837 				    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2838 
2839 					/*
2840 					 * CMT DAC algorithm: With CMT,
2841 					 * delay acks even in the face of
2842 					 * reordering. Therefore, acks
2843 					 * that do not have to be sent
2844 					 * because of the above reasons
2845 					 * will be delayed. That is, acks
2846 					 * that would have been sent due
2847 					 * to gap reports will be delayed
2848 					 * with DAC. Start the delayed
2849 					 * ack timer.
2850 					 */
2851 					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2852 					    stcb->sctp_ep, stcb, NULL);
2853 				} else {
2854 					/*
2855 					 * Ok, we must build a SACK since
2856 					 * the timer is pending, we got our
2857 					 * first packet, OR there are gaps
2858 					 * or duplicates.
2859 					 */
2860 					(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2861 					/*
2862 					 * EY: if nr_sacks are in use then
2863 					 * send an nr-sack, otherwise a sack
2864 					 */
2865 					if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
2866 						sctp_send_nr_sack(stcb);
2867 					else
2868 						sctp_send_sack(stcb);
2869 				}
2870 			} else {
2871 				if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2872 					sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2873 					    stcb->sctp_ep, stcb, NULL);
2874 				}
2875 			}
2876 		}
2877 	}
2878 }
2879 
2880 void
2881 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2882 {
2883 	struct sctp_tmit_chunk *chk;
2884 	uint32_t tsize;
2885 	uint16_t nxt_todel;
2886 
2887 	if (asoc->fragmented_delivery_inprogress) {
2888 		sctp_service_reassembly(stcb, asoc);
2889 	}
2890 	/* Can we proceed further, i.e., is the PD-API complete? */
2891 	if (asoc->fragmented_delivery_inprogress) {
2892 		/* no */
2893 		return;
2894 	}
2895 	/*
2896 	 * Now is there some other chunk I can deliver from the reassembly
2897 	 * queue?
2898 	 */
2899 doit_again:
2900 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2901 	if (chk == NULL) {
2902 		asoc->size_on_reasm_queue = 0;
2903 		asoc->cnt_on_reasm_queue = 0;
2904 		return;
2905 	}
2906 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2907 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2908 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2909 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2910 		/*
2911 		 * Yep, the first one is here. We set up to start reception
2912 		 * by backing down the TSN, just in case we can't deliver.
2913 		 */
2914 
2915 		/*
2916 		 * Before we start, though, either all of the message should
2917 		 * be here, or at least 1/4 the socket buffer max (the
2918 		 * partial delivery point), so something can be delivered.
2919 		 */
2920 		if ((sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2921 		    (tsize >= stcb->sctp_ep->partial_delivery_point))) {
2922 			asoc->fragmented_delivery_inprogress = 1;
2923 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2924 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2925 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2926 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2927 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2928 			sctp_service_reassembly(stcb, asoc);
2929 			if (asoc->fragmented_delivery_inprogress == 0) {
2930 				goto doit_again;
2931 			}
2932 		}
2933 	}
2934 }
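/*
 * Example of the partial delivery trigger above (illustrative sizes): with
 * partial_delivery_point = 4096 and a 10000-byte message arriving in
 * fragments, delivery to the socket can start once 4096 in-order bytes sit
 * on the reassembly queue, rather than waiting for the whole message.
 */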
2935 
2936 int
2937 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2938     struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2939     struct sctp_nets *net, uint32_t * high_tsn)
2940 {
2941 	struct sctp_data_chunk *ch, chunk_buf;
2942 	struct sctp_association *asoc;
2943 	int num_chunks = 0;	/* number of DATA chunks processed */
2944 	int stop_proc = 0;
2945 	int chk_length, break_flag, last_chunk;
2946 	int abort_flag = 0, was_a_gap = 0;
2947 	struct mbuf *m;
2948 
2949 	/* set the rwnd */
2950 	sctp_set_rwnd(stcb, &stcb->asoc);
2951 
2952 	m = *mm;
2953 	SCTP_TCB_LOCK_ASSERT(stcb);
2954 	asoc = &stcb->asoc;
2955 	if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2956 	    stcb->asoc.cumulative_tsn, MAX_TSN)) {
2957 		/* there was a gap before this data was processed */
2958 		was_a_gap = 1;
2959 	}
2960 	/*
2961 	 * set up where we got the last DATA packet from for any SACK that
2962 	 * may need to go out. Don't bump the net; that is done ONLY when a
2963 	 * chunk is assigned.
2964 	 */
2965 	asoc->last_data_chunk_from = net;
2966 
2967 	/*-
2968 	 * Now before we proceed we must figure out if this is a wasted
2969 	 * cluster... i.e. it is a small packet sent in and yet the driver
2970 	 * underneath allocated a full cluster for it. If so we must copy it
2971 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2972 	 * with cluster starvation. Note for __Panda__ we don't do this
2973 	 * since it has clusters all the way down to 64 bytes.
2974 	 */
2975 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2976 		/* we only handle mbufs that are singletons.. not chains */
2977 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2978 		if (m) {
2979 			/* ok, let's see if we can copy the data up */
2980 			caddr_t *from, *to;
2981 
2982 			/* get the pointers and copy */
2983 			to = mtod(m, caddr_t *);
2984 			from = mtod((*mm), caddr_t *);
2985 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2986 			/* copy the length and free up the old */
2987 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2988 			sctp_m_freem(*mm);
2989 			/* success, back copy */
2990 			*mm = m;
2991 		} else {
2992 			/* We are in trouble in the mbuf world .. yikes */
2993 			m = *mm;
2994 		}
2995 	}
2996 	/* get pointer to the first chunk header */
2997 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2998 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2999 	if (ch == NULL) {
3000 		return (1);
3001 	}
3002 	/*
3003 	 * process all DATA chunks...
3004 	 */
3005 	*high_tsn = asoc->cumulative_tsn;
3006 	break_flag = 0;
3007 	asoc->data_pkts_seen++;
3008 	while (stop_proc == 0) {
3009 		/* validate chunk length */
3010 		chk_length = ntohs(ch->ch.chunk_length);
3011 		if (length - *offset < chk_length) {
3012 			/* all done, mutilated chunk */
3013 			stop_proc = 1;
3014 			break;
3015 		}
3016 		if (ch->ch.chunk_type == SCTP_DATA) {
3017 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
3018 				/*
3019 				 * Need to send an abort since we had an
3020 				 * invalid data chunk.
3021 				 */
3022 				struct mbuf *op_err;
3023 
3024 				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
3025 				    0, M_DONTWAIT, 1, MT_DATA);
3026 
3027 				if (op_err) {
3028 					struct sctp_paramhdr *ph;
3029 					uint32_t *ippp;
3030 
3031 					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
3032 					    (2 * sizeof(uint32_t));
3033 					ph = mtod(op_err, struct sctp_paramhdr *);
3034 					ph->param_type =
3035 					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3036 					ph->param_length = htons(SCTP_BUF_LEN(op_err));
3037 					ippp = (uint32_t *) (ph + 1);
3038 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
3039 					ippp++;
3040 					*ippp = asoc->cumulative_tsn;
3041 
3042 				}
3043 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
3044 				sctp_abort_association(inp, stcb, m, iphlen, sh,
3045 				    op_err, 0, net->port);
3046 				return (2);
3047 			}
3048 #ifdef SCTP_AUDITING_ENABLED
3049 			sctp_audit_log(0xB1, 0);
3050 #endif
3051 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
3052 				last_chunk = 1;
3053 			} else {
3054 				last_chunk = 0;
3055 			}
3056 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
3057 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
3058 			    last_chunk)) {
3059 				num_chunks++;
3060 			}
3061 			if (abort_flag)
3062 				return (2);
3063 
3064 			if (break_flag) {
3065 				/*
3066 				 * Set because of out of rwnd space and no
3067 				 * drop rep space left.
3068 				 */
3069 				stop_proc = 1;
3070 				break;
3071 			}
3072 		} else {
3073 			/* not a data chunk in the data region */
3074 			switch (ch->ch.chunk_type) {
3075 			case SCTP_INITIATION:
3076 			case SCTP_INITIATION_ACK:
3077 			case SCTP_SELECTIVE_ACK:
3078 			case SCTP_NR_SELECTIVE_ACK:	/* EY */
3079 			case SCTP_HEARTBEAT_REQUEST:
3080 			case SCTP_HEARTBEAT_ACK:
3081 			case SCTP_ABORT_ASSOCIATION:
3082 			case SCTP_SHUTDOWN:
3083 			case SCTP_SHUTDOWN_ACK:
3084 			case SCTP_OPERATION_ERROR:
3085 			case SCTP_COOKIE_ECHO:
3086 			case SCTP_COOKIE_ACK:
3087 			case SCTP_ECN_ECHO:
3088 			case SCTP_ECN_CWR:
3089 			case SCTP_SHUTDOWN_COMPLETE:
3090 			case SCTP_AUTHENTICATION:
3091 			case SCTP_ASCONF_ACK:
3092 			case SCTP_PACKET_DROPPED:
3093 			case SCTP_STREAM_RESET:
3094 			case SCTP_FORWARD_CUM_TSN:
3095 			case SCTP_ASCONF:
3096 				/*
3097 				 * Now, what do we do with KNOWN chunks that
3098 				 * are NOT in the right place?
3099 				 *
3100 				 * For now, I do nothing but ignore them. We
3101 				 * may later want to add sysctl stuff to
3102 				 * switch out and do either an ABORT() or
3103 				 * possibly process them.
3104 				 */
3105 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
3106 					struct mbuf *op_err;
3107 
3108 					op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
3109 					sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 0, net->port);
3110 					return (2);
3111 				}
3112 				break;
3113 			default:
3114 				/* unknown chunk type, use bit rules */
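				/*
				 * The bit rules (RFC 4960, Section 3.2): in
				 * an unrecognized chunk type, bit 0x40 set
				 * means "report an ERROR" and bit 0x80 set
				 * means "keep processing the packet".
				 * Illustrative types: 0x3f stops silently,
				 * 0x7f reports then stops, 0xbf skips
				 * silently, 0xff reports and skips.
				 */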
3115 				if (ch->ch.chunk_type & 0x40) {
3116 					/* Add an error report to the queue */
3117 					struct mbuf *merr;
3118 					struct sctp_paramhdr *phd;
3119 
3120 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
3121 					if (merr) {
3122 						phd = mtod(merr, struct sctp_paramhdr *);
3123 						/*
3124 						 * We cheat and use param
3125 						 * type since we did not
3126 						 * bother to define an error
3127 						 * cause struct. They are
3128 						 * the same basic format
3129 						 * with different names.
3130 						 */
3131 						phd->param_type =
3132 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
3133 						phd->param_length =
3134 						    htons(chk_length + sizeof(*phd));
3135 						SCTP_BUF_LEN(merr) = sizeof(*phd);
3136 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset,
3137 						    SCTP_SIZE32(chk_length),
3138 						    M_DONTWAIT);
3139 						if (SCTP_BUF_NEXT(merr)) {
3140 							sctp_queue_op_err(stcb, merr);
3141 						} else {
3142 							sctp_m_freem(merr);
3143 						}
3144 					}
3145 				}
3146 				if ((ch->ch.chunk_type & 0x80) == 0) {
3147 					/* discard the rest of this packet */
3148 					stop_proc = 1;
3149 				}	/* else skip this bad chunk and
3150 					 * continue... */
3151 				break;
3152 			}	/* end of switch on chunk type */
3153 		}
3154 		*offset += SCTP_SIZE32(chk_length);
3155 		if ((*offset >= length) || stop_proc) {
3156 			/* no more data left in the mbuf chain */
3157 			stop_proc = 1;
3158 			continue;
3159 		}
3160 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
3161 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
3162 		if (ch == NULL) {
3163 			*offset = length;
3164 			stop_proc = 1;
3165 			break;
3166 
3167 		}
3168 	}			/* while */
3169 	if (break_flag) {
3170 		/*
3171 		 * we need to report rwnd overrun drops.
3172 		 */
3173 		sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
3174 	}
3175 	if (num_chunks) {
3176 		/*
3177 		 * We got data; update the time for auto-close and
3178 		 * give the peer credit for being alive.
3179 		 */
3180 		SCTP_STAT_INCR(sctps_recvpktwithdata);
3181 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3182 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3183 			    stcb->asoc.overall_error_count,
3184 			    0,
3185 			    SCTP_FROM_SCTP_INDATA,
3186 			    __LINE__);
3187 		}
3188 		stcb->asoc.overall_error_count = 0;
3189 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
3190 	}
3191 	/* now service all of the reassm queue if needed */
3192 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
3193 		sctp_service_queues(stcb, asoc);
3194 
3195 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
3196 		/* Assure that we ack right away */
3197 		stcb->asoc.send_sack = 1;
3198 	}
3199 	/* Start a sack timer or QUEUE a SACK for sending */
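	/*
	 * When the cumulative ack covers everything seen so far, we either
	 * send the SACK right away (enough packets seen, delayed ACK off,
	 * duplicate TSNs observed, or a SACK was forced) or arm the
	 * delayed-ACK timer; otherwise sctp_sack_check() decides for the
	 * gap case.
	 */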
3200 	if ((stcb->asoc.cumulative_tsn == stcb->asoc.highest_tsn_inside_map) &&
3201 	    (stcb->asoc.mapping_array[0] != 0xff)) {
3202 		if ((stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) ||
3203 		    (stcb->asoc.delayed_ack == 0) ||
3204 		    (stcb->asoc.numduptsns) ||
3205 		    (stcb->asoc.send_sack == 1)) {
3206 			if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3207 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
3208 			}
3209 			/*
3210 			 * EY: if NR-SACKs are in use, send an NR-SACK;
3211 			 * otherwise send a plain SACK.
3212 			 */
3213 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
3214 				sctp_send_nr_sack(stcb);
3215 			else
3216 				sctp_send_sack(stcb);
3217 		} else {
3218 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
3219 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
3220 				    stcb->sctp_ep, stcb, NULL);
3221 			}
3222 		}
3223 	} else {
3224 		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
3225 	}
3226 	if (abort_flag)
3227 		return (2);
3228 
3229 	return (0);
3230 }
3231 
3232 static void
3233 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3234     struct sctp_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3235     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3236     int num_seg, int *ecn_seg_sums)
3237 {
3238 	/************************************************/
3239 	/* process fragments and update sendqueue        */
3240 	/************************************************/
3241 	struct sctp_sack *sack;
3242 	struct sctp_gap_ack_block *frag, block;
3243 	struct sctp_tmit_chunk *tp1;
3244 	int i, j;
3245 	unsigned int theTSN;
3246 	int num_frs = 0;
3247 
3248 	uint16_t frag_strt, frag_end, primary_flag_set;
3249 	u_long last_frag_high;
3250 
3251 	/*
3252 	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
3253 	 */
3254 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3255 		primary_flag_set = 1;
3256 	} else {
3257 		primary_flag_set = 0;
3258 	}
3259 	sack = &ch->sack;
3260 
3261 	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3262 	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3263 	*offset += sizeof(block);
3264 	if (frag == NULL) {
3265 		return;
3266 	}
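	/*
	 * Gap-ack block start/end values are offsets relative to the
	 * SACK's cumulative TSN (last_tsn); a block covers the TSNs
	 * last_tsn + start through last_tsn + end inclusive.
	 */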
3267 	tp1 = NULL;
3268 	last_frag_high = 0;
3269 	for (i = 0; i < num_seg; i++) {
3270 		frag_strt = ntohs(frag->start);
3271 		frag_end = ntohs(frag->end);
3272 		/* some sanity checks on the fragment offsets */
3273 		if (frag_strt > frag_end) {
3274 			/* this one is malformed, skip */
3275 			frag++;
3276 			continue;
3277 		}
3278 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
3279 		    MAX_TSN))
3280 			*biggest_tsn_acked = frag_end + last_tsn;
3281 
3282 		/* mark acked datagrams and find the highest TSN being acked */
3283 		if (tp1 == NULL) {
3284 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3285 
3286 			/* save the locations of the last frags */
3287 			last_frag_high = frag_end + last_tsn;
3288 		} else {
3289 			/*
3290 			 * now let's see if we need to reset the queue due to
3291 			 * an out-of-order SACK fragment
3292 			 */
3293 			if (compare_with_wrap(frag_strt + last_tsn,
3294 			    last_frag_high, MAX_TSN)) {
3295 				/*
3296 				 * if the new frag starts after the last TSN
3297 				 * frag covered, we are ok and this one is
3298 				 * beyond the last one
3299 				 */
3300 				;
3301 			} else {
3302 				/*
3303 				 * ok, they have reset us, so we need to
3304 				 * reset the queue; this will cause extra
3305 				 * hunting, but the peer chose the
3306 				 * performance hit when it failed to order
3307 				 * its gap reports
3308 				 */
3309 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
3310 			}
3311 			last_frag_high = frag_end + last_tsn;
3312 		}
3313 		for (j = frag_strt; j <= frag_end; j++) {
3314 			theTSN = j + last_tsn;
3315 			while (tp1) {
3316 				if (tp1->rec.data.doing_fast_retransmit)
3317 					num_frs++;
3318 
3319 				/*
3320 				 * CMT: CUCv2 algorithm. For each TSN being
3321 				 * processed from the sent queue, track the
3322 				 * next expected pseudo-cumack, or
3323 				 * rtx_pseudo_cumack, if required. Separate
3324 				 * cumack trackers for first transmissions,
3325 				 * and retransmissions.
3326 				 */
3327 				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3328 				    (tp1->snd_count == 1)) {
3329 					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
3330 					tp1->whoTo->find_pseudo_cumack = 0;
3331 				}
3332 				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
3333 				    (tp1->snd_count > 1)) {
3334 					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
3335 					tp1->whoTo->find_rtx_pseudo_cumack = 0;
3336 				}
3337 				if (tp1->rec.data.TSN_seq == theTSN) {
3338 					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3339 						/*
3340 						 * must be held until
3341 						 * cum-ack passes
3342 						 */
3343 						/*
3344 						 * ECN Nonce: Add the nonce
3345 						 * value to the sender's
3346 						 * nonce sum
3347 						 */
3348 						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3349 							/*-
3350 							 * If it is less than RESEND, it is
3351 							 * now no longer in flight.
3352 							 * Higher values may already be set
3353 							 * via previous Gap Ack Blocks...
3354 							 * i.e. ACKED or RESEND.
3355 							 */
3356 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3357 							    *biggest_newly_acked_tsn, MAX_TSN)) {
3358 								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
3359 							}
3360 							/*
3361 							 * CMT: SFR algo
3362 							 * (and HTNA) - set
3363 							 * saw_newack to 1
3364 							 * for dest being
3365 							 * newly acked.
3366 							 * update
3367 							 * this_sack_highest_newack
3368 							 * if
3369 							 * appropriate.
3370 							 */
3371 							if (tp1->rec.data.chunk_was_revoked == 0)
3372 								tp1->whoTo->saw_newack = 1;
3373 
3374 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3375 							    tp1->whoTo->this_sack_highest_newack,
3376 							    MAX_TSN)) {
3377 								tp1->whoTo->this_sack_highest_newack =
3378 								    tp1->rec.data.TSN_seq;
3379 							}
3380 							/*
3381 							 * CMT DAC algo:
3382 							 * also update
3383 							 * this_sack_lowest_newack
3384 							 * as needed
3385 							 */
3386 							if (*this_sack_lowest_newack == 0) {
3387 								if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3388 									sctp_log_sack(*this_sack_lowest_newack,
3389 									    last_tsn,
3390 									    tp1->rec.data.TSN_seq,
3391 									    0,
3392 									    0,
3393 									    SCTP_LOG_TSN_ACKED);
3394 								}
3395 								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
3396 							}
3397 							/*
3398 							 * CMT: CUCv2
3399 							 * algorithm. If the
3400 							 * (rtx-)pseudo-cumack
3401 							 * for the corresp
3402 							 * dest is being
3403 							 * acked, then we
3404 							 * have a new
3405 							 * (rtx-)pseudo-cumack.
3406 							 * Set
3407 							 * new_(rtx_)pseudo_cumack
3408 							 * to TRUE so that
3409 							 * the cwnd for
3410 							 * this dest can be
3411 							 * updated. Also
3412 							 * trigger a search
3413 							 * for the next
3414 							 * expected
3415 							 * (rtx-)pseudo-cumack.
3416 							 * Separate
3417 							 * pseudo_cumack
3418 							 * trackers are kept
3419 							 * for first
3420 							 * transmissions and
3421 							 * retransmissions.
3422 							 */
3423 							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
3424 								if (tp1->rec.data.chunk_was_revoked == 0) {
3425 									tp1->whoTo->new_pseudo_cumack = 1;
3426 								}
3427 								tp1->whoTo->find_pseudo_cumack = 1;
3428 							}
3429 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3430 								sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3431 							}
3432 							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
3433 								if (tp1->rec.data.chunk_was_revoked == 0) {
3434 									tp1->whoTo->new_pseudo_cumack = 1;
3435 								}
3436 								tp1->whoTo->find_rtx_pseudo_cumack = 1;
3437 							}
3438 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3439 								sctp_log_sack(*biggest_newly_acked_tsn,
3440 								    last_tsn,
3441 								    tp1->rec.data.TSN_seq,
3442 								    frag_strt,
3443 								    frag_end,
3444 								    SCTP_LOG_TSN_ACKED);
3445 							}
3446 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3447 								sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3448 								    tp1->whoTo->flight_size,
3449 								    tp1->book_size,
3450 								    (uintptr_t) tp1->whoTo,
3451 								    tp1->rec.data.TSN_seq);
3452 							}
3453 							sctp_flight_size_decrease(tp1);
3454 							sctp_total_flight_decrease(stcb, tp1);
3455 
3456 							tp1->whoTo->net_ack += tp1->send_size;
3457 							if (tp1->snd_count < 2) {
3458 								/*
3459 								 * True
3460 								 * non-retransmitted
3461 								 * chunk
3462 								 */
3463 								tp1->whoTo->net_ack2 += tp1->send_size;
3464 
3465 								/*
3466 								 * update RTO
3467 								 * too? */
3468 								if (tp1->do_rtt) {
3469 									tp1->whoTo->RTO =
3470 									    sctp_calculate_rto(stcb,
3471 									    asoc,
3472 									    tp1->whoTo,
3473 									    &tp1->sent_rcv_time,
3474 									    sctp_align_safe_nocopy);
3475 									tp1->do_rtt = 0;
3476 								}
3477 							}
3478 						}
3479 						if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3480 							(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
3481 							(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
3482 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
3483 							    asoc->this_sack_highest_gap,
3484 							    MAX_TSN)) {
3485 								asoc->this_sack_highest_gap =
3486 								    tp1->rec.data.TSN_seq;
3487 							}
3488 							if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3489 								sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3490 #ifdef SCTP_AUDITING_ENABLED
3491 								sctp_audit_log(0xB2,
3492 								    (asoc->sent_queue_retran_cnt & 0x000000ff));
3493 #endif
3494 							}
3495 						}
3496 						/*
3497 						 * All chunks NOT UNSENT
3498 						 * fall through here and are
3499 						 * marked
3500 						 */
3501 						tp1->sent = SCTP_DATAGRAM_MARKED;
3502 						if (tp1->rec.data.chunk_was_revoked) {
3503 							/* deflate the cwnd */
3504 							tp1->whoTo->cwnd -= tp1->book_size;
3505 							tp1->rec.data.chunk_was_revoked = 0;
3506 						}
3507 					}
3508 					break;
3509 				}	/* if (tp1->TSN_seq == theTSN) */
3510 				if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
3511 				    MAX_TSN))
3512 					break;
3513 
3514 				tp1 = TAILQ_NEXT(tp1, sctp_next);
3515 			}	/* end while (tp1) */
3516 		}		/* end for (j = frag_strt) */
3517 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3518 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3519 		*offset += sizeof(block);
3520 		if (frag == NULL) {
3521 			break;
3522 		}
3523 	}
3524 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3525 		if (num_frs)
3526 			sctp_log_fr(*biggest_tsn_acked,
3527 			    *biggest_newly_acked_tsn,
3528 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3529 	}
3530 }
3531 
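/*
 * Walk the sent queue looking for chunks above the cum-ack that were
 * previously ACKED by a gap report but are not covered by this SACK;
 * such chunks have been "revoked" and must be put back in flight.
 */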
3532 static void
3533 sctp_check_for_revoked(struct sctp_tcb *stcb,
3534     struct sctp_association *asoc, uint32_t cumack,
3535     u_long biggest_tsn_acked)
3536 {
3537 	struct sctp_tmit_chunk *tp1;
3538 	int tot_revoked = 0;
3539 
3540 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3541 	while (tp1) {
3542 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
3543 		    MAX_TSN)) {
3544 			/*
3545 			 * ok this guy is either ACKED or MARKED. If it is
3546 			 * ACKED it has been previously acked but not this
3547 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
3548 			 * again.
3549 			 */
3550 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3551 			    MAX_TSN))
3552 				break;
3553 
3554 
3555 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3556 				/* it has been revoked */
3557 				tp1->sent = SCTP_DATAGRAM_SENT;
3558 				tp1->rec.data.chunk_was_revoked = 1;
3559 				/*
3560 				 * We must add this stuff back in to assure
3561 				 * timers and such get started.
3562 				 */
3563 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3564 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3565 					    tp1->whoTo->flight_size,
3566 					    tp1->book_size,
3567 					    (uintptr_t) tp1->whoTo,
3568 					    tp1->rec.data.TSN_seq);
3569 				}
3570 				sctp_flight_size_increase(tp1);
3571 				sctp_total_flight_increase(stcb, tp1);
3572 				/*
3573 				 * We inflate the cwnd to compensate for our
3574 				 * artificial inflation of the flight_size.
3575 				 */
3576 				tp1->whoTo->cwnd += tp1->book_size;
3577 				tot_revoked++;
3578 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3579 					sctp_log_sack(asoc->last_acked_seq,
3580 					    cumack,
3581 					    tp1->rec.data.TSN_seq,
3582 					    0,
3583 					    0,
3584 					    SCTP_LOG_TSN_REVOKED);
3585 				}
3586 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3587 				/* it has been re-acked in this SACK */
3588 				tp1->sent = SCTP_DATAGRAM_ACKED;
3589 			}
3590 		}
3591 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3592 			break;
3593 		tp1 = TAILQ_NEXT(tp1, sctp_next);
3594 	}
3595 	if (tot_revoked > 0) {
3596 		/*
3597 		 * Setup the ecn nonce re-sync point. We do this since once
3598 		 * data is revoked we begin to retransmit things, which do
3599 		 * NOT have the ECN bits set. This means we are now out of
3600 		 * sync and must wait until we get back in sync with the
3601 		 * peer to check ECN bits.
3602 		 */
3603 		tp1 = TAILQ_FIRST(&asoc->send_queue);
3604 		if (tp1 == NULL) {
3605 			asoc->nonce_resync_tsn = asoc->sending_seq;
3606 		} else {
3607 			asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
3608 		}
3609 		asoc->nonce_wait_for_ecne = 0;
3610 		asoc->nonce_sum_check = 0;
3611 	}
3612 }
3613 
3614 
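/*
 * Walk the sent queue and "strike" (bump the sent count of) chunks the
 * gap reports show as missing; when a chunk's count reaches
 * SCTP_DATAGRAM_RESEND it is marked for fast retransmit, an alternate
 * destination may be picked, and flight/rwnd accounting is adjusted.
 */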
3615 static void
3616 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3617     u_long biggest_tsn_acked, u_long biggest_tsn_newly_acked, u_long this_sack_lowest_newack, int accum_moved)
3618 {
3619 	struct sctp_tmit_chunk *tp1;
3620 	int strike_flag = 0;
3621 	struct timeval now;
3622 	int tot_retrans = 0;
3623 	uint32_t sending_seq;
3624 	struct sctp_nets *net;
3625 	int num_dests_sacked = 0;
3626 
3627 	/*
3628 	 * Select the sending_seq: this is either the next thing ready to be
3629 	 * sent but not yet transmitted, or the next seq we will assign.
3630 	 */
3631 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3632 	if (tp1 == NULL) {
3633 		sending_seq = asoc->sending_seq;
3634 	} else {
3635 		sending_seq = tp1->rec.data.TSN_seq;
3636 	}
3637 
3638 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3639 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3640 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3641 			if (net->saw_newack)
3642 				num_dests_sacked++;
3643 		}
3644 	}
3645 	if (stcb->asoc.peer_supports_prsctp) {
3646 		(void)SCTP_GETTIME_TIMEVAL(&now);
3647 	}
3648 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3649 	while (tp1) {
3650 		strike_flag = 0;
3651 		if (tp1->no_fr_allowed) {
3652 			/* this one had a timeout or something */
3653 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3654 			continue;
3655 		}
3656 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3657 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3658 				sctp_log_fr(biggest_tsn_newly_acked,
3659 				    tp1->rec.data.TSN_seq,
3660 				    tp1->sent,
3661 				    SCTP_FR_LOG_CHECK_STRIKE);
3662 		}
3663 		if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3664 		    MAX_TSN) ||
3665 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3666 			/* done */
3667 			break;
3668 		}
3669 		if (stcb->asoc.peer_supports_prsctp) {
3670 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3671 				/* Is it expired? */
3672 				if (
3673 				/*
3674 				 * TODO sctp_constants.h needs alternative
3675 				 * time macros when _KERNEL is undefined.
3676 				 */
3677 				    (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3678 				    ) {
3679 					/* Yes so drop it */
3680 					if (tp1->data != NULL) {
3681 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3682 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3683 						    &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3684 					}
3685 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3686 					continue;
3687 				}
3688 			}
3689 			if ((PR_SCTP_RTX_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3690 				/* Has it been retransmitted tv_sec times? */
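				/*
				 * For the RTX policy, timetodrop.tv_sec is
				 * reused to hold the maximum number of
				 * retransmissions allowed for this chunk.
				 */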
3691 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3692 					/* Yes, so drop it */
3693 					if (tp1->data != NULL) {
3694 						(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3695 						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
3696 						    &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
3697 					}
3698 					tp1 = TAILQ_NEXT(tp1, sctp_next);
3699 					continue;
3700 				}
3701 			}
3702 		}
3703 		if (compare_with_wrap(tp1->rec.data.TSN_seq,
3704 		    asoc->this_sack_highest_gap, MAX_TSN)) {
3705 			/* we are beyond the tsn in the sack */
3706 			break;
3707 		}
3708 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3709 			/* either a RESEND, ACKED, or MARKED */
3710 			/* skip */
3711 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3712 			continue;
3713 		}
3714 		/*
3715 		 * CMT: SFR algo (covers part of DAC and HTNA as well)
3716 		 */
3717 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3718 			/*
3719 			 * No new acks were received for data sent to this
3720 			 * dest. Therefore, according to the SFR algo for
3721 			 * CMT, no data sent to this dest can be marked for
3722 			 * FR using this SACK.
3723 			 */
3724 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3725 			continue;
3726 		} else if (tp1->whoTo && compare_with_wrap(tp1->rec.data.TSN_seq,
3727 		    tp1->whoTo->this_sack_highest_newack, MAX_TSN)) {
3728 			/*
3729 			 * CMT: New acks were received for data sent to
3730 			 * this dest. But no new acks were seen for data
3731 			 * sent after tp1. Therefore, according to the SFR
3732 			 * algo for CMT, tp1 cannot be marked for FR using
3733 			 * this SACK. This step covers part of the DAC algo
3734 			 * and the HTNA algo as well.
3735 			 */
3736 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3737 			continue;
3738 		}
3739 		/*
3740 		 * Here we check to see if we have already done a FR
3741 		 * and if so we see if the biggest TSN we saw in the sack is
3742 		 * smaller than the recovery point. If so we don't strike
3743 		 * the tsn... otherwise we CAN strike the TSN.
3744 		 */
3745 		/*
3746 		 * @@@ JRI: Check for CMT if (accum_moved &&
3747 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3748 		 * 0)) {
3749 		 */
3750 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3751 			/*
3752 			 * Strike the TSN if in fast-recovery and cum-ack
3753 			 * moved.
3754 			 */
3755 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3756 				sctp_log_fr(biggest_tsn_newly_acked,
3757 				    tp1->rec.data.TSN_seq,
3758 				    tp1->sent,
3759 				    SCTP_FR_LOG_STRIKE_CHUNK);
3760 			}
3761 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3762 				tp1->sent++;
3763 			}
3764 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3765 				/*
3766 				 * CMT DAC algorithm: If SACK flag is set to
3767 				 * 0, then lowest_newack test will not pass
3768 				 * because it would have been set to the
3769 				 * cumack earlier. If tp1 is not already to
3770 				 * be rtx'd, this is not a mixed sack, and tp1
3771 				 * is not between two sacked TSNs, then mark by
3772 				 * one more. NOTE that we are marking by one
3773 				 * additional time since the SACK DAC flag
3774 				 * indicates that two packets have been
3775 				 * received after this missing TSN.
3776 				 */
3777 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3778 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3779 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3780 						sctp_log_fr(16 + num_dests_sacked,
3781 						    tp1->rec.data.TSN_seq,
3782 						    tp1->sent,
3783 						    SCTP_FR_LOG_STRIKE_CHUNK);
3784 					}
3785 					tp1->sent++;
3786 				}
3787 			}
3788 		} else if ((tp1->rec.data.doing_fast_retransmit) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
3789 			/*
3790 			 * For those that have done a FR we must take
3791 			 * special consideration if we strike. I.e. the
3792 			 * biggest_newly_acked must be higher than the
3793 			 * sending_seq at the time we did the FR.
3794 			 */
3795 			if (
3796 #ifdef SCTP_FR_TO_ALTERNATE
3797 			/*
3798 			 * If FR's go to new networks, then we must only do
3799 			 * this for singly homed asoc's. However if the FR's
3800 			 * go to the same network (Armando's work) then it's
3801 			 * ok to FR multiple times.
3802 			 */
3803 			    (asoc->numnets < 2)
3804 #else
3805 			    (1)
3806 #endif
3807 			    ) {
3808 
3809 				if ((compare_with_wrap(biggest_tsn_newly_acked,
3810 				    tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3811 				    (biggest_tsn_newly_acked ==
3812 				    tp1->rec.data.fast_retran_tsn)) {
3813 					/*
3814 					 * Strike the TSN, since this ack is
3815 					 * beyond where things were when we
3816 					 * did a FR.
3817 					 */
3818 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3819 						sctp_log_fr(biggest_tsn_newly_acked,
3820 						    tp1->rec.data.TSN_seq,
3821 						    tp1->sent,
3822 						    SCTP_FR_LOG_STRIKE_CHUNK);
3823 					}
3824 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3825 						tp1->sent++;
3826 					}
3827 					strike_flag = 1;
3828 					if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3829 						/*
3830 						 * CMT DAC algorithm: If
3831 						 * SACK flag is set to 0,
3832 						 * then lowest_newack test
3833 						 * will not pass because it
3834 						 * would have been set to
3835 						 * the cumack earlier. If
3836 						 * tp1 is not already to be
3837 						 * rtx'd, this is not a mixed
3838 						 * sack, and tp1 is not between two
3839 						 * sacked TSNs, then mark by
3840 						 * one more. NOTE that we
3841 						 * are marking by one
3842 						 * additional time since the
3843 						 * SACK DAC flag indicates
3844 						 * that two packets have
3845 						 * been received after this
3846 						 * missing TSN.
3847 						 */
3848 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3849 						    (num_dests_sacked == 1) &&
3850 						    compare_with_wrap(this_sack_lowest_newack,
3851 						    tp1->rec.data.TSN_seq, MAX_TSN)) {
3852 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3853 								sctp_log_fr(32 + num_dests_sacked,
3854 								    tp1->rec.data.TSN_seq,
3855 								    tp1->sent,
3856 								    SCTP_FR_LOG_STRIKE_CHUNK);
3857 							}
3858 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3859 								tp1->sent++;
3860 							}
3861 						}
3862 					}
3863 				}
3864 			}
3865 			/*
3866 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3867 			 * algo covers HTNA.
3868 			 */
3869 		} else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3870 		    biggest_tsn_newly_acked, MAX_TSN)) {
3871 			/*
3872 			 * We don't strike these: this is the HTNA
3873 			 * algorithm, i.e. we don't strike if our TSN is
3874 			 * larger than the Highest TSN Newly Acked.
3875 			 */
3876 			;
3877 		} else {
3878 			/* Strike the TSN */
3879 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3880 				sctp_log_fr(biggest_tsn_newly_acked,
3881 				    tp1->rec.data.TSN_seq,
3882 				    tp1->sent,
3883 				    SCTP_FR_LOG_STRIKE_CHUNK);
3884 			}
3885 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3886 				tp1->sent++;
3887 			}
3888 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3889 				/*
3890 				 * CMT DAC algorithm: If SACK flag is set to
3891 				 * 0, then lowest_newack test will not pass
3892 				 * because it would have been set to the
3893 				 * cumack earlier. If tp1 is not already to be
3894 				 * rtx'd, this is not a mixed sack, and tp1 is
3895 				 * not between two sacked TSNs, then mark by
3896 				 * one more. NOTE that we are marking by one
3897 				 * additional time since the SACK DAC flag
3898 				 * indicates that two packets have been
3899 				 * received after this missing TSN.
3900 				 */
3901 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3902 				    compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
3903 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3904 						sctp_log_fr(48 + num_dests_sacked,
3905 						    tp1->rec.data.TSN_seq,
3906 						    tp1->sent,
3907 						    SCTP_FR_LOG_STRIKE_CHUNK);
3908 					}
3909 					tp1->sent++;
3910 				}
3911 			}
3912 		}
3913 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3914 			/* Increment the count to resend */
3915 			struct sctp_nets *alt;
3916 
3917 			/* printf("OK, we are now ready to FR this guy\n"); */
3918 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3919 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3920 				    0, SCTP_FR_MARKED);
3921 			}
3922 			if (strike_flag) {
3923 				/* This is a subsequent FR */
3924 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3925 			}
3926 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3927 			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
3928 				/*
3929 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3930 				 * If CMT is being used, then pick dest with
3931 				 * largest ssthresh for any retransmission.
3932 				 */
3933 				tp1->no_fr_allowed = 1;
3934 				alt = tp1->whoTo;
3935 				/* sa_ignore NO_NULL_CHK */
3936 				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3937 					/*
3938 					 * JRS 5/18/07 - If CMT PF is on,
3939 					 * use the PF version of
3940 					 * find_alt_net()
3941 					 */
3942 					alt = sctp_find_alternate_net(stcb, alt, 2);
3943 				} else {
3944 					/*
3945 					 * JRS 5/18/07 - If only CMT is on,
3946 					 * use the CMT version of
3947 					 * find_alt_net()
3948 					 */
3949 					/* sa_ignore NO_NULL_CHK */
3950 					alt = sctp_find_alternate_net(stcb, alt, 1);
3951 				}
3952 				if (alt == NULL) {
3953 					alt = tp1->whoTo;
3954 				}
3955 				/*
3956 				 * CUCv2: If a different dest is picked for
3957 				 * the retransmission, then new
3958 				 * (rtx-)pseudo_cumack needs to be tracked
3959 				 * for orig dest. Let CUCv2 track new (rtx-)
3960 				 * pseudo-cumack always.
3961 				 */
3962 				if (tp1->whoTo) {
3963 					tp1->whoTo->find_pseudo_cumack = 1;
3964 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3965 				}
3966 			} else {/* CMT is OFF */
3967 
3968 #ifdef SCTP_FR_TO_ALTERNATE
3969 				/* Can we find an alternate? */
3970 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3971 #else
3972 				/*
3973 				 * default behavior is to NOT retransmit
3974 				 * FR's to an alternate. Armando Caro's
3975 				 * paper details why.
3976 				 */
3977 				alt = tp1->whoTo;
3978 #endif
3979 			}
3980 
3981 			tp1->rec.data.doing_fast_retransmit = 1;
3982 			tot_retrans++;
3983 			/* mark the sending seq for possible subsequent FR's */
3984 			/*
3985 			 * printf("Marking TSN for FR new value %x\n",
3986 			 * (uint32_t)tp1->rec.data.TSN_seq);
3987 			 */
3988 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3989 				/*
3990 				 * If the send queue is empty then it's
3991 				 * the next sequence number that will be
3992 				 * assigned, so we subtract one from this to
3993 				 * get the one we last sent.
3994 				 */
3995 				tp1->rec.data.fast_retran_tsn = sending_seq;
3996 			} else {
3997 				/*
3998 				 * If there are chunks on the send queue
3999 				 * (unsent data that has made it from the
4000 				 * stream queues but not out the door), we
4001 				 * take the first one (which will have the
4002 				 * lowest TSN) and subtract one to get the
4003 				 * one we last sent.
4004 				 */
4005 				struct sctp_tmit_chunk *ttt;
4006 
4007 				ttt = TAILQ_FIRST(&asoc->send_queue);
4008 				tp1->rec.data.fast_retran_tsn =
4009 				    ttt->rec.data.TSN_seq;
4010 			}
4011 
4012 			if (tp1->do_rtt) {
4013 				/*
4014 				 * this guy had an RTO calculation pending on
4015 				 * it, cancel it
4016 				 */
4017 				tp1->do_rtt = 0;
4018 			}
4019 			/* fix counts and things */
4020 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4021 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
4022 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
4023 				    tp1->book_size,
4024 				    (uintptr_t) tp1->whoTo,
4025 				    tp1->rec.data.TSN_seq);
4026 			}
4027 			if (tp1->whoTo) {
4028 				tp1->whoTo->net_ack++;
4029 				sctp_flight_size_decrease(tp1);
4030 			}
4031 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4032 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
4033 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
4034 			}
4035 			/* add back to the rwnd */
4036 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
4037 
4038 			/* remove from the total flight */
4039 			sctp_total_flight_decrease(stcb, tp1);
4040 			if (alt != tp1->whoTo) {
4041 				/* yes, there is an alternate. */
4042 				sctp_free_remote_addr(tp1->whoTo);
4043 				/* sa_ignore FREED_MEMORY */
4044 				tp1->whoTo = alt;
4045 				atomic_add_int(&alt->ref_count, 1);
4046 			}
4047 		}
4048 		tp1 = TAILQ_NEXT(tp1, sctp_next);
4049 	}			/* while (tp1) */
4050 
4051 	if (tot_retrans > 0) {
4052 		/*
4053 		 * Setup the ecn nonce re-sync point. We do this since once
4054 		 * we fast-retransmit something we introduce a Karn's rule
4055 		 * scenario and won't know the totals for the ECN bits.
4056 		 */
4057 		asoc->nonce_resync_tsn = sending_seq;
4058 		asoc->nonce_wait_for_ecne = 0;
4059 		asoc->nonce_sum_check = 0;
4060 	}
4061 }
4062 
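/*
 * PR-SCTP (RFC 3758): advance the peer ack point past chunks that have
 * been abandoned (SCTP_FORWARD_TSN_SKIP) or PR chunks marked for resend
 * whose lifetime has expired; returns the last chunk advanced over, or
 * NULL if the point could not move.
 */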
4063 struct sctp_tmit_chunk *
4064 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
4065     struct sctp_association *asoc)
4066 {
4067 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
4068 	struct timeval now;
4069 	int now_filled = 0;
4070 
4071 	if (asoc->peer_supports_prsctp == 0) {
4072 		return (NULL);
4073 	}
4074 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4075 	while (tp1) {
4076 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
4077 		    tp1->sent != SCTP_DATAGRAM_RESEND) {
4078 			/* no chance to advance, out of here */
4079 			break;
4080 		}
4081 		if (!PR_SCTP_ENABLED(tp1->flags)) {
4082 			/*
4083 			 * We can't fwd-tsn past any that are reliable, i.e.
4084 			 * will be retransmitted until the asoc fails.
4085 			 */
4086 			break;
4087 		}
4088 		if (!now_filled) {
4089 			(void)SCTP_GETTIME_TIMEVAL(&now);
4090 			now_filled = 1;
4091 		}
4092 		tp2 = TAILQ_NEXT(tp1, sctp_next);
4093 		/*
4094 		 * Now we have a chunk which is marked for another
4095 		 * retransmission to a PR-stream but maybe has run out of its
4096 		 * chances already, OR has been marked to skip now. Can we
4097 		 * skip it if it's a resend?
4098 		 */
4099 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
4100 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
4101 			/*
4102 			 * Now is this one marked for resend and its time is
4103 			 * now up?
4104 			 */
4105 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
4106 				/* Yes so drop it */
4107 				if (tp1->data) {
4108 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
4109 					    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
4110 					    &asoc->sent_queue, SCTP_SO_NOT_LOCKED);
4111 				}
4112 			} else {
4113 				/*
4114 				 * No, we are done when we hit one for resend
4115 				 * whose time has not expired.
4116 				 */
4117 				break;
4118 			}
4119 		}
4120 		/*
4121 		 * Ok now if this chunk is marked to drop it we can clean up
4122 		 * the chunk, advance our peer ack point and we can check
4123 		 * the next chunk.
4124 		 */
4125 		if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
4126 			/* the advanced peer ack point moves forward */
4127 			asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
4128 			a_adv = tp1;
4129 		} else {
4130 			/*
4131 			 * If it is still in RESEND we can advance no
4132 			 * further
4133 			 */
4134 			break;
4135 		}
4136 		/*
4137 		 * If we hit here we just dumped tp1, move to next tsn on
4138 		 * sent queue.
4139 		 */
4140 		tp1 = tp2;
4141 	}
4142 	return (a_adv);
4143 }
4144 
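/*
 * Audit the flight-size bookkeeping against the sent queue: when this is
 * called nothing should still be counted as in flight or sit between
 * RESEND and ACKED, so any such chunk indicates an accounting bug.
 */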
4145 static void
4146 sctp_fs_audit(struct sctp_association *asoc)
4147 {
4148 	struct sctp_tmit_chunk *chk;
4149 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
4150 
4151 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4152 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
4153 			inflight++;
4154 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
4155 			resend++;
4156 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
4157 			inbetween++;
4158 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
4159 			above++;
4160 		} else {
4161 			acked++;
4162 		}
4163 	}
4164 
4165 	if ((inflight > 0) || (inbetween > 0)) {
4166 #ifdef INVARIANTS
4167 		panic("Flight size-express incorrect? \n");
4168 #else
4169 		SCTP_PRINTF("Flight size-express incorrect inflight:%d inbetween:%d\n",
4170 		    inflight, inbetween);
4171 #endif
4172 	}
4173 }
4174 
4175 
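/*
 * A window probe is not really in flight: move the probe chunk (and any
 * chunks already marked for RESEND) from the sent queue back to the send
 * queue so they go out as new data once the window opens.
 */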
4176 static void
4177 sctp_window_probe_recovery(struct sctp_tcb *stcb,
4178     struct sctp_association *asoc,
4179     struct sctp_nets *net,
4180     struct sctp_tmit_chunk *tp1)
4181 {
4182 	struct sctp_tmit_chunk *chk;
4183 
4184 	/* First setup this one and get it moved back */
4185 	tp1->sent = SCTP_DATAGRAM_UNSENT;
4186 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4187 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
4188 		    tp1->whoTo->flight_size,
4189 		    tp1->book_size,
4190 		    (uintptr_t) tp1->whoTo,
4191 		    tp1->rec.data.TSN_seq);
4192 	}
4193 	sctp_flight_size_decrease(tp1);
4194 	sctp_total_flight_decrease(stcb, tp1);
4195 	TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4196 	TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next);
4197 	asoc->sent_queue_cnt--;
4198 	asoc->send_queue_cnt++;
4199 	/*
4200 	 * Now all guys marked for RESEND on the sent_queue must be moved
4201 	 * back too.
4202 	 */
4203 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4204 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
4205 			/* Another chunk to move */
4206 			chk->sent = SCTP_DATAGRAM_UNSENT;
4207 			/* It should not be in flight */
4208 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
4209 			TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next);
4210 			asoc->sent_queue_cnt--;
4211 			asoc->send_queue_cnt++;
4212 			sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4213 		}
4214 	}
4215 }
4216 
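/*
 * Express (fast-path) SACK processing, used when the SACK carries no
 * gap-ack blocks and no duplicate TSNs so only the cumulative ack and
 * the rwnd need handling.
 */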
4217 void
4218 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
4219     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
4220 {
4221 	struct sctp_nets *net;
4222 	struct sctp_association *asoc;
4223 	struct sctp_tmit_chunk *tp1, *tp2;
4224 	uint32_t old_rwnd;
4225 	int win_probe_recovery = 0;
4226 	int win_probe_recovered = 0;
4227 	int j, done_once = 0;
4228 
4229 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4230 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
4231 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4232 	}
4233 	SCTP_TCB_LOCK_ASSERT(stcb);
4234 #ifdef SCTP_ASOCLOG_OF_TSNS
4235 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
4236 	stcb->asoc.cumack_log_at++;
4237 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4238 		stcb->asoc.cumack_log_at = 0;
4239 	}
4240 #endif
4241 	asoc = &stcb->asoc;
4242 	old_rwnd = asoc->peers_rwnd;
4243 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
4244 		/* old ack */
4245 		return;
4246 	} else if (asoc->last_acked_seq == cumack) {
4247 		/* Window update sack */
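		/*
		 * The peer's rwnd is its advertised window less what is
		 * still in flight, charging sctp_peer_chunk_oh bytes of
		 * bookkeeping overhead per queued chunk; below the SWS
		 * threshold, sender-side silly-window avoidance clamps it
		 * to zero.
		 */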
4248 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4249 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4250 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4251 			/* SWS sender side engages */
4252 			asoc->peers_rwnd = 0;
4253 		}
4254 		if (asoc->peers_rwnd > old_rwnd) {
4255 			goto again;
4256 		}
4257 		return;
4258 	}
4259 	/* First setup for CC stuff */
4260 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4261 		net->prev_cwnd = net->cwnd;
4262 		net->net_ack = 0;
4263 		net->net_ack2 = 0;
4264 
4265 		/*
4266 		 * CMT: Reset CUC and Fast recovery algo variables before
4267 		 * SACK processing
4268 		 */
4269 		net->new_pseudo_cumack = 0;
4270 		net->will_exit_fast_recovery = 0;
4271 	}
4272 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4273 		uint32_t send_s;
4274 
4275 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4276 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4277 			    sctpchunk_listhead);
4278 			send_s = tp1->rec.data.TSN_seq + 1;
4279 		} else {
4280 			send_s = asoc->sending_seq;
4281 		}
4282 		if ((cumack == send_s) ||
4283 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
4284 #ifndef INVARIANTS
4285 			struct mbuf *oper;
4286 
4287 #endif
4288 #ifdef INVARIANTS
4289 			panic("Impossible sack 1");
4290 #else
4291 			*abort_now = 1;
4292 			/* XXX */
4293 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4294 			    0, M_DONTWAIT, 1, MT_DATA);
4295 			if (oper) {
4296 				struct sctp_paramhdr *ph;
4297 				uint32_t *ippp;
4298 
4299 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4300 				    sizeof(uint32_t);
4301 				ph = mtod(oper, struct sctp_paramhdr *);
4302 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4303 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4304 				ippp = (uint32_t *) (ph + 1);
4305 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4306 			}
4307 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4308 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4309 			return;
4310 #endif
4311 		}
4312 	}
4313 	asoc->this_sack_highest_gap = cumack;
4314 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4315 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4316 		    stcb->asoc.overall_error_count,
4317 		    0,
4318 		    SCTP_FROM_SCTP_INDATA,
4319 		    __LINE__);
4320 	}
4321 	stcb->asoc.overall_error_count = 0;
4322 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
4323 		/* process the new consecutive TSN first */
4324 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
4325 		while (tp1) {
4326 			tp2 = TAILQ_NEXT(tp1, sctp_next);
4327 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
4328 			    MAX_TSN) ||
4329 			    cumack == tp1->rec.data.TSN_seq) {
4330 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4331 					printf("Warning, an unsent is now acked?\n");
4332 				}
4333 				/*
4334 				 * ECN Nonce: Add the nonce to the sender's
4335 				 * nonce sum
4336 				 */
4337 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4338 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4339 					/*
4340 					 * If it is less than ACKED, it is
4341 					 * now no longer in flight. Higher
4342 					 * values may occur during marking
4343 					 */
4344 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4345 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4346 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4347 							    tp1->whoTo->flight_size,
4348 							    tp1->book_size,
4349 							    (uintptr_t) tp1->whoTo,
4350 							    tp1->rec.data.TSN_seq);
4351 						}
4352 						sctp_flight_size_decrease(tp1);
4353 						/* sa_ignore NO_NULL_CHK */
4354 						sctp_total_flight_decrease(stcb, tp1);
4355 					}
4356 					tp1->whoTo->net_ack += tp1->send_size;
4357 					if (tp1->snd_count < 2) {
4358 						/*
4359 						 * True non-retransmitted
4360 						 * chunk
4361 						 */
4362 						tp1->whoTo->net_ack2 +=
4363 						    tp1->send_size;
4364 
4365 						/* update RTO too? */
4366 						if (tp1->do_rtt) {
4367 							tp1->whoTo->RTO =
4368 							/*
4369 							 * sa_ignore
4370 							 * NO_NULL_CHK
4371 							 */
4372 							    sctp_calculate_rto(stcb,
4373 							    asoc, tp1->whoTo,
4374 							    &tp1->sent_rcv_time,
4375 							    sctp_align_safe_nocopy);
4376 							tp1->do_rtt = 0;
4377 						}
4378 					}
4379 					/*
4380 					 * CMT: CUCv2 algorithm. From the
4381 					 * cumack'd TSNs, for each TSN being
4382 					 * acked for the first time, set the
4383 					 * following variables for the
4384 					 * corresp destination.
4385 					 * new_pseudo_cumack will trigger a
4386 					 * cwnd update.
4387 					 * find_(rtx_)pseudo_cumack will
4388 					 * trigger search for the next
4389 					 * expected (rtx-)pseudo-cumack.
4390 					 */
4391 					tp1->whoTo->new_pseudo_cumack = 1;
4392 					tp1->whoTo->find_pseudo_cumack = 1;
4393 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4394 
4395 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4396 						/* sa_ignore NO_NULL_CHK */
4397 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4398 					}
4399 				}
4400 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4401 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4402 				}
4403 				if (tp1->rec.data.chunk_was_revoked) {
4404 					/* deflate the cwnd */
4405 					tp1->whoTo->cwnd -= tp1->book_size;
4406 					tp1->rec.data.chunk_was_revoked = 0;
4407 				}
4408 				tp1->sent = SCTP_DATAGRAM_ACKED;
4409 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4410 				if (tp1->data) {
4411 					/* sa_ignore NO_NULL_CHK */
4412 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4413 					sctp_m_freem(tp1->data);
4414 				}
4415 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4416 					sctp_log_sack(asoc->last_acked_seq,
4417 					    cumack,
4418 					    tp1->rec.data.TSN_seq,
4419 					    0,
4420 					    0,
4421 					    SCTP_LOG_FREE_SENT);
4422 				}
4423 				tp1->data = NULL;
4424 				asoc->sent_queue_cnt--;
4425 				sctp_free_a_chunk(stcb, tp1);
4426 				tp1 = tp2;
4427 			} else {
4428 				break;
4429 			}
4430 		}
4431 
4432 	}
4433 	/* sa_ignore NO_NULL_CHK */
4434 	if (stcb->sctp_socket) {
4435 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4436 		struct socket *so;
4437 
4438 #endif
4439 
4440 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4441 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4442 			/* sa_ignore NO_NULL_CHK */
4443 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
4444 		}
4445 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4446 		so = SCTP_INP_SO(stcb->sctp_ep);
4447 		atomic_add_int(&stcb->asoc.refcnt, 1);
4448 		SCTP_TCB_UNLOCK(stcb);
4449 		SCTP_SOCKET_LOCK(so, 1);
4450 		SCTP_TCB_LOCK(stcb);
4451 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4452 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4453 			/* assoc was freed while we were unlocked */
4454 			SCTP_SOCKET_UNLOCK(so, 1);
4455 			return;
4456 		}
4457 #endif
4458 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4459 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4460 		SCTP_SOCKET_UNLOCK(so, 1);
4461 #endif
4462 	} else {
4463 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4464 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
4465 		}
4466 	}
4467 
4468 	/* JRS - Use the congestion control given in the CC module */
4469 	if (asoc->last_acked_seq != cumack)
4470 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4471 
4472 	asoc->last_acked_seq = cumack;
4473 
4474 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4475 		/* nothing left in-flight */
4476 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4477 			net->flight_size = 0;
4478 			net->partial_bytes_acked = 0;
4479 		}
4480 		asoc->total_flight = 0;
4481 		asoc->total_flight_count = 0;
4482 	}
4483 	/* Fix up the a-p-a-p for future PR-SCTP sends */
4484 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4485 		asoc->advanced_peer_ack_point = cumack;
4486 	}
4487 	/* ECN Nonce updates */
4488 	if (asoc->ecn_nonce_allowed) {
4489 		if (asoc->nonce_sum_check) {
4490 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
4491 				if (asoc->nonce_wait_for_ecne == 0) {
4492 					struct sctp_tmit_chunk *lchk;
4493 
4494 					lchk = TAILQ_FIRST(&asoc->send_queue);
4495 					asoc->nonce_wait_for_ecne = 1;
4496 					if (lchk) {
4497 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4498 					} else {
4499 						asoc->nonce_wait_tsn = asoc->sending_seq;
4500 					}
4501 				} else {
4502 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4503 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4504 						/*
4505 						 * Misbehaving peer. We need
4506 						 * to react to this guy
4507 						 */
4508 						asoc->ecn_allowed = 0;
4509 						asoc->ecn_nonce_allowed = 0;
4510 					}
4511 				}
4512 			}
4513 		} else {
4514 			/* See if Resynchronization Possible */
4515 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4516 				asoc->nonce_sum_check = 1;
4517 				/*
4518 				 * Now we must calculate the base. We know
4519 				 * the totals for all the segments gap-acked
4520 				 * in the SACK (none), and we know the SACK's
4521 				 * nonce sum; it's in nonce_sum_flag. So we
4522 				 * can build a truth table to back-calculate
4523 				 * asoc->nonce_sum_expect_base:
4524 				 *
4525 				 * SACK-flag  Seg-Sums  Base
4526 				 *     0         0       0
4527 				 *     1         0       1
4528 				 *     0         1       1
4529 				 *     1         1       0
4530 				 */
4531 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4532 			}
4533 		}
4534 	}
4535 	/* RWND update */
4536 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4537 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4538 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4539 		/* SWS sender side engages */
4540 		asoc->peers_rwnd = 0;
4541 	}
4542 	if (asoc->peers_rwnd > old_rwnd) {
4543 		win_probe_recovery = 1;
4544 	}
4545 	/* Now assure a timer is running where data is queued */
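	/*
	 * Restart the T3-rxt timer on every net that still has data in
	 * flight; stop it (and any pending early-FR timer) on nets that
	 * are now idle.
	 */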
4546 again:
4547 	j = 0;
4548 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4549 		if (win_probe_recovery && (net->window_probe)) {
4550 			net->window_probe = 0;
4551 			win_probe_recovered = 1;
4552 			/*
4553 			 * Find the first chunk that was used for a window
4554 			 * probe and clear its sent state
4555 			 */
4556 			/* sa_ignore FREED_MEMORY */
4557 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4558 				if (tp1->window_probe) {
4559 					/* move back to data send queue */
4560 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
4561 					break;
4562 				}
4563 			}
4564 		}
4565 		if (net->flight_size) {
4566 			int to_ticks;
4567 
4568 			if (net->RTO == 0) {
4569 				to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4570 			} else {
4571 				to_ticks = MSEC_TO_TICKS(net->RTO);
4572 			}
4573 			j++;
4574 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4575 			    sctp_timeout_handler, &net->rxt_timer);
4576 		} else {
4577 			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4578 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4579 				    stcb, net,
4580 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4581 			}
4582 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4583 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4584 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
4585 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4586 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4587 				}
4588 			}
4589 		}
4590 	}
4591 	if ((j == 0) &&
4592 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4593 	    (asoc->sent_queue_retran_cnt == 0) &&
4594 	    (win_probe_recovered == 0) &&
4595 	    (done_once == 0)) {
4596 		/* huh, this should not happen */
4597 		sctp_fs_audit(asoc);
4598 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4599 			net->flight_size = 0;
4600 		}
4601 		asoc->total_flight = 0;
4602 		asoc->total_flight_count = 0;
4603 		asoc->sent_queue_retran_cnt = 0;
4604 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4605 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4606 				sctp_flight_size_increase(tp1);
4607 				sctp_total_flight_increase(stcb, tp1);
4608 			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4609 				asoc->sent_queue_retran_cnt++;
4610 			}
4611 		}
4612 		done_once = 1;
4613 		goto again;
4614 	}
4615 	/**********************************/
4616 	/* Now what about shutdown issues */
4617 	/**********************************/
4618 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4619 		/* nothing left on sendqueue.. consider done */
4620 		/* clean up */
4621 		if ((asoc->stream_queue_cnt == 1) &&
4622 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4623 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4624 		    (asoc->locked_on_sending)
4625 		    ) {
4626 			struct sctp_stream_queue_pending *sp;
4627 
4628 			/*
4629 			 * I may be in a state where we got all across.. but
4630 			 * cannot write more due to a shutdown... we abort
4631 			 * since the user did not indicate EOR in this case.
4632 			 * The sp will be cleaned during free of the asoc.
4633 			 */
4634 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4635 			    sctp_streamhead);
4636 			if ((sp) && (sp->length == 0)) {
4637 				/* Let cleanup code purge it */
4638 				if (sp->msg_is_complete) {
4639 					asoc->stream_queue_cnt--;
4640 				} else {
4641 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4642 					asoc->locked_on_sending = NULL;
4643 					asoc->stream_queue_cnt--;
4644 				}
4645 			}
4646 		}
4647 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4648 		    (asoc->stream_queue_cnt == 0)) {
4649 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4650 				/* Need to abort here */
4651 				struct mbuf *oper;
4652 
4653 		abort_out_now:
4654 				*abort_now = 1;
4655 				/* XXX */
4656 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4657 				    0, M_DONTWAIT, 1, MT_DATA);
4658 				if (oper) {
4659 					struct sctp_paramhdr *ph;
4660 					uint32_t *ippp;
4661 
4662 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4663 					    sizeof(uint32_t);
4664 					ph = mtod(oper, struct sctp_paramhdr *);
4665 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4666 					ph->param_length = htons(SCTP_BUF_LEN(oper));
4667 					ippp = (uint32_t *) (ph + 1);
4668 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4669 				}
4670 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4671 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
4672 			} else {
4673 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4674 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4675 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4676 				}
4677 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4678 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4679 				sctp_stop_timers_for_shutdown(stcb);
4680 				sctp_send_shutdown(stcb,
4681 				    stcb->asoc.primary_destination);
4682 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4683 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4684 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4685 				    stcb->sctp_ep, stcb, asoc->primary_destination);
4686 			}
4687 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4688 		    (asoc->stream_queue_cnt == 0)) {
4689 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4690 				goto abort_out_now;
4691 			}
4692 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4693 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4694 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4695 			sctp_send_shutdown_ack(stcb,
4696 			    stcb->asoc.primary_destination);
4697 
4698 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4699 			    stcb->sctp_ep, stcb, asoc->primary_destination);
4700 		}
4701 	}
4702 	/* PR-SCTP issues need to be addressed too */
4703 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4704 		struct sctp_tmit_chunk *lchk;
4705 		uint32_t old_adv_peer_ack_point;
4706 
4707 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4708 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4709 		/* C3. See if we need to send a Fwd-TSN */
4710 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cumack,
4711 		    MAX_TSN)) {
4712 			/*
4713 			 * ISSUE with ECN, see FWD-TSN processing for notes
4714 			 * on issues that will occur when the ECN NONCE
4715 			 * stuff is put into SCTP for cross checking.
4716 			 */
4717 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
4718 			    MAX_TSN)) {
4719 				send_forward_tsn(stcb, asoc);
4720 				/*
4721 				 * ECN Nonce: Disable Nonce Sum check when
4722 				 * FWD TSN is sent and store resync tsn
4723 				 */
4724 				asoc->nonce_sum_check = 0;
4725 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4726 			}
4727 		}
4728 		if (lchk) {
4729 			/* Assure a timer is up */
4730 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4731 			    stcb->sctp_ep, stcb, lchk->whoTo);
4732 		}
4733 	}
4734 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4735 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4736 		    rwnd,
4737 		    stcb->asoc.peers_rwnd,
4738 		    stcb->asoc.total_flight,
4739 		    stcb->asoc.total_output_queue_size);
4740 	}
4741 }
4742 
4743 void
4744 sctp_handle_sack(struct mbuf *m, int offset,
4745     struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
4746     struct sctp_nets *net_from, int *abort_now, int sack_len, uint32_t rwnd)
4747 {
4748 	struct sctp_association *asoc;
4749 	struct sctp_sack *sack;
4750 	struct sctp_tmit_chunk *tp1, *tp2;
4751 	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
4752 	         this_sack_lowest_newack;
4753 	uint32_t sav_cum_ack;
4754 	uint16_t num_seg, num_dup;
4755 	uint16_t wake_him = 0;
4756 	unsigned int sack_length;
4757 	uint32_t send_s = 0;
4758 	long j;
4759 	int accum_moved = 0;
4760 	int will_exit_fast_recovery = 0;
4761 	uint32_t a_rwnd, old_rwnd;
4762 	int win_probe_recovery = 0;
4763 	int win_probe_recovered = 0;
4764 	struct sctp_nets *net = NULL;
4765 	int nonce_sum_flag, ecn_seg_sums = 0;
4766 	int done_once;
4767 	uint8_t reneged_all = 0;
4768 	uint8_t cmt_dac_flag;
4769 
4770 	/*
4771 	 * we take any chance we can to service our queues since we cannot
4772 	 * get awoken when the socket is read from :<
4773 	 */
4774 	/*
4775 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4776 	 * old sack, if so discard. 2) If there is nothing left in the send
4777 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4778 	 * too, update any rwnd change and verify no timers are running,
4779 	 * then return. 3) Process any new consecutive data, i.e. cum-ack
4780 	 * moved; process these first and note that it moved. 4) Process any
4781 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4782 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4783 	 * sync up flightsizes and things, stop all timers and also check
4784 	 * for shutdown_pending state. If so then go ahead and send off the
4785 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4786 	 * start that timer and return. 9) Strike any non-acked things and do FR
4787 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4788 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4789 	 * if in shutdown_recv state.
4790 	 */
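	/*
	 * Editor's note: the TSN comparisons throughout this routine go
	 * through compare_with_wrap(), i.e. serial-number arithmetic that
	 * stays correct across 32-bit wraparound. A minimal sketch of the
	 * idea (illustrative only, NOT the kernel's actual macro):
	 */
#if 0
	/*
	 * "a is newer than b" when the forward distance from b to a, taken
	 * modulo 2^32, is non-zero and below half the number space (in the
	 * style of RFC 1982). E.g. 0x00000002 is newer than 0xfffffffe,
	 * since (uint32_t)(0x00000002 - 0xfffffffe) == 4.
	 */
#define	TSN_NEWER(a, b) \
	(((a) != (b)) && ((uint32_t)((a) - (b)) < 0x80000000U))
#endif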
4791 	SCTP_TCB_LOCK_ASSERT(stcb);
4792 	sack = &ch->sack;
4793 	/* CMT DAC algo */
4794 	this_sack_lowest_newack = 0;
4795 	j = 0;
4796 	sack_length = (unsigned int)sack_len;
4797 	/* ECN Nonce */
4798 	SCTP_STAT_INCR(sctps_slowpath_sack);
4799 	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
4800 	cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
4801 #ifdef SCTP_ASOCLOG_OF_TSNS
4802 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4803 	stcb->asoc.cumack_log_at++;
4804 	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
4805 		stcb->asoc.cumack_log_at = 0;
4806 	}
4807 #endif
4808 	num_seg = ntohs(sack->num_gap_ack_blks);
4809 	a_rwnd = rwnd;
4810 
4811 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4812 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4813 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4814 	}
4815 	/* CMT DAC algo */
4816 	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
4817 	num_dup = ntohs(sack->num_dup_tsns);
4818 
4819 	old_rwnd = stcb->asoc.peers_rwnd;
4820 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4821 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4822 		    stcb->asoc.overall_error_count,
4823 		    0,
4824 		    SCTP_FROM_SCTP_INDATA,
4825 		    __LINE__);
4826 	}
4827 	stcb->asoc.overall_error_count = 0;
4828 	asoc = &stcb->asoc;
4829 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4830 		sctp_log_sack(asoc->last_acked_seq,
4831 		    cum_ack,
4832 		    0,
4833 		    num_seg,
4834 		    num_dup,
4835 		    SCTP_LOG_NEW_SACK);
4836 	}
4837 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
4838 		int off_to_dup, iii;
4839 		uint32_t *dupdata, dblock;
4840 
4841 		off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk);
4842 		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= sack_length) {
4843 			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4844 			    sizeof(uint32_t), (uint8_t *) & dblock);
4845 			off_to_dup += sizeof(uint32_t);
4846 			if (dupdata) {
4847 				for (iii = 0; iii < num_dup; iii++) {
4848 					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4849 					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
4850 					    sizeof(uint32_t), (uint8_t *) & dblock);
4851 					if (dupdata == NULL)
4852 						break;
4853 					off_to_dup += sizeof(uint32_t);
4854 				}
4855 			}
4856 		} else {
4857 			SCTP_PRINTF("Size invalid, offset to dups:%d number dups:%d sack_len:%d num gaps:%d\n",
4858 			    off_to_dup, num_dup, sack_length, num_seg);
4859 		}
4860 	}
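	/*
	 * Editor's note: the off_to_dup arithmetic above follows the SACK
	 * chunk wire format of RFC 4960, where the duplicate-TSN list sits
	 * directly behind the variable-length gap-ack-block array:
	 *
	 *   sctp_sack_chunk (16 bytes):
	 *     type | flags | length             (chunk header, 4 bytes)
	 *     cumulative TSN ack                (4 bytes)
	 *     a_rwnd                            (4 bytes)
	 *     num gap ack blocks | num dup TSNs (2 + 2 bytes)
	 *   num_seg gap ack blocks              (4 bytes each: start/end)
	 *   num_dup duplicate TSNs              (4 bytes each)
	 *
	 * With num_seg == 2, say, the duplicates begin at byte offset
	 * 16 + 2 * 4 == 24 from the start of the chunk, which is what the
	 * bounds check against sack_length verifies before walking them.
	 */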
4861 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4862 		/* reality check */
4863 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4864 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4865 			    sctpchunk_listhead);
4866 			send_s = tp1->rec.data.TSN_seq + 1;
4867 		} else {
4868 			send_s = asoc->sending_seq;
4869 		}
4870 		if (cum_ack == send_s ||
4871 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
4872 #ifndef INVARIANTS
4873 			struct mbuf *oper;
4874 
4875 #endif
4876 #ifdef INVARIANTS
4877 	hopeless_peer:
4878 			panic("Impossible sack 1");
4879 #else
4880 
4881 
4882 			/*
4883 			 * no way, we have not even sent this TSN out yet.
4884 			 * Peer is hopelessly messed up with us.
4885 			 */
4886 	hopeless_peer:
4887 			*abort_now = 1;
4888 			/* XXX */
4889 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4890 			    0, M_DONTWAIT, 1, MT_DATA);
4891 			if (oper) {
4892 				struct sctp_paramhdr *ph;
4893 				uint32_t *ippp;
4894 
4895 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4896 				    sizeof(uint32_t);
4897 				ph = mtod(oper, struct sctp_paramhdr *);
4898 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4899 				ph->param_length = htons(SCTP_BUF_LEN(oper));
4900 				ippp = (uint32_t *) (ph + 1);
4901 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4902 			}
4903 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4904 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
4905 			return;
4906 #endif
4907 		}
4908 	}
4909 	/**********************/
4910 	/* 1) check the range */
4911 	/**********************/
4912 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
4913 		/* acking something behind */
4914 		return;
4915 	}
4916 	sav_cum_ack = asoc->last_acked_seq;
4917 
4918 	/* update the Rwnd of the peer */
4919 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4920 	    TAILQ_EMPTY(&asoc->send_queue) &&
4921 	    (asoc->stream_queue_cnt == 0)
4922 	    ) {
4923 		/* nothing left on send/sent and strmq */
4924 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4925 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4926 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4927 		}
4928 		asoc->peers_rwnd = a_rwnd;
4929 		if (asoc->sent_queue_retran_cnt) {
4930 			asoc->sent_queue_retran_cnt = 0;
4931 		}
4932 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4933 			/* SWS sender side engages */
4934 			asoc->peers_rwnd = 0;
4935 		}
4936 		/* stop any timers */
4937 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4938 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4939 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4940 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
4941 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
4942 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
4943 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
4944 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4945 				}
4946 			}
4947 			net->partial_bytes_acked = 0;
4948 			net->flight_size = 0;
4949 		}
4950 		asoc->total_flight = 0;
4951 		asoc->total_flight_count = 0;
4952 		return;
4953 	}
4954 	/*
4955 	 * We init net_ack and net_ack2 to 0. These are used to track two
4956 	 * things: the total byte count acked is tracked in net_ack, AND
4957 	 * net_ack2 tracks the total bytes acked that are unambiguous and
4958 	 * were never retransmitted. We track these on a per-destination
4959 	 * address basis.
4960 	 */
4961 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4962 		net->prev_cwnd = net->cwnd;
4963 		net->net_ack = 0;
4964 		net->net_ack2 = 0;
4965 
4966 		/*
4967 		 * CMT: Reset CUC and Fast recovery algo variables before
4968 		 * SACK processing
4969 		 */
4970 		net->new_pseudo_cumack = 0;
4971 		net->will_exit_fast_recovery = 0;
4972 	}
4973 	/* process the new consecutive TSN first */
4974 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
4975 	while (tp1) {
4976 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
4977 		    MAX_TSN) ||
4978 		    last_tsn == tp1->rec.data.TSN_seq) {
4979 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4980 				/*
4981 				 * ECN Nonce: Add the nonce to the sender's
4982 				 * nonce sum
4983 				 */
4984 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
4985 				accum_moved = 1;
4986 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4987 					/*
4988 					 * If it is less than ACKED, it is
4989 					 * now no-longer in flight. Higher
4990 					 * values may occur during marking
4991 					 */
4992 					if ((tp1->whoTo->dest_state &
4993 					    SCTP_ADDR_UNCONFIRMED) &&
4994 					    (tp1->snd_count < 2)) {
4995 						/*
4996 						 * If there was no retran
4997 						 * and the address is
4998 						 * un-confirmed and we sent
4999 						 * there and are now
5000 						 * sacked... it's confirmed,
5001 						 * mark it so.
5002 						 */
5003 						tp1->whoTo->dest_state &=
5004 						    ~SCTP_ADDR_UNCONFIRMED;
5005 					}
5006 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5007 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5008 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
5009 							    tp1->whoTo->flight_size,
5010 							    tp1->book_size,
5011 							    (uintptr_t) tp1->whoTo,
5012 							    tp1->rec.data.TSN_seq);
5013 						}
5014 						sctp_flight_size_decrease(tp1);
5015 						sctp_total_flight_decrease(stcb, tp1);
5016 					}
5017 					tp1->whoTo->net_ack += tp1->send_size;
5018 
5019 					/* CMT SFR and DAC algos */
5020 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
5021 					tp1->whoTo->saw_newack = 1;
5022 
5023 					if (tp1->snd_count < 2) {
5024 						/*
5025 						 * True non-retransmitted
5026 						 * chunk
5027 						 */
5028 						tp1->whoTo->net_ack2 +=
5029 						    tp1->send_size;
5030 
5031 						/* update RTO too? */
5032 						if (tp1->do_rtt) {
5033 							tp1->whoTo->RTO =
5034 							    sctp_calculate_rto(stcb,
5035 							    asoc, tp1->whoTo,
5036 							    &tp1->sent_rcv_time,
5037 							    sctp_align_safe_nocopy);
5038 							tp1->do_rtt = 0;
5039 						}
5040 					}
5041 					/*
5042 					 * CMT: CUCv2 algorithm. From the
5043 					 * cumack'd TSNs, for each TSN being
5044 					 * acked for the first time, set the
5045 					 * following variables for the
5046 					 * corresp destination.
5047 					 * new_pseudo_cumack will trigger a
5048 					 * cwnd update.
5049 					 * find_(rtx_)pseudo_cumack will
5050 					 * trigger search for the next
5051 					 * expected (rtx-)pseudo-cumack.
5052 					 */
5053 					tp1->whoTo->new_pseudo_cumack = 1;
5054 					tp1->whoTo->find_pseudo_cumack = 1;
5055 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
5056 
5057 
5058 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5059 						sctp_log_sack(asoc->last_acked_seq,
5060 						    cum_ack,
5061 						    tp1->rec.data.TSN_seq,
5062 						    0,
5063 						    0,
5064 						    SCTP_LOG_TSN_ACKED);
5065 					}
5066 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
5067 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
5068 					}
5069 				}
5070 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5071 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
5072 #ifdef SCTP_AUDITING_ENABLED
5073 					sctp_audit_log(0xB3,
5074 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
5075 #endif
5076 				}
5077 				if (tp1->rec.data.chunk_was_revoked) {
5078 					/* deflate the cwnd */
5079 					tp1->whoTo->cwnd -= tp1->book_size;
5080 					tp1->rec.data.chunk_was_revoked = 0;
5081 				}
5082 				tp1->sent = SCTP_DATAGRAM_ACKED;
5083 			}
5084 		} else {
5085 			break;
5086 		}
5087 		tp1 = TAILQ_NEXT(tp1, sctp_next);
5088 	}
5089 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
5090 	/* always set this up to cum-ack */
5091 	asoc->this_sack_highest_gap = last_tsn;
5092 
5093 	/* Move offset up to point to gaps/dups */
5094 	offset += sizeof(struct sctp_sack_chunk);
5095 	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
5096 
5097 		/* skip corrupt segments */
5098 		goto skip_segments;
5099 	}
5100 	if (num_seg > 0) {
5101 
5102 		/*
5103 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
5104 		 * to be greater than the cumack. Also reset saw_newack to 0
5105 		 * for all dests.
5106 		 */
5107 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5108 			net->saw_newack = 0;
5109 			net->this_sack_highest_newack = last_tsn;
5110 		}
5111 
5112 		/*
5113 		 * this_sack_highest_gap will increase while handling NEW
5114 		 * segments; this_sack_highest_newack will increase while
5115 		 * handling NEWLY ACKED chunks; this_sack_lowest_newack is
5116 		 * used for the CMT DAC algo. saw_newack will also change.
5117 		 */
5118 		sctp_handle_segments(m, &offset, stcb, asoc, ch, last_tsn,
5119 		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
5120 		    num_seg, &ecn_seg_sums);
5121 
5122 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
5123 			/*
5124 			 * validate the biggest_tsn_acked in the gap acks if
5125 			 * strict adherence is wanted.
5126 			 */
5127 			if ((biggest_tsn_acked == send_s) ||
5128 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
5129 				/*
5130 				 * peer is either confused or we are under
5131 				 * attack. We must abort.
5132 				 */
5133 				goto hopeless_peer;
5134 			}
5135 		}
5136 	}
5137 skip_segments:
5138 	/*******************************************/
5139 	/* cancel ALL T3-send timer if accum moved */
5140 	/*******************************************/
5141 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
5142 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5143 			if (net->new_pseudo_cumack)
5144 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5145 				    stcb, net,
5146 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
5147 
5148 		}
5149 	} else {
5150 		if (accum_moved) {
5151 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5152 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5153 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
5154 			}
5155 		}
5156 	}
5157 	/********************************************/
5158 	/* drop the acked chunks from the sendqueue */
5159 	/********************************************/
5160 	asoc->last_acked_seq = cum_ack;
5161 
5162 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
5163 	if (tp1 == NULL)
5164 		goto done_with_it;
5165 	do {
5166 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
5167 		    MAX_TSN)) {
5168 			break;
5169 		}
5170 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
5171 			/* no more sent on list */
5172 			printf("Warning, tp1->sent == %d and it's now acked?\n",
5173 			    tp1->sent);
5174 		}
5175 		tp2 = TAILQ_NEXT(tp1, sctp_next);
5176 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
5177 		if (tp1->pr_sctp_on) {
5178 			if (asoc->pr_sctp_cnt != 0)
5179 				asoc->pr_sctp_cnt--;
5180 		}
5181 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
5182 		    (asoc->total_flight > 0)) {
5183 #ifdef INVARIANTS
5184 			panic("Warning flight size is positive and should be 0");
5185 #else
5186 			SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
5187 			    asoc->total_flight);
5188 #endif
5189 			asoc->total_flight = 0;
5190 		}
5191 		if (tp1->data) {
5192 			/* sa_ignore NO_NULL_CHK */
5193 			sctp_free_bufspace(stcb, asoc, tp1, 1);
5194 			sctp_m_freem(tp1->data);
5195 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
5196 				asoc->sent_queue_cnt_removeable--;
5197 			}
5198 		}
5199 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
5200 			sctp_log_sack(asoc->last_acked_seq,
5201 			    cum_ack,
5202 			    tp1->rec.data.TSN_seq,
5203 			    0,
5204 			    0,
5205 			    SCTP_LOG_FREE_SENT);
5206 		}
5207 		tp1->data = NULL;
5208 		asoc->sent_queue_cnt--;
5209 		sctp_free_a_chunk(stcb, tp1);
5210 		wake_him++;
5211 		tp1 = tp2;
5212 	} while (tp1 != NULL);
5213 
5214 done_with_it:
5215 	/* sa_ignore NO_NULL_CHK */
5216 	if ((wake_him) && (stcb->sctp_socket)) {
5217 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5218 		struct socket *so;
5219 
5220 #endif
5221 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
5222 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5223 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
5224 		}
5225 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5226 		so = SCTP_INP_SO(stcb->sctp_ep);
5227 		atomic_add_int(&stcb->asoc.refcnt, 1);
5228 		SCTP_TCB_UNLOCK(stcb);
5229 		SCTP_SOCKET_LOCK(so, 1);
5230 		SCTP_TCB_LOCK(stcb);
5231 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
5232 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
5233 			/* assoc was freed while we were unlocked */
5234 			SCTP_SOCKET_UNLOCK(so, 1);
5235 			return;
5236 		}
5237 #endif
5238 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
5239 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
5240 		SCTP_SOCKET_UNLOCK(so, 1);
5241 #endif
5242 	} else {
5243 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
5244 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
5245 		}
5246 	}
5247 
5248 	if (asoc->fast_retran_loss_recovery && accum_moved) {
5249 		if (compare_with_wrap(asoc->last_acked_seq,
5250 		    asoc->fast_recovery_tsn, MAX_TSN) ||
5251 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
5252 			/* Setup so we will exit RFC2582 fast recovery */
5253 			will_exit_fast_recovery = 1;
5254 		}
5255 	}
5256 	/*
5257 	 * Check for revoked fragments:
5258 	 *
5259 	 * If the previous SACK had no frags, we cannot have any revoked.
5260 	 * If the previous SACK had frags and we now have frags (num_seg
5261 	 * > 0), call sctp_check_for_revoked() to tell if the peer revoked
5262 	 * some of them. Otherwise the peer revoked all ACKED fragments,
5263 	 * since we had some before and now we have NONE.
5264 	 */
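	/*
	 * Hypothetical example of the reneging case handled below: the
	 * previous SACK gap-acked TSNs 104-106 (so they were marked ACKED),
	 * and the current SACK carries the same cum-ack but no gap-ack
	 * blocks at all. The peer has reneged on those chunks, so they are
	 * moved back to SENT, their bytes are put back into the flight
	 * size, and they become candidates for retransmission.
	 */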
5265 
5266 	if (num_seg)
5267 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
5268 	else if (asoc->saw_sack_with_frags) {
5269 		int cnt_revoked = 0;
5270 
5271 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
5272 		if (tp1 != NULL) {
5273 			/* Peer revoked all dg's marked or acked */
5274 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5275 				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
5276 				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
5277 					tp1->sent = SCTP_DATAGRAM_SENT;
5278 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
5279 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
5280 						    tp1->whoTo->flight_size,
5281 						    tp1->book_size,
5282 						    (uintptr_t) tp1->whoTo,
5283 						    tp1->rec.data.TSN_seq);
5284 					}
5285 					sctp_flight_size_increase(tp1);
5286 					sctp_total_flight_increase(stcb, tp1);
5287 					tp1->rec.data.chunk_was_revoked = 1;
5288 					/*
5289 					 * To ensure that this increase in
5290 					 * flightsize, which is artificial,
5291 					 * does not throttle the sender, we
5292 					 * also increase the cwnd
5293 					 * artificially.
5294 					 */
5295 					tp1->whoTo->cwnd += tp1->book_size;
5296 					cnt_revoked++;
5297 				}
5298 			}
5299 			if (cnt_revoked) {
5300 				reneged_all = 1;
5301 			}
5302 		}
5303 		asoc->saw_sack_with_frags = 0;
5304 	}
5305 	if (num_seg)
5306 		asoc->saw_sack_with_frags = 1;
5307 	else
5308 		asoc->saw_sack_with_frags = 0;
5309 
5310 	/* JRS - Use the congestion control given in the CC module */
5311 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5312 
5313 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5314 		/* nothing left in-flight */
5315 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5316 			/* stop all timers */
5317 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5318 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5319 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5320 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5321 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
5322 				}
5323 			}
5324 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5325 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
5326 			net->flight_size = 0;
5327 			net->partial_bytes_acked = 0;
5328 		}
5329 		asoc->total_flight = 0;
5330 		asoc->total_flight_count = 0;
5331 	}
5332 	/**********************************/
5333 	/* Now what about shutdown issues */
5334 	/**********************************/
5335 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5336 		/* nothing left on sendqueue.. consider done */
5337 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5338 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5339 			    asoc->peers_rwnd, 0, 0, a_rwnd);
5340 		}
5341 		asoc->peers_rwnd = a_rwnd;
5342 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5343 			/* SWS sender side engages */
5344 			asoc->peers_rwnd = 0;
5345 		}
5346 		/* clean up */
5347 		if ((asoc->stream_queue_cnt == 1) &&
5348 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5349 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5350 		    (asoc->locked_on_sending)
5351 		    ) {
5352 			struct sctp_stream_queue_pending *sp;
5353 
5354 			/*
5355 			 * I may be in a state where we got all across.. but
5356 			 * cannot write more due to a shutdown... we abort
5357 			 * since the user did not indicate EOR in this case.
5358 			 */
5359 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
5360 			    sctp_streamhead);
5361 			if ((sp) && (sp->length == 0)) {
5362 				asoc->locked_on_sending = NULL;
5363 				if (sp->msg_is_complete) {
5364 					asoc->stream_queue_cnt--;
5365 				} else {
5366 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5367 					asoc->stream_queue_cnt--;
5368 				}
5369 			}
5370 		}
5371 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5372 		    (asoc->stream_queue_cnt == 0)) {
5373 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5374 				/* Need to abort here */
5375 				struct mbuf *oper;
5376 
5377 		abort_out_now:
5378 				*abort_now = 1;
5379 				/* XXX */
5380 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
5381 				    0, M_DONTWAIT, 1, MT_DATA);
5382 				if (oper) {
5383 					struct sctp_paramhdr *ph;
5384 					uint32_t *ippp;
5385 
5386 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5387 					    sizeof(uint32_t);
5388 					ph = mtod(oper, struct sctp_paramhdr *);
5389 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5390 					ph->param_length = htons(SCTP_BUF_LEN(oper));
5391 					ippp = (uint32_t *) (ph + 1);
5392 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
5393 				}
5394 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
5395 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
5396 				return;
5397 			} else {
5398 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5399 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5400 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5401 				}
5402 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5403 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5404 				sctp_stop_timers_for_shutdown(stcb);
5405 				sctp_send_shutdown(stcb,
5406 				    stcb->asoc.primary_destination);
5407 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5408 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5409 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5410 				    stcb->sctp_ep, stcb, asoc->primary_destination);
5411 			}
5412 			return;
5413 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5414 		    (asoc->stream_queue_cnt == 0)) {
5415 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
5416 				goto abort_out_now;
5417 			}
5418 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5419 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5420 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5421 			sctp_send_shutdown_ack(stcb,
5422 			    stcb->asoc.primary_destination);
5423 
5424 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5425 			    stcb->sctp_ep, stcb, asoc->primary_destination);
5426 			return;
5427 		}
5428 	}
5429 	/*
5430 	 * Now here we are going to recycle net_ack for a different use...
5431 	 * HEADS UP.
5432 	 */
5433 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5434 		net->net_ack = 0;
5435 	}
5436 
5437 	/*
5438 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5439 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5440 	 * automatically ensure that.
5441 	 */
5442 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
5443 		this_sack_lowest_newack = cum_ack;
5444 	}
5445 	if (num_seg > 0) {
5446 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5447 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5448 	}
5449 	/*********************************************/
5450 	/* Here we perform PR-SCTP procedures        */
5451 	/* (section 4.2)                             */
5452 	/*********************************************/
5453 	/* C1. update advancedPeerAckPoint */
5454 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
5455 		asoc->advanced_peer_ack_point = cum_ack;
5456 	}
5457 	/* JRS - Use the congestion control given in the CC module */
5458 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5459 
5460 	/******************************************************************
5461 	 *  Here we do the stuff with ECN Nonce checking.
5462 	 *  We basically check to see if the nonce sum flag was incorrect
5463 	 *  or if resynchronization needs to be done. Also if we catch a
5464 	 *  misbehaving receiver we give him the kick.
5465 	 ******************************************************************/
5466 
5467 	if (asoc->ecn_nonce_allowed) {
5468 		if (asoc->nonce_sum_check) {
5469 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
5470 				if (asoc->nonce_wait_for_ecne == 0) {
5471 					struct sctp_tmit_chunk *lchk;
5472 
5473 					lchk = TAILQ_FIRST(&asoc->send_queue);
5474 					asoc->nonce_wait_for_ecne = 1;
5475 					if (lchk) {
5476 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
5477 					} else {
5478 						asoc->nonce_wait_tsn = asoc->sending_seq;
5479 					}
5480 				} else {
5481 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
5482 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
5483 						/*
5484 						 * Misbehaving peer. We need
5485 						 * to react to this guy
5486 						 */
5487 						asoc->ecn_allowed = 0;
5488 						asoc->ecn_nonce_allowed = 0;
5489 					}
5490 				}
5491 			}
5492 		} else {
5493 			/* See if Resynchronization Possible */
5494 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
5495 				asoc->nonce_sum_check = 1;
5496 				/*
5497 				 * Now we must calculate what the base is,
5498 				 * based on two things: we know the totals
5499 				 * for all segments gap-acked in the SACK
5500 				 * (stored in ecn_seg_sums), and the SACK's
5501 				 * nonce sum (in nonce_sum_flag). So we can
5502 				 * build a truth table to back-calculate the
5503 				 * new value of asoc->nonce_sum_expect_base:
5504 				 *
5505 				 * SACK-flag  Seg-Sums  Base
5506 				 *     0         0       0
5507 				 *     0         1       1
5508 				 *     1         0       1
5509 				 *     1         1       0
5510 				 */
5511 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
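				/*
				 * Worked example (editor's sketch): if the
				 * SACK's nonce-sum flag is 1 and the parity
				 * of the gap-acked segment nonces
				 * (ecn_seg_sums) is 0, the new expected base
				 * is 1 ^ 0 == 1, per the truth table above.
				 */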
5512 			}
5513 		}
5514 	}
5515 	/* Now are we exiting loss recovery ? */
5516 	if (will_exit_fast_recovery) {
5517 		/* Ok, we must exit fast recovery */
5518 		asoc->fast_retran_loss_recovery = 0;
5519 	}
5520 	if ((asoc->sat_t3_loss_recovery) &&
5521 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
5522 	    MAX_TSN) ||
5523 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
5524 		/* end satellite t3 loss recovery */
5525 		asoc->sat_t3_loss_recovery = 0;
5526 	}
5527 	/*
5528 	 * CMT Fast recovery
5529 	 */
5530 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5531 		if (net->will_exit_fast_recovery) {
5532 			/* Ok, we must exit fast recovery */
5533 			net->fast_retran_loss_recovery = 0;
5534 		}
5535 	}
5536 
5537 	/* Adjust and set the new rwnd value */
5538 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5539 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5540 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5541 	}
5542 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5543 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5544 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5545 		/* SWS sender side engages */
5546 		asoc->peers_rwnd = 0;
5547 	}
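	/*
	 * Worked example (editor's sketch, made-up numbers): with an
	 * advertised a_rwnd of 64000, 12000 bytes still in flight and 10
	 * chunks on the sent queue at a per-chunk overhead allowance of
	 * 256 bytes, the usable peer window becomes
	 * 64000 - (12000 + 10 * 256) = 49440 bytes. Should the result fall
	 * below the sender-side SWS threshold it is clamped to 0 above.
	 */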
5548 	if (asoc->peers_rwnd > old_rwnd) {
5549 		win_probe_recovery = 1;
5550 	}
5551 	/*
5552 	 * Now we must setup so we have a timer up for anyone with
5553 	 * outstanding data.
5554 	 */
5555 	done_once = 0;
5556 again:
5557 	j = 0;
5558 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5559 		if (win_probe_recovery && (net->window_probe)) {
5560 			net->window_probe = 0;
5561 			win_probe_recovered = 1;
5562 			/*-
5563 			 * Find first chunk that was used with
5564 			 * window probe and clear the event. Put
5565 			 * it back into the send queue as if it had
5566 			 * not been sent.
5567 			 */
5568 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5569 				if (tp1->window_probe) {
5570 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
5571 					break;
5572 				}
5573 			}
5574 		}
5575 		if (net->flight_size) {
5576 			j++;
5577 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5578 			    stcb->sctp_ep, stcb, net);
5579 		} else {
5580 			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5581 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5582 				    stcb, net,
5583 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5584 			}
5585 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
5586 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
5587 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
5588 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
5589 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
5590 				}
5591 			}
5592 		}
5593 	}
5594 	if ((j == 0) &&
5595 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5596 	    (asoc->sent_queue_retran_cnt == 0) &&
5597 	    (win_probe_recovered == 0) &&
5598 	    (done_once == 0)) {
5599 		/* huh, this should not happen */
5600 		sctp_fs_audit(asoc);
5601 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5602 			net->flight_size = 0;
5603 		}
5604 		asoc->total_flight = 0;
5605 		asoc->total_flight_count = 0;
5606 		asoc->sent_queue_retran_cnt = 0;
5607 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5608 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5609 				sctp_flight_size_increase(tp1);
5610 				sctp_total_flight_increase(stcb, tp1);
5611 			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5612 				asoc->sent_queue_retran_cnt++;
5613 			}
5614 		}
5615 		done_once = 1;
5616 		goto again;
5617 	}
5618 	/* C2. try to further move advancedPeerAckPoint ahead */
5619 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5620 		struct sctp_tmit_chunk *lchk;
5621 		uint32_t old_adv_peer_ack_point;
5622 
5623 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5624 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5625 		/* C3. See if we need to send a Fwd-TSN */
5626 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
5627 		    MAX_TSN)) {
5628 			/*
5629 			 * ISSUE with ECN, see FWD-TSN processing for notes
5630 			 * on issues that will occur when the ECN NONCE
5631 			 * stuff is put into SCTP for cross checking.
5632 			 */
5633 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
5634 			    MAX_TSN)) {
5635 				send_forward_tsn(stcb, asoc);
5636 				/*
5637 				 * ECN Nonce: Disable Nonce Sum check when
5638 				 * FWD TSN is sent and store resync tsn
5639 				 */
5640 				asoc->nonce_sum_check = 0;
5641 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
5642 			}
5643 		}
5644 		if (lchk) {
5645 			/* Assure a timer is up */
5646 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5647 			    stcb->sctp_ep, stcb, lchk->whoTo);
5648 		}
5649 	}
5650 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5651 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5652 		    a_rwnd,
5653 		    stcb->asoc.peers_rwnd,
5654 		    stcb->asoc.total_flight,
5655 		    stcb->asoc.total_output_queue_size);
5656 	}
5657 }
5658 
5659 void
5660 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
5661     struct sctp_nets *netp, int *abort_flag)
5662 {
5663 	/* Copy cum-ack */
5664 	uint32_t cum_ack, a_rwnd;
5665 
5666 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5667 	/* Arrange so a_rwnd does NOT change */
5668 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
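	/*
	 * Editor's note: the express SACK handler invoked below re-derives
	 * peers_rwnd roughly as a_rwnd minus the bytes still in flight.
	 * Handing it peers_rwnd + total_flight therefore lands back near
	 * the current peers_rwnd: e.g. 60000 + 4000 in flight -> a_rwnd of
	 * 64000, from which the handler subtracts the 4000 outstanding
	 * bytes again (ignoring the small per-chunk overhead term).
	 */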
5669 
5670 	/* Now call the express sack handling */
5671 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 0, abort_flag);
5672 }
5673 
5674 static void
5675 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5676     struct sctp_stream_in *strmin)
5677 {
5678 	struct sctp_queued_to_read *ctl, *nctl;
5679 	struct sctp_association *asoc;
5680 	int tt;
5681 
5682 	/* EY - used to calculate nr_gap information */
5683 	uint32_t nr_tsn, nr_gap;
5684 
5685 	asoc = &stcb->asoc;
5686 	tt = strmin->last_sequence_delivered;
5687 	/*
5688 	 * First deliver anything prior to and including the stream
5689 	 * sequence number that came in.
5690 	 */
5691 	ctl = TAILQ_FIRST(&strmin->inqueue);
5692 	while (ctl) {
5693 		nctl = TAILQ_NEXT(ctl, next);
5694 		if (compare_with_wrap(tt, ctl->sinfo_ssn, MAX_SEQ) ||
5695 		    (tt == ctl->sinfo_ssn)) {
5696 			/* this is deliverable now */
5697 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5698 			/* subtract pending on streams */
5699 			asoc->size_on_all_streams -= ctl->length;
5700 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5701 			/* deliver it to at least the delivery-q */
5702 			if (stcb->sctp_socket) {
5703 				/* EY need the tsn info for calculating nr */
5704 				nr_tsn = ctl->sinfo_tsn;
5705 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5706 				    ctl,
5707 				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5708 				/*
5709 				 * EY: this is the chunk that should be
5710 				 * tagged nr-gapped; calculate the gap and
5711 				 * then tag this TSN as nr
5712 				 * (chk->rec.data.TSN_seq).
5713 				 */
5714 				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5715 
5716 					if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
5717 						nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
5718 					} else {
5719 						nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
5720 					}
5721 					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5722 					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5723 						/*
5724 						 * EY These should never
5725 						 * happen, explained before
5726 						 */
5727 					} else {
5728 						SCTP_TCB_LOCK_ASSERT(stcb);
5729 						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5730 						if (nr_tsn > asoc->highest_tsn_inside_nr_map)
5731 							asoc->highest_tsn_inside_nr_map = nr_tsn;
5732 					}
5733 
5734 					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
5735 						/*
5736 						 * printf("In sctp_kick_prsctp_reorder_queue(7): "
5737 						 *     "Something wrong, the TSN to be tagged "
5738 						 *     "\nas NR is not even in the mapping_array, "
5739 						 *     "or map and nr_map are inconsistent");
5740 						 */
5741 						/*
5742 						 * EY - not 100% sure about the lock thing,
5743 						 * don't think it's required:
5744 						 * SCTP_TCB_LOCK_ASSERT(stcb);
5745 						 */
5746 						/*
5747 						 * printf("\nCalculating an nr_gap!!\n"
5748 						 *     "mapping_array_size = %d nr_mapping_array_size = %d\n"
5749 						 *     "mapping_array_base = %d nr_mapping_array_base = %d\n"
5750 						 *     "highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
5751 						 *     "TSN = %d nr_gap = %d",
5752 						 *     asoc->mapping_array_size, asoc->nr_mapping_array_size,
5753 						 *     asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
5754 						 *     asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
5755 						 *     tsn, nr_gap);
5756 						 */
5757 					}
5781 				}
5782 			}
5783 		} else {
5784 			/* no more delivery now. */
5785 			break;
5786 		}
5787 		ctl = nctl;
5788 	}
5789 	/*
5790 	 * Now we must deliver things in the queue the normal way, if any
5791 	 * are now ready.
5792 	 */
5793 	tt = strmin->last_sequence_delivered + 1;
5794 	ctl = TAILQ_FIRST(&strmin->inqueue);
5795 	while (ctl) {
5796 		nctl = TAILQ_NEXT(ctl, next);
5797 		if (tt == ctl->sinfo_ssn) {
5798 			/* this is deliverable now */
5799 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5800 			/* subtract pending on streams */
5801 			asoc->size_on_all_streams -= ctl->length;
5802 			sctp_ucount_decr(asoc->cnt_on_all_streams);
5803 			/* deliver it to at least the delivery-q */
5804 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
5805 			if (stcb->sctp_socket) {
5806 				/* EY */
5807 				nr_tsn = ctl->sinfo_tsn;
5808 				sctp_add_to_readq(stcb->sctp_ep, stcb,
5809 				    ctl,
5810 				    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
5811 				/*
5812 				 * EY: this is the chunk that should be
5813 				 * tagged nr-gapped; calculate the gap and
5814 				 * then tag this TSN as nr
5815 				 * (chk->rec.data.TSN_seq).
5816 				 */
5817 				if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
5818 
5819 					if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
5820 						nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
5821 					} else {
5822 						nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
5823 					}
5824 					if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
5825 					    (nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
5826 						/*
5827 						 * EY These should never
5828 						 * happen, explained before
5829 						 */
5830 					} else {
5831 						SCTP_TCB_LOCK_ASSERT(stcb);
5832 						SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
5833 						if (nr_tsn > asoc->highest_tsn_inside_nr_map)
5834 							asoc->highest_tsn_inside_nr_map = nr_tsn;
5835 					}
5836 
5837 
5838 					if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, nr_gap)) {
5839 						/*
5840 						 * printf("In sctp_kick_prsctp_reorder_queue(8): "
5841 						 *     "Something wrong, the TSN to be tagged "
5842 						 *     "\nas NR is not even in the mapping_array, "
5843 						 *     "or map and nr_map are inconsistent");
5844 						 */
5845 						/*
5846 						 * EY - not 100% sure about the lock thing,
5847 						 * don't think it's required:
5848 						 * SCTP_TCB_LOCK_ASSERT(stcb);
5849 						 */
5850 						/*
5851 						 * printf("\nCalculating an nr_gap!!\n"
5852 						 *     "mapping_array_size = %d nr_mapping_array_size = %d\n"
5853 						 *     "mapping_array_base = %d nr_mapping_array_base = %d\n"
5854 						 *     "highest_tsn_inside_map = %d highest_tsn_inside_nr_map = %d\n"
5855 						 *     "TSN = %d nr_gap = %d",
5856 						 *     asoc->mapping_array_size, asoc->nr_mapping_array_size,
5857 						 *     asoc->mapping_array_base_tsn, asoc->nr_mapping_array_base_tsn,
5858 						 *     asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map,
5859 						 *     tsn, nr_gap);
5860 						 */
5861 					}
5885 				}
5886 			}
5887 			tt = strmin->last_sequence_delivered + 1;
5888 		} else {
5889 			break;
5890 		}
5891 		ctl = nctl;
5892 	}
5893 }
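/*
 * Editor's sketch of the two passes above, with made-up numbers: suppose a
 * FWD-TSN pushed strmin->last_sequence_delivered to 6 and the inqueue holds
 * SSNs 5, 6, 9 and 10. The first pass hands 5 and 6 to the read queue, since
 * they sit at or below the new mark; the second pass then looks for SSN 7,
 * finds 9 instead, and stops, leaving 9 and 10 queued until the hole fills.
 */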
5894 
5895 void
5896 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5897     struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
5898 {
5899 	/*
5900 	 * ISSUES that MUST be fixed for ECN! When we are the sender of the
5901 	 * forward TSN, when the SACK comes back that acknowledges the
5902 	 * FWD-TSN we must reset the NONCE sum to match correctly. This will
5903 	 * get quite tricky since we may have sent more data intervening
5904 	 * and must carefully account for what the SACK says on the nonce
5905 	 * and any gaps that are reported. This work will NOT be done here,
5906 	 * but I note it here since it is really related to PR-SCTP and
5907 	 * FWD-TSNs.
5908 	 */
5909 
5910 	/* The pr-sctp fwd tsn */
5911 	/*
5912 	 * Here we will perform all the data receiver side steps for
5913 	 * processing FwdTSN, as required by the pr-sctp draft.
5914 	 * Assume we get FwdTSN(x):
5915 	 *
5916 	 * 1) update local cumTSN to x
5917 	 * 2) try to further advance cumTSN to x + others we have
5918 	 * 3) examine and update re-ordering queue on pr-in-streams
5919 	 * 4) clean up re-assembly queue
5920 	 * 5) send a SACK to report where we are
5921 	 */
5922 	struct sctp_association *asoc;
5923 	uint32_t new_cum_tsn, gap;
5924 	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
5925 	struct sctp_stream_in *strm;
5926 	struct sctp_tmit_chunk *chk, *at;
5927 
5928 	cumack_set_flag = 0;
5929 	asoc = &stcb->asoc;
5930 	cnt_gone = 0;
5931 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5932 		SCTPDBG(SCTP_DEBUG_INDATA1,
5933 		    "Bad size too small/big fwd-tsn\n");
5934 		return;
5935 	}
5936 	m_size = (stcb->asoc.mapping_array_size << 3);
5937 	/*************************************************************/
5938 	/* 1. Here we update local cumTSN and shift the bitmap array */
5939 	/*************************************************************/
5940 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5941 
5942 	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
5943 	    asoc->cumulative_tsn == new_cum_tsn) {
5944 		/* Already got there ... */
5945 		return;
5946 	}
5947 	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
5948 	    MAX_TSN)) {
5949 		asoc->highest_tsn_inside_map = new_cum_tsn;
5950 		/* EY nr_mapping_array version of the above */
5951 		/*
5952 		 * if(SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
5953 		 * asoc->peer_supports_nr_sack)
5954 		 */
5955 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5956 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5957 			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5958 		}
5959 	}
5960 	/*
5961 	 * now we know the new TSN is more advanced, let's find the actual
5962 	 * gap
5963 	 */
5964 	if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
5965 	    MAX_TSN)) ||
5966 	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
5967 		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
5968 	} else {
5969 		/* try to prevent underflow here */
5970 		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
5971 	}
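	/*
	 * Worked example of the wraparound branch above (editor's sketch):
	 * with mapping_array_base_tsn = 0xfffffff0 and new_cum_tsn = 0x05,
	 * gap = 0x05 + (0xffffffff - 0xfffffff0) + 1 = 5 + 15 + 1 = 21,
	 * the same result unsigned modulo-2^32 subtraction would give.
	 */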
5972 
5973 	if (gap >= m_size) {
5974 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5975 			sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5976 		}
5977 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5978 			struct mbuf *oper;
5979 
5980 			/*
5981 			 * out of range (of single byte chunks in the rwnd I
5982 			 * give out). This must be an attacker.
5983 			 */
5984 			*abort_flag = 1;
5985 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5986 			    0, M_DONTWAIT, 1, MT_DATA);
5987 			if (oper) {
5988 				struct sctp_paramhdr *ph;
5989 				uint32_t *ippp;
5990 
5991 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5992 				    (sizeof(uint32_t) * 3);
5993 				ph = mtod(oper, struct sctp_paramhdr *);
5994 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5995 				ph->param_length = htons(SCTP_BUF_LEN(oper));
5996 				ippp = (uint32_t *) (ph + 1);
5997 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5998 				ippp++;
5999 				*ippp = asoc->highest_tsn_inside_map;
6000 				ippp++;
6001 				*ippp = new_cum_tsn;
6002 			}
6003 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
6004 			sctp_abort_an_association(stcb->sctp_ep, stcb,
6005 			    SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6006 			return;
6007 		}
6008 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
6009 slide_out:
6010 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
6011 		cumack_set_flag = 1;
6012 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
6013 		asoc->cumulative_tsn = asoc->highest_tsn_inside_map = new_cum_tsn;
6014 		/* EY - nr_sack: nr_mapping_array version of the above */
6015 		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
6016 			memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
6017 			asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
6018 			asoc->highest_tsn_inside_nr_map = new_cum_tsn;
6019 			if (asoc->nr_mapping_array_size != asoc->mapping_array_size) {
6020 			/*
6021 			 * printf("IN sctp_handle_forward_tsn: "
6022 			 *     "Something is wrong, the size of map and "
6023 			 *     "nr_map should be equal!");
6024 			 */ ;
6025 			}
6026 		}
6027 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
6028 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
6029 		}
6030 		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
6031 	} else {
6032 		SCTP_TCB_LOCK_ASSERT(stcb);
6033 		if ((compare_with_wrap(((uint32_t) asoc->cumulative_tsn + gap), asoc->highest_tsn_inside_map, MAX_TSN)) ||
6034 		    (((uint32_t) asoc->cumulative_tsn + gap) == asoc->highest_tsn_inside_map)) {
6035 			goto slide_out;
6036 		} else {
6037 			for (i = 0; i <= gap; i++) {
6038 				SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
6039 			}
6040 		}
6041 		/*
6042 		 * Now after marking all, slide thing forward but no sack
6043 		 * please.
6044 		 */
6045 		sctp_sack_check(stcb, 0, 0, abort_flag);
6046 		if (*abort_flag)
6047 			return;
6048 	}
6049 
6050 	/*************************************************************/
6051 	/* 2. Clear up re-assembly queue                             */
6052 	/*************************************************************/
6053 	/*
6054 	 * First service it if pd-api is up, just in case we can progress it
6055 	 * forward
6056 	 */
6057 	if (asoc->fragmented_delivery_inprogress) {
6058 		sctp_service_reassembly(stcb, asoc);
6059 	}
6060 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
6061 		/* For each one on here see if we need to toss it */
6062 		/*
6063 		 * For now large messages held on the reasmqueue that are
6064 		 * complete will be tossed too. We could in theory do more
6065 		 * work to spin through and stop after dumping one msg aka
6066 		 * seeing the start of a new msg at the head, and call the
6067 		 * delivery function... to see if it can be delivered... But
6068 		 * for now we just dump everything on the queue.
6069 		 */
6070 		chk = TAILQ_FIRST(&asoc->reasmqueue);
6071 		while (chk) {
6072 			at = TAILQ_NEXT(chk, sctp_next);
6073 			if (compare_with_wrap(asoc->cumulative_tsn,
6074 			    chk->rec.data.TSN_seq, MAX_TSN) ||
6075 			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
6076 				/* It needs to be tossed */
6077 				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
6078 				if (compare_with_wrap(chk->rec.data.TSN_seq,
6079 				    asoc->tsn_last_delivered, MAX_TSN)) {
6080 					asoc->tsn_last_delivered =
6081 					    chk->rec.data.TSN_seq;
6082 					asoc->str_of_pdapi =
6083 					    chk->rec.data.stream_number;
6084 					asoc->ssn_of_pdapi =
6085 					    chk->rec.data.stream_seq;
6086 					asoc->fragment_flags =
6087 					    chk->rec.data.rcv_flags;
6088 				}
6089 				asoc->size_on_reasm_queue -= chk->send_size;
6090 				sctp_ucount_decr(asoc->cnt_on_reasm_queue);
6091 				cnt_gone++;
6092 
6093 				/* Clear up any stream problem */
6094 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
6095 				    SCTP_DATA_UNORDERED &&
6096 				    (compare_with_wrap(chk->rec.data.stream_seq,
6097 				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
6098 				    MAX_SEQ))) {
6099 					/*
6100 					 * We must dump forward this stream's
6101 					 * sequence number if the chunk being
6102 					 * skipped is not unordered. There is
6103 					 * a chance that if the peer does not
6104 					 * include the last fragment in its
6105 					 * FWD-TSN we WILL have a problem
6106 					 * here, since we would have a
6107 					 * partial chunk in the queue that
6108 					 * may not be deliverable. Also, if a
6109 					 * partial delivery API has started,
6110 					 * the user may get a partial chunk
6111 					 * with the next read returning a new
6112 					 * chunk... really ugly, but I see no
6113 					 * way around it! Maybe a notify??
6114 					 */
6116 					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
6117 					    chk->rec.data.stream_seq;
6118 				}
6119 				if (chk->data) {
6120 					sctp_m_freem(chk->data);
6121 					chk->data = NULL;
6122 				}
6123 				sctp_free_a_chunk(stcb, chk);
6124 			} else {
6125 				/*
6126 				 * Ok we have gone beyond the end of the
6127 				 * fwd-tsn's mark. Some checks...
6128 				 */
6129 				if ((asoc->fragmented_delivery_inprogress) &&
6130 				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
6131 					uint32_t str_seq;
6132 
6133 					/*
6134 					 * Special case: PD-API is up and
6135 					 * what we fwd-tsn'd over includes
6136 					 * one that had the LAST_FRAG. We no
6137 					 * longer need to do the PD-API.
6138 					 */
6139 					asoc->fragmented_delivery_inprogress = 0;
6140 
6141 					str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
6142 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
6143 					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
6144 
6145 				}
6146 				break;
6147 			}
6148 			chk = at;
6149 		}
6150 	}
6151 	if (asoc->fragmented_delivery_inprogress) {
6152 		/*
6153 		 * Ok we removed cnt_gone chunks in the PD-API queue that
6154 		 * were being delivered. So now we must turn off the flag.
6155 		 */
6156 		uint32_t str_seq;
6157 
6158 		str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
6159 		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
6160 		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
6161 		asoc->fragmented_delivery_inprogress = 0;
6162 	}
6163 	/*************************************************************/
6164 	/* 3. Update the PR-stream re-ordering queues                */
6165 	/*************************************************************/
6166 	fwd_sz -= sizeof(*fwd);
6167 	if (m && fwd_sz) {
6168 		/* New method. */
6169 		unsigned int num_str;
6170 		struct sctp_strseq *stseq, strseqbuf;
6171 
6172 		offset += sizeof(*fwd);
6173 
6174 		num_str = fwd_sz / sizeof(struct sctp_strseq);
6175 		for (i = 0; i < num_str; i++) {
6176 			uint16_t st;
6177 
6178 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
6179 			    sizeof(struct sctp_strseq),
6180 			    (uint8_t *) & strseqbuf);
6181 			offset += sizeof(struct sctp_strseq);
6182 			if (stseq == NULL) {
6183 				break;
6184 			}
6185 			/* Convert */
6186 			st = ntohs(stseq->stream);
6187 			stseq->stream = st;
6188 			st = ntohs(stseq->sequence);
6189 			stseq->sequence = st;
6190 			/* now process */
6191 			if (stseq->stream >= asoc->streamincnt) {
6192 				/* screwed up streams, stop!  */
6193 				break;
6194 			}
6195 			strm = &asoc->strmin[stseq->stream];
6196 			if (compare_with_wrap(stseq->sequence,
6197 			    strm->last_sequence_delivered, MAX_SEQ)) {
6198 				/* Update the sequence number */
6199 				strm->last_sequence_delivered =
6200 				    stseq->sequence;
6201 			}
6202 			/* now kick the stream the new way */
6203 			/* sa_ignore NO_NULL_CHK */
6204 			sctp_kick_prsctp_reorder_queue(stcb, strm);
6205 		}
6206 	}
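	/*
	 * Editor's note: the loop above walks the per-stream entries that
	 * trail the FORWARD-TSN header on the wire (RFC 3758):
	 *
	 *   type 192 | flags | length    (chunk header, 4 bytes)
	 *   new cumulative TSN           (4 bytes)
	 *   stream #1 (2 bytes) | stream sequence #1 (2 bytes)
	 *   ...                          (one sctp_strseq per skipped stream)
	 *
	 * Each pair lets the receiver pull that stream's delivery cursor
	 * forward even though the skipped DATA chunks never arrived.
	 */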
6207 	if (TAILQ_FIRST(&asoc->reasmqueue)) {
6208 		/* now lets kick out and check for more fragmented delivery */
6209 		/* sa_ignore NO_NULL_CHK */
6210 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
6211 	}
6212 }
6213 
6214 /* EY: fully identical to sctp_express_handle_sack, duplicated only for naming convention */
6215 void
6216 sctp_express_handle_nr_sack(struct sctp_tcb *stcb, uint32_t cumack,
6217     uint32_t rwnd, int nonce_sum_flag, int *abort_now)
6218 {
6219 	struct sctp_nets *net;
6220 	struct sctp_association *asoc;
6221 	struct sctp_tmit_chunk *tp1, *tp2;
6222 	uint32_t old_rwnd;
6223 	int win_probe_recovery = 0;
6224 	int win_probe_recovered = 0;
6225 	int j, done_once = 0;
6226 
6227 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
6228 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
6229 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
6230 	}
6231 	SCTP_TCB_LOCK_ASSERT(stcb);
6232 #ifdef SCTP_ASOCLOG_OF_TSNS
6233 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
6234 	stcb->asoc.cumack_log_at++;
6235 	if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) {
6236 		stcb->asoc.cumack_log_at = 0;
6237 	}
6238 #endif
6239 	asoc = &stcb->asoc;
6240 	old_rwnd = asoc->peers_rwnd;
6241 	if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
6242 		/* old ack */
6243 		return;
6244 	} else if (asoc->last_acked_seq == cumack) {
6245 		/* Window update sack */
6246 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6247 		    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6248 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6249 			/* SWS sender side engages */
6250 			asoc->peers_rwnd = 0;
6251 		}
6252 		if (asoc->peers_rwnd > old_rwnd) {
6253 			goto again;
6254 		}
6255 		return;
6256 	}
6257 	/* First setup for CC stuff */
6258 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6259 		net->prev_cwnd = net->cwnd;
6260 		net->net_ack = 0;
6261 		net->net_ack2 = 0;
6262 
6263 		/*
6264 		 * CMT: Reset CUC and Fast recovery algo variables before
6265 		 * SACK processing
6266 		 */
6267 		net->new_pseudo_cumack = 0;
6268 		net->will_exit_fast_recovery = 0;
6269 	}
6270 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
6271 		uint32_t send_s;
6272 
6273 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
6274 			tp1 = TAILQ_LAST(&asoc->sent_queue,
6275 			    sctpchunk_listhead);
6276 			send_s = tp1->rec.data.TSN_seq + 1;
6277 		} else {
6278 			send_s = asoc->sending_seq;
6279 		}
6280 		if ((cumack == send_s) ||
6281 		    compare_with_wrap(cumack, send_s, MAX_TSN)) {
6282 #ifndef INVARIANTS
6283 			struct mbuf *oper;
6284 
6285 #endif
6286 #ifdef INVARIANTS
6287 			panic("Impossible sack 1");
6288 #else
6289 			*abort_now = 1;
6290 			/* XXX */
6291 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6292 			    0, M_DONTWAIT, 1, MT_DATA);
6293 			if (oper) {
6294 				struct sctp_paramhdr *ph;
6295 				uint32_t *ippp;
6296 
6297 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6298 				    sizeof(uint32_t);
6299 				ph = mtod(oper, struct sctp_paramhdr *);
6300 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
6301 				ph->param_length = htons(SCTP_BUF_LEN(oper));
6302 				ippp = (uint32_t *) (ph + 1);
6303 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
6304 			}
6305 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
6306 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
6307 			return;
6308 #endif
6309 		}
6310 	}
6311 	asoc->this_sack_highest_gap = cumack;
6312 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
6313 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
6314 		    stcb->asoc.overall_error_count,
6315 		    0,
6316 		    SCTP_FROM_SCTP_INDATA,
6317 		    __LINE__);
6318 	}
6319 	stcb->asoc.overall_error_count = 0;
6320 	if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
6321 		/* process the new consecutive TSN first */
6322 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
6323 		while (tp1) {
6324 			tp2 = TAILQ_NEXT(tp1, sctp_next);
6325 			if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
6326 			    MAX_TSN) ||
6327 			    cumack == tp1->rec.data.TSN_seq) {
6328 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
6329 					printf("Warning, an unsent chunk is now acked?\n");
6330 				}
6331 				/*
6332 				 * ECN Nonce: Add the nonce to the sender's
6333 				 * nonce sum
6334 				 */
6335 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
6336 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
6337 					/*
6338 					 * If it is less than ACKED, it is
6339 					 * now no longer in flight. Higher
6340 					 * values may occur during marking
6341 					 */
6342 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6343 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6344 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
6345 							    tp1->whoTo->flight_size,
6346 							    tp1->book_size,
6347 							    (uintptr_t) tp1->whoTo,
6348 							    tp1->rec.data.TSN_seq);
6349 						}
6350 						sctp_flight_size_decrease(tp1);
6351 						/* sa_ignore NO_NULL_CHK */
6352 						sctp_total_flight_decrease(stcb, tp1);
6353 					}
6354 					tp1->whoTo->net_ack += tp1->send_size;
6355 					if (tp1->snd_count < 2) {
6356 						/*
6357 						 * True non-retransmitted
6358 						 * chunk
6359 						 */
6360 						tp1->whoTo->net_ack2 +=
6361 						    tp1->send_size;
6362 
6363 						/* update RTO too? */
6364 						if (tp1->do_rtt) {
6365 							tp1->whoTo->RTO =
6366 							/*
6367 							 * sa_ignore
6368 							 * NO_NULL_CHK
6369 							 */
6370 							    sctp_calculate_rto(stcb,
6371 							    asoc, tp1->whoTo,
6372 							    &tp1->sent_rcv_time,
6373 							    sctp_align_safe_nocopy);
6374 							tp1->do_rtt = 0;
6375 						}
6376 					}
6377 					/*
6378 					 * CMT: CUCv2 algorithm. From the
6379 					 * cumack'd TSNs, for each TSN being
6380 					 * acked for the first time, set the
6381 					 * following variables for the
6382 					 * corresp destination.
6383 					 * new_pseudo_cumack will trigger a
6384 					 * cwnd update.
6385 					 * find_(rtx_)pseudo_cumack will
6386 					 * trigger search for the next
6387 					 * expected (rtx-)pseudo-cumack.
6388 					 */
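					/*
					 * Illustrative pseudo-cumack example: with TSNs
					 * striped over two paths (1, 3, 5 on A; 2, 4, 6
					 * on B), path A's pseudo-cumack advances
					 * 1 -> 3 -> 5 as those TSNs are newly acked,
					 * letting A's cwnd grow even when the
					 * association-wide cumack is held back by the
					 * other path.
					 */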
6389 					tp1->whoTo->new_pseudo_cumack = 1;
6390 					tp1->whoTo->find_pseudo_cumack = 1;
6391 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
6392 
6393 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6394 						/* sa_ignore NO_NULL_CHK */
6395 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6396 					}
6397 				}
6398 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6399 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6400 				}
6401 				if (tp1->rec.data.chunk_was_revoked) {
6402 					/* deflate the cwnd */
6403 					tp1->whoTo->cwnd -= tp1->book_size;
6404 					tp1->rec.data.chunk_was_revoked = 0;
6405 				}
6406 				tp1->sent = SCTP_DATAGRAM_ACKED;
6407 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
6408 				if (tp1->data) {
6409 					/* sa_ignore NO_NULL_CHK */
6410 					sctp_free_bufspace(stcb, asoc, tp1, 1);
6411 					sctp_m_freem(tp1->data);
6412 				}
6413 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6414 					sctp_log_sack(asoc->last_acked_seq,
6415 					    cumack,
6416 					    tp1->rec.data.TSN_seq,
6417 					    0,
6418 					    0,
6419 					    SCTP_LOG_FREE_SENT);
6420 				}
6421 				tp1->data = NULL;
6422 				asoc->sent_queue_cnt--;
6423 				sctp_free_a_chunk(stcb, tp1);
6424 				tp1 = tp2;
6425 			} else {
6426 				break;
6427 			}
6428 		}
6429 
6430 	}
6431 	/* sa_ignore NO_NULL_CHK */
6432 	if (stcb->sctp_socket) {
6433 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6434 		struct socket *so;
6435 
6436 #endif
6437 
6438 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
6439 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6440 			/* sa_ignore NO_NULL_CHK */
6441 			sctp_wakeup_log(stcb, cumack, 1, SCTP_WAKESND_FROM_SACK);
6442 		}
6443 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6444 		so = SCTP_INP_SO(stcb->sctp_ep);
6445 		atomic_add_int(&stcb->asoc.refcnt, 1);
6446 		SCTP_TCB_UNLOCK(stcb);
6447 		SCTP_SOCKET_LOCK(so, 1);
6448 		SCTP_TCB_LOCK(stcb);
6449 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
6450 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6451 			/* assoc was freed while we were unlocked */
6452 			SCTP_SOCKET_UNLOCK(so, 1);
6453 			return;
6454 		}
6455 #endif
6456 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
6457 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
6458 		SCTP_SOCKET_UNLOCK(so, 1);
6459 #endif
6460 	} else {
6461 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
6462 			sctp_wakeup_log(stcb, cumack, 1, SCTP_NOWAKE_FROM_SACK);
6463 		}
6464 	}
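	/*
	 * Note on the locking dance above: on platforms where the socket
	 * lock must be taken before the TCB lock, the TCB lock is dropped
	 * first; the refcount bump keeps the association from being freed
	 * in that window, and SCTP_STATE_CLOSED_SOCKET is re-checked after
	 * relocking.
	 */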
6465 
6466 	/* JRS - Use the congestion control given in the CC module */
6467 	if (asoc->last_acked_seq != cumack)
6468 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
6469 
6470 	asoc->last_acked_seq = cumack;
6471 
6472 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
6473 		/* nothing left in-flight */
6474 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6475 			net->flight_size = 0;
6476 			net->partial_bytes_acked = 0;
6477 		}
6478 		asoc->total_flight = 0;
6479 		asoc->total_flight_count = 0;
6480 	}
6481 	/* Fix up the advanced-peer-ack-point (a-p-a-p) for future PR-SCTP sends */
6482 	if (compare_with_wrap(cumack, asoc->advanced_peer_ack_point, MAX_TSN)) {
6483 		asoc->advanced_peer_ack_point = cumack;
6484 	}
6485 	/* ECN Nonce updates */
6486 	if (asoc->ecn_nonce_allowed) {
6487 		if (asoc->nonce_sum_check) {
6488 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base) & SCTP_SACK_NONCE_SUM)) {
6489 				if (asoc->nonce_wait_for_ecne == 0) {
6490 					struct sctp_tmit_chunk *lchk;
6491 
6492 					lchk = TAILQ_FIRST(&asoc->send_queue);
6493 					asoc->nonce_wait_for_ecne = 1;
6494 					if (lchk) {
6495 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
6496 					} else {
6497 						asoc->nonce_wait_tsn = asoc->sending_seq;
6498 					}
6499 				} else {
6500 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
6501 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
6502 						/*
6503 						 * Misbehaving peer. We need
6504 						 * to react to this guy
6505 						 */
6506 						asoc->ecn_allowed = 0;
6507 						asoc->ecn_nonce_allowed = 0;
6508 					}
6509 				}
6510 			}
6511 		} else {
6512 			/* See if Resynchronization Possible */
6513 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
6514 				asoc->nonce_sum_check = 1;
6515 				/*
6516 				 * Now we must calculate what the base is.
6517 				 * We do this based on two things: we know
6518 				 * the totals for all the segments
6519 				 * gap-acked in the SACK (none), and we
6520 				 * also know the SACK's nonce sum, in
6521 				 * nonce_sum_flag. So we can build a truth
6522 				 * table to back-calculate the new value
6523 				 * of asoc->nonce_sum_expect_base
6524 				 * (Base = SACK-flag XOR Seg-Sums):
6525 				 *
6526 				 * (0,0)->0  (1,0)->1  (0,1)->1  (1,1)->0
6527 				 */
6528 				asoc->nonce_sum_expect_base = (0 ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
6529 			}
6530 		}
6531 	}
6532 	/* RWND update */
6533 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
6534 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
6535 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6536 		/* SWS sender side engages */
6537 		asoc->peers_rwnd = 0;
6538 	}
6539 	if (asoc->peers_rwnd > old_rwnd) {
6540 		win_probe_recovery = 1;
6541 	}
6542 	/* Now assure a timer where data is queued at */
6543 again:
6544 	j = 0;
6545 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6546 		if (win_probe_recovery && (net->window_probe)) {
6547 			net->window_probe = 0;
6548 			win_probe_recovered = 1;
6549 			/*
6550 			 * Find the first chunk that was used for the window
6551 			 * probe and move it back to the data send queue
6552 			 */
6553 			/* sa_ignore FREED_MEMORY */
6554 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6555 				if (tp1->window_probe) {
6556 					/* move back to data send queue */
6557 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
6558 					break;
6559 				}
6560 			}
6561 		}
6562 		if (net->flight_size) {
6563 			int to_ticks;
6564 
6565 			if (net->RTO == 0) {
6566 				to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
6567 			} else {
6568 				to_ticks = MSEC_TO_TICKS(net->RTO);
6569 			}
6570 			j++;
6571 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
6572 			    sctp_timeout_handler, &net->rxt_timer);
6573 		} else {
6574 			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
6575 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
6576 				    stcb, net,
6577 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
6578 			}
6579 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
6580 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6581 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
6582 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
6583 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
6584 				}
6585 			}
6586 		}
6587 	}
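	/*
	 * Timer arithmetic above, with illustrative numbers: at hz = 1000,
	 * an RTO of 3000 ms becomes 3000 ticks via MSEC_TO_TICKS(); a net
	 * with no RTT sample yet (RTO == 0) falls back to the
	 * association's initial_rto instead.
	 */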
6588 	if ((j == 0) &&
6589 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
6590 	    (asoc->sent_queue_retran_cnt == 0) &&
6591 	    (win_probe_recovered == 0) &&
6592 	    (done_once == 0)) {
6593 		/* huh, this should not happen */
6594 		sctp_fs_audit(asoc);
6595 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6596 			net->flight_size = 0;
6597 		}
6598 		asoc->total_flight = 0;
6599 		asoc->total_flight_count = 0;
6600 		asoc->sent_queue_retran_cnt = 0;
6601 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
6602 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6603 				sctp_flight_size_increase(tp1);
6604 				sctp_total_flight_increase(stcb, tp1);
6605 			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6606 				asoc->sent_queue_retran_cnt++;
6607 			}
6608 		}
6609 		done_once = 1;
6610 		goto again;
6611 	}
6612 	/**********************************/
6613 	/* Now what about shutdown issues */
6614 	/**********************************/
6615 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
6616 		/* nothing left on sendqueue.. consider done */
6617 		/* clean up */
6618 		if ((asoc->stream_queue_cnt == 1) &&
6619 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
6620 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
6621 		    (asoc->locked_on_sending)
6622 		    ) {
6623 			struct sctp_stream_queue_pending *sp;
6624 
6625 			/*
6626 			 * I may be in a state where we got it all across.. but
6627 			 * cannot write more due to a shutdown... we abort
6628 			 * since the user did not indicate EOR in this case.
6629 			 * The sp will be cleaned during free of the asoc.
6630 			 */
6631 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
6632 			    sctp_streamhead);
6633 			if ((sp) && (sp->length == 0)) {
6634 				/* Let cleanup code purge it */
6635 				if (sp->msg_is_complete) {
6636 					asoc->stream_queue_cnt--;
6637 				} else {
6638 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6639 					asoc->locked_on_sending = NULL;
6640 					asoc->stream_queue_cnt--;
6641 				}
6642 			}
6643 		}
6644 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
6645 		    (asoc->stream_queue_cnt == 0)) {
6646 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6647 				/* Need to abort here */
6648 				struct mbuf *oper;
6649 
6650 		abort_out_now:
6651 				*abort_now = 1;
6652 				/* XXX */
6653 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
6654 				    0, M_DONTWAIT, 1, MT_DATA);
6655 				if (oper) {
6656 					struct sctp_paramhdr *ph;
6657 					uint32_t *ippp;
6658 
6659 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
6660 					    sizeof(uint32_t);
6661 					ph = mtod(oper, struct sctp_paramhdr *);
6662 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6663 					ph->param_length = htons(SCTP_BUF_LEN(oper));
6664 					ippp = (uint32_t *) (ph + 1);
6665 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
6666 				}
6667 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
6668 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
6669 			} else {
6670 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
6671 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
6672 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6673 				}
6674 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6675 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6676 				sctp_stop_timers_for_shutdown(stcb);
6677 				sctp_send_shutdown(stcb,
6678 				    stcb->asoc.primary_destination);
6679 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
6680 				    stcb->sctp_ep, stcb, asoc->primary_destination);
6681 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
6682 				    stcb->sctp_ep, stcb, asoc->primary_destination);
6683 			}
6684 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
6685 		    (asoc->stream_queue_cnt == 0)) {
6686 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
6687 				goto abort_out_now;
6688 			}
6689 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6690 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
6691 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6692 			sctp_send_shutdown_ack(stcb,
6693 			    stcb->asoc.primary_destination);
6694 
6695 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
6696 			    stcb->sctp_ep, stcb, asoc->primary_destination);
6697 		}
6698 	}
6699 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
6700 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
6701 		    rwnd,
6702 		    stcb->asoc.peers_rwnd,
6703 		    stcb->asoc.total_flight,
6704 		    stcb->asoc.total_output_queue_size);
6705 	}
6706 }
6707 
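/*
 * All of the TSN ordering above goes through compare_with_wrap()
 * because 32-bit TSNs wrap. A minimal sketch of that style of
 * serial-number comparison (RFC 1982; illustrative only, with a
 * hypothetical name -- the real helper is provided by sctputil):
 *
 *	static int
 *	tsn_after(uint32_t a, uint32_t b)
 *	{
 *		return ((a != b) && ((uint32_t)(a - b) < (1U << 31)));
 *	}
 *
 * Here (uint32_t)(a - b) is the forward distance from b to a on the
 * 2^32 circle; a is "after" b when that distance is under half the
 * space. E.g. tsn_after(5, 0xfffffffe) is true: TSN 5 is newer than
 * 0xfffffffe once the sequence space has wrapped.
 */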
6708 /* EY! nr_sack version of sctp_handle_segments; nr-gapped TSNs get removed from the RtxQ in this method */
6709 static void
6710 sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
6711     struct sctp_nr_sack_chunk *ch, uint32_t last_tsn, uint32_t * biggest_tsn_acked,
6712     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
6713     uint32_t num_seg, uint32_t num_nr_seg, int *ecn_seg_sums)
6714 {
6715 	/************************************************/
6716 	/* process fragments and update sendqueue        */
6717 	/************************************************/
6718 	struct sctp_nr_sack *nr_sack;
6719 	struct sctp_gap_ack_block *frag, block;
6720 	struct sctp_nr_gap_ack_block *nr_frag, nr_block;
6721 	struct sctp_tmit_chunk *tp1;
6722 	uint32_t i, j, all_bit;
6723 	int wake_him = 0;
6724 	uint32_t theTSN;
6725 	int num_frs = 0;
6726 
6727 	uint16_t frag_strt, frag_end, primary_flag_set;
6728 	uint16_t nr_frag_strt, nr_frag_end;
6729 
6730 	uint32_t last_frag_high;
6731 	uint32_t last_nr_frag_high;
6732 
6733 	all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
6734 
6735 	/*
6736 	 * @@@ JRI : TODO: This flag is not used anywhere .. remove?
6737 	 */
6738 	if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
6739 		primary_flag_set = 1;
6740 	} else {
6741 		primary_flag_set = 0;
6742 	}
6743 	nr_sack = &ch->nr_sack;
6744 
6745 	/*
6746 	 * EY! - I will process nr_gaps similarly, by going to this position
6747 	 * again if the All bit is set
6748 	 */
6749 	frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
6750 	    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
6751 	*offset += sizeof(block);
6752 	if (frag == NULL) {
6753 		return;
6754 	}
6755 	tp1 = NULL;
6756 	last_frag_high = 0;
6757 	for (i = 0; i < num_seg; i++) {
6758 		frag_strt = ntohs(frag->start);
6759 		frag_end = ntohs(frag->end);
6760 		/* some sanity checks on the fragment offsets */
6761 		if (frag_strt > frag_end) {
6762 			/* this one is malformed, skip */
6763 			frag++;
6764 			continue;
6765 		}
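		/*
		 * Gap ack block offsets are relative to the cum-ack carried
		 * in this chunk. Illustrative example: with last_tsn = 1000
		 * and a block of start = 2, end = 4, the peer is gap-acking
		 * TSNs 1002 through 1004.
		 */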
6766 		if (compare_with_wrap((frag_end + last_tsn), *biggest_tsn_acked,
6767 		    MAX_TSN))
6768 			*biggest_tsn_acked = frag_end + last_tsn;
6769 
6770 		/* mark acked dgs and find out the highest TSN being acked */
6771 		if (tp1 == NULL) {
6772 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
6773 
6774 			/* save the locations of the last frags */
6775 			last_frag_high = frag_end + last_tsn;
6776 		} else {
6777 			/*
6778 			 * now let's see if we need to reset the queue due to
6779 			 * an out-of-order SACK fragment
6780 			 */
6781 			if (compare_with_wrap(frag_strt + last_tsn,
6782 			    last_frag_high, MAX_TSN)) {
6783 				/*
6784 				 * if the new frag starts after the last TSN
6785 				 * frag covered, we are ok and this one is
6786 				 * beyond the last one
6787 				 */
6788 				;
6789 			} else {
6790 				/*
6791 				 * ok, they have reset us, so we need to
6792 				 * reset the queue; this will cause extra
6793 				 * hunting but hey, they chose the
6794 				 * performance hit when they failed to order
6795 				 * their gaps..
6796 				 */
6797 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
6798 			}
6799 			last_frag_high = frag_end + last_tsn;
6800 		}
6801 		for (j = frag_strt; j <= frag_end; j++) {
6802 			theTSN = j + last_tsn;
6803 			while (tp1) {
6804 				if (tp1->rec.data.doing_fast_retransmit)
6805 					num_frs++;
6806 
6807 				/*
6808 				 * CMT: CUCv2 algorithm. For each TSN being
6809 				 * processed from the sent queue, track the
6810 				 * next expected pseudo-cumack, or
6811 				 * rtx_pseudo_cumack, if required. Separate
6812 				 * cumack trackers for first transmissions,
6813 				 * and retransmissions.
6814 				 */
6815 				if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6816 				    (tp1->snd_count == 1)) {
6817 					tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
6818 					tp1->whoTo->find_pseudo_cumack = 0;
6819 				}
6820 				if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
6821 				    (tp1->snd_count > 1)) {
6822 					tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
6823 					tp1->whoTo->find_rtx_pseudo_cumack = 0;
6824 				}
6825 				if (tp1->rec.data.TSN_seq == theTSN) {
6826 					if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
6827 						/*
6828 						 * must be held until
6829 						 * cum-ack passes
6830 						 */
6831 						/*
6832 						 * ECN Nonce: Add the nonce
6833 						 * value to the sender's
6834 						 * nonce sum
6835 						 */
6836 						if (tp1->sent < SCTP_DATAGRAM_RESEND) {
6837 							/*-
6838 							 * If it is less than RESEND, it is
6839 							 * now no longer in flight.
6840 							 * Higher values may already be set
6841 							 * via previous Gap Ack Blocks...
6842 							 * i.e. ACKED or RESEND.
6843 							 */
6844 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
6845 							    *biggest_newly_acked_tsn, MAX_TSN)) {
6846 								*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
6847 							}
6848 							/*
6849 							 * CMT: SFR algo
6850 							 * (and HTNA) -
6851 							 * set saw_newack
6852 							 * to 1 for the
6853 							 * dest being
6854 							 * newly acked.
6855 							 * Update
6856 							 * this_sack_highest_newack
6857 							 * if appropriate.
6858 							 */
6859 							if (tp1->rec.data.chunk_was_revoked == 0)
6860 								tp1->whoTo->saw_newack = 1;
6861 
6862 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
6863 							    tp1->whoTo->this_sack_highest_newack,
6864 							    MAX_TSN)) {
6865 								tp1->whoTo->this_sack_highest_newack =
6866 								    tp1->rec.data.TSN_seq;
6867 							}
6868 							/*
6869 							 * CMT DAC algo:
6870 							 * also update
6871 							 * this_sack_lowest_newack
6872 							 * (lowest TSN newly acked)
6873 							 */
6874 							if (*this_sack_lowest_newack == 0) {
6875 								if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6876 									sctp_log_sack(*this_sack_lowest_newack,
6877 									    last_tsn,
6878 									    tp1->rec.data.TSN_seq,
6879 									    0,
6880 									    0,
6881 									    SCTP_LOG_TSN_ACKED);
6882 								}
6883 								*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
6884 							}
6885 							/*
6886 							 * CMT: CUCv2
6887 							 * algorithm. If the
6888 							 * (rtx-)pseudo-cumack
6889 							 * for the corresp
6890 							 * dest is being
6891 							 * acked, then we
6892 							 * have a new
6893 							 * (rtx-)pseudo-cumack.
6894 							 * Set
6895 							 * new_(rtx_)pseudo_cumack
6896 							 * to TRUE so that
6897 							 * the cwnd for this
6898 							 * dest can be
6899 							 * updated. Also
6900 							 * trigger search
6901 							 * for the next
6902 							 * expected
6903 							 * (rtx-)pseudo-cumack.
6904 							 * Separate
6905 							 * pseudo_cumack
6906 							 * trackers are kept
6907 							 * for first
6908 							 * transmissions and
6909 							 * retransmissions.
6910 							 */
6911 							if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
6912 								if (tp1->rec.data.chunk_was_revoked == 0) {
6913 									tp1->whoTo->new_pseudo_cumack = 1;
6914 								}
6915 								tp1->whoTo->find_pseudo_cumack = 1;
6916 							}
6917 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
6918 								sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
6919 							}
6920 							if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
6921 								if (tp1->rec.data.chunk_was_revoked == 0) {
6922 									tp1->whoTo->new_pseudo_cumack = 1;
6923 								}
6924 								tp1->whoTo->find_rtx_pseudo_cumack = 1;
6925 							}
6926 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
6927 								sctp_log_sack(*biggest_newly_acked_tsn,
6928 								    last_tsn,
6929 								    tp1->rec.data.TSN_seq,
6930 								    frag_strt,
6931 								    frag_end,
6932 								    SCTP_LOG_TSN_ACKED);
6933 							}
6934 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6935 								sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
6936 								    tp1->whoTo->flight_size,
6937 								    tp1->book_size,
6938 								    (uintptr_t) tp1->whoTo,
6939 								    tp1->rec.data.TSN_seq);
6940 							}
6941 							sctp_flight_size_decrease(tp1);
6942 							sctp_total_flight_decrease(stcb, tp1);
6943 
6944 							tp1->whoTo->net_ack += tp1->send_size;
6945 							if (tp1->snd_count < 2) {
6946 								/*
6947 								 * A true
6948 								 * non-retransmitted
6949 								 * chunk, sent
6950 								 * only once
6951 								 */
6952 								tp1->whoTo->net_ack2 += tp1->send_size;
6953 
6954 								/*
6955 								 * update
6956 								 * RTO too?
6957 								 */
6958 								if (tp1->do_rtt) {
6959 									tp1->whoTo->RTO =
6960 									    sctp_calculate_rto(stcb,
6961 									    asoc,
6962 									    tp1->whoTo,
6963 									    &tp1->sent_rcv_time,
6964 									    sctp_align_safe_nocopy);
6965 									tp1->do_rtt = 0;
6966 								}
6967 							}
6968 						}
6969 						if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
6970 							(*ecn_seg_sums) += tp1->rec.data.ect_nonce;
6971 							(*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
6972 							if (compare_with_wrap(tp1->rec.data.TSN_seq,
6973 							    asoc->this_sack_highest_gap,
6974 							    MAX_TSN)) {
6975 								asoc->this_sack_highest_gap =
6976 								    tp1->rec.data.TSN_seq;
6977 							}
6978 							if (tp1->sent == SCTP_DATAGRAM_RESEND) {
6979 								sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6980 #ifdef SCTP_AUDITING_ENABLED
6981 								sctp_audit_log(0xB2,
6982 								    (asoc->sent_queue_retran_cnt & 0x000000ff));
6983 #endif
6984 							}
6985 						}
6986 						/*
6987 						 * All chunks NOT UNSENT
6988 						 * fall through here and are
6989 						 * marked
6990 						 */
6991 						tp1->sent = SCTP_DATAGRAM_MARKED;
6992 						if (tp1->rec.data.chunk_was_revoked) {
6993 							/* deflate the cwnd */
6994 							tp1->whoTo->cwnd -= tp1->book_size;
6995 							tp1->rec.data.chunk_was_revoked = 0;
6996 						}
6997 						/*
6998 						 * EY - if all bit is set
6999 						 * then this TSN is
7000 						 * nr_marked
7001 						 */
7002 						if (all_bit) {
7003 							tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7004 							/*
7005 							 * TAILQ_REMOVE(&asoc->sent_queue,
7006 							 *     tp1, sctp_next);
7007 							 */
7009 							if (tp1->data) {
7010 								/*
7011 								/* sa_ignore NO_NULL_CHK */
7016 								sctp_m_freem(tp1->data);
7017 							}
7018 							tp1->data = NULL;
7019 							/* asoc->sent_queue_cnt--; */
7020 							/* sctp_free_a_chunk(stcb, tp1); */
7027 							wake_him++;
7028 						}
7029 					}
7030 					break;
7031 				}	/* if (tp1->TSN_seq == theTSN) */
7032 				if (compare_with_wrap(tp1->rec.data.TSN_seq, theTSN,
7033 				    MAX_TSN))
7034 					break;
7035 
7036 				tp1 = TAILQ_NEXT(tp1, sctp_next);
7037 			}	/* end while (tp1) */
7038 		}		/* end for (j = fragStart */
7039 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
7040 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
7041 		*offset += sizeof(block);
7042 		if (frag == NULL) {
7043 			break;
7044 		}
7045 	}
7046 
7047 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
7048 		if (num_frs)
7049 			sctp_log_fr(*biggest_tsn_acked,
7050 			    *biggest_newly_acked_tsn,
7051 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
7052 	}
7053 	/*
7054 	 * EY - if the all bit is not set then there should be other loops to
7055 	 * identify nr TSNs
7056 	 */
7057 	if (!all_bit) {
7058 
7059 		nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7060 		    sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7061 		*offset += sizeof(nr_block);
7062 
7063 
7064 
7065 		if (nr_frag == NULL) {
7066 			return;
7067 		}
7068 		tp1 = NULL;
7069 		last_nr_frag_high = 0;
7070 
7071 		for (i = 0; i < num_nr_seg; i++) {
7072 
7073 			nr_frag_strt = ntohs(nr_frag->start);
7074 			nr_frag_end = ntohs(nr_frag->end);
7075 
7076 			/* some sanity checks on the nr fragment offsets */
7077 			if (nr_frag_strt > nr_frag_end) {
7078 				/* this one is malformed, skip */
7079 				nr_frag++;
7080 				continue;
7081 			}
7082 			/*
7083 			 * mark acked dgs and find out the highest TSN being
7084 			 * acked
7085 			 */
7086 			if (tp1 == NULL) {
7087 				tp1 = TAILQ_FIRST(&asoc->sent_queue);
7088 
7089 				/* save the locations of the last frags */
7090 				last_nr_frag_high = nr_frag_end + last_tsn;
7091 			} else {
7092 				/*
7093 				 * now let's see if we need to reset the
7094 				 * queue due to an out-of-order SACK fragment
7095 				 */
7096 				if (compare_with_wrap(nr_frag_strt + last_tsn,
7097 				    last_nr_frag_high, MAX_TSN)) {
7098 					/*
7099 					 * if the new frag starts after the
7100 					 * last TSN frag covered, we are ok
7101 					 * and this one is beyond the last
7102 					 * one
7103 					 */
7104 					;
7105 				} else {
7106 					/*
7107 					 * ok, they have reset us, so we
7108 					 * need to reset the queue; this will
7109 					 * cause extra hunting but hey, they
7110 					 * chose the performance hit when
7111 					 * they failed to order their gaps..
7112 					 */
7113 					tp1 = TAILQ_FIRST(&asoc->sent_queue);
7114 				}
7115 				last_nr_frag_high = nr_frag_end + last_tsn;
7116 			}
7117 
7118 			for (j = nr_frag_strt + last_tsn; (compare_with_wrap((nr_frag_end + last_tsn), j, MAX_TSN)); j++) {
7119 				while (tp1) {
7120 					if (tp1->rec.data.TSN_seq == j) {
7121 						if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7122 							tp1->sent = SCTP_DATAGRAM_NR_MARKED;
7123 							/*
7124 							 * TAILQ_REMOVE(&asoc->sent_queue,
7125 							 *     tp1, sctp_next);
7126 							 */
7128 							if (tp1->data) {
7129 								/* sa_ignore NO_NULL_CHK */
7134 								sctp_free_bufspace(stcb, asoc, tp1, 1);
7135 								sctp_m_freem(tp1->data);
7136 							}
7137 							tp1->data = NULL;
7138 							/* asoc->sent_queue_cnt--; */
7139 							/* sctp_free_a_chunk(stcb, tp1); */
7146 							wake_him++;
7147 						}
7148 						break;
7149 					}	/* if (tp1->TSN_seq == j) */
7150 					if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
7151 					    MAX_TSN))
7152 						break;
7153 					tp1 = TAILQ_NEXT(tp1, sctp_next);
7154 				}	/* end while (tp1) */
7155 
7156 			}	/* end for (j = nrFragStart */
7157 
7158 			nr_frag = (struct sctp_nr_gap_ack_block *)sctp_m_getptr(m, *offset,
7159 			    sizeof(struct sctp_nr_gap_ack_block), (uint8_t *) & nr_block);
7160 			*offset += sizeof(nr_block);
7161 			if (nr_frag == NULL) {
7162 				break;
7163 			}
7164 		}		/* end of if(!all_bit) */
7165 	}
7166 	/*
7167 	 * EY- wake up the socket if things have been removed from the sent
7168 	 * queue
7169 	 */
7170 	if ((wake_him) && (stcb->sctp_socket)) {
7171 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7172 		struct socket *so;
7173 
7174 #endif
7175 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7176 		/*
7177 		 * if (SCTP_BASE_SYSCTL(sctp_logging_level) &
7178 		 * SCTP_WAKE_LOGGING_ENABLE) { sctp_wakeup_log(stcb,
7179 		 * cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);}
7180 		 */
7181 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7182 		so = SCTP_INP_SO(stcb->sctp_ep);
7183 		atomic_add_int(&stcb->asoc.refcnt, 1);
7184 		SCTP_TCB_UNLOCK(stcb);
7185 		SCTP_SOCKET_LOCK(so, 1);
7186 		SCTP_TCB_LOCK(stcb);
7187 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7188 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7189 			/* assoc was freed while we were unlocked */
7190 			SCTP_SOCKET_UNLOCK(so, 1);
7191 			return;
7192 		}
7193 #endif
7194 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7195 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7196 		SCTP_SOCKET_UNLOCK(so, 1);
7197 #endif
7198 	}			/* else { if
7199 				 * (SCTP_BASE_SYSCTL(sctp_logging_level) &
7200 				 * SCTP_WAKE_LOGGING_ENABLE) {
7201 				 * sctp_wakeup_log(stcb, cum_ack, wake_him,
7202 				 * SCTP_NOWAKE_FROM_SACK); } } */
7203 }
7204 
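/*
 * Wire layout assumed by the walker above (sketch; field names as used
 * in this file): an NR-SACK carries cum_tsn_ack, a_rwnd,
 * num_gap_ack_blks, num_nr_gap_ack_blks and num_dup_tsns, followed by
 * the gap ack blocks, then the nr gap ack blocks, then the duplicate
 * TSNs; the two block walks consume them in exactly that order.
 */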
7205 /* EY- nr_sack */
7206 /* Identifies the non-renegable tsns that are revoked*/
7207 static void
7208 sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
7209     struct sctp_association *asoc, uint32_t cumack,
7210     u_long biggest_tsn_acked)
7211 {
7212 	struct sctp_tmit_chunk *tp1;
7213 
7214 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7215 	while (tp1) {
7216 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
7217 		    MAX_TSN)) {
7218 			/*
7219 			 * ok, this guy is either ACKED or MARKED. If it is
7220 			 * ACKED it has been previously acked but not this
7221 			 * time, i.e. revoked.  If it is MARKED it was ACK'ed
7222 			 * again.
7223 			 */
7224 			if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
7225 			    MAX_TSN))
7226 				break;
7227 
7228 
7229 			if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
7230 				/*
7231 				 * EY! a non-renegable TSN is revoked, need
7232 				 * to abort the association
7233 				 */
7234 				/*
7235 				 * EY TODO: put in the code to abort the
7236 				 * assoc.
7237 				 */
7238 				return;
7239 			} else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
7240 				/* it has been re-acked in this SACK */
7241 				tp1->sent = SCTP_DATAGRAM_NR_ACKED;
7242 			}
7243 		}
7244 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
7245 			break;
7246 		tp1 = TAILQ_NEXT(tp1, sctp_next);
7247 	}
7248 }
7249 
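/*
 * Revocation example (illustrative): if TSN 1005 was gap-acked by an
 * earlier SACK but this SACK's cumack and blocks no longer cover it,
 * the peer has revoked it. For non-renegable (NR_ACKED) TSNs that is
 * not permitted, hence the abort TODOs above.
 */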
7250 /* EY! nr_sack version of sctp_handle_sack, nr_gap_ack processing should be added to this method*/
7251 void
7252 sctp_handle_nr_sack(struct mbuf *m, int offset,
7253     struct sctp_nr_sack_chunk *ch, struct sctp_tcb *stcb,
7254     struct sctp_nets *net_from, int *abort_now, int nr_sack_len, uint32_t rwnd)
7255 {
7256 	struct sctp_association *asoc;
7257 
7258 	/* EY sack */
7259 	struct sctp_nr_sack *nr_sack;
7260 	struct sctp_tmit_chunk *tp1, *tp2;
7261 	uint32_t cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked,
7262 	         this_sack_lowest_newack;
7263 	uint32_t sav_cum_ack;
7264 
7265 	/* EY num_seg */
7266 	uint16_t num_seg, num_nr_seg, num_dup;
7267 	uint16_t wake_him = 0;
7268 	unsigned int nr_sack_length;
7269 	uint32_t send_s = 0;
7270 	long j;
7271 	int accum_moved = 0;
7272 	int will_exit_fast_recovery = 0;
7273 	uint32_t a_rwnd, old_rwnd;
7274 	int win_probe_recovery = 0;
7275 	int win_probe_recovered = 0;
7276 	struct sctp_nets *net = NULL;
7277 	int nonce_sum_flag, ecn_seg_sums = 0, all_bit;
7278 	int done_once;
7279 	uint8_t reneged_all = 0;
7280 	uint8_t cmt_dac_flag;
7281 
7282 	/*
7283 	 * we take any chance we can to service our queues since we cannot
7284 	 * get awoken when the socket is read from :<
7285 	 */
7286 	/*
7287 	 * Now perform the actual SACK handling: 1) Verify that it is not an
7288 	 * old sack; if so, discard. 2) If there is nothing left in the send
7289 	 * queue (cum-ack is equal to last acked) then you have a duplicate
7290 	 * too; update any rwnd change and verify no timers are running,
7291 	 * then return. 3) Process any new consecutive data, i.e. the
7292 	 * cum-ack moved; process these first and note that it moved.
7293 	 * 4) Process any sack blocks. 5) Drop any acked from the queue.
7294 	 * 6) Check for any revoked blocks and mark. 7) Update the cwnd.
7295 	 * 8) Nothing left: sync up flightsizes and things, stop all timers
7296 	 * and also check for shutdown_pending state; if so then go ahead
7297 	 * and send off the shutdown. If in shutdown recv, send off the
7298 	 * shutdown-ack and start that timer, then return. 9) Strike any
7299 	 * non-acked things and do the FR procedure if needed, being sure
7300 	 * to set the FR flag. 10) Do pr-sctp procedures. 11) Apply any FR
7301 	 * penalties. 12) Assure we will SACK if in shutdown_recv state.
7302 	 */
7303 	SCTP_TCB_LOCK_ASSERT(stcb);
7304 	nr_sack = &ch->nr_sack;
7305 	/* CMT DAC algo */
7306 	this_sack_lowest_newack = 0;
7307 	j = 0;
7308 	nr_sack_length = (unsigned int)nr_sack_len;
7309 	/* ECN Nonce */
7310 	SCTP_STAT_INCR(sctps_slowpath_sack);
7311 	nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
7312 	cum_ack = last_tsn = ntohl(nr_sack->cum_tsn_ack);
7313 #ifdef SCTP_ASOCLOG_OF_TSNS
7314 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
7315 	stcb->asoc.cumack_log_at++;
7316 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
7317 		stcb->asoc.cumack_log_at = 0;
7318 	}
7319 #endif
7320 	all_bit = ch->ch.chunk_flags & SCTP_NR_SACK_ALL_BIT;
7321 	num_seg = ntohs(nr_sack->num_gap_ack_blks);
7322 	num_nr_seg = ntohs(nr_sack->num_nr_gap_ack_blks);
7323 	if (all_bit)
7324 		num_seg = num_nr_seg;
7325 	a_rwnd = rwnd;
7326 
7327 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
7328 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
7329 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
7330 	}
7331 	/* CMT DAC algo */
7332 	cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
7333 	num_dup = ntohs(nr_sack->num_dup_tsns);
7334 
7335 	old_rwnd = stcb->asoc.peers_rwnd;
7336 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
7337 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
7338 		    stcb->asoc.overall_error_count,
7339 		    0,
7340 		    SCTP_FROM_SCTP_INDATA,
7341 		    __LINE__);
7342 	}
7343 	stcb->asoc.overall_error_count = 0;
7344 	asoc = &stcb->asoc;
7345 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7346 		sctp_log_sack(asoc->last_acked_seq,
7347 		    cum_ack,
7348 		    0,
7349 		    num_seg,
7350 		    num_dup,
7351 		    SCTP_LOG_NEW_SACK);
7352 	}
7353 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_FR_LOGGING_ENABLE | SCTP_EARLYFR_LOGGING_ENABLE))) {
7354 		int off_to_dup, iii;
7355 		uint32_t *dupdata, dblock;
7356 
7357 		/* EY! gotta be careful here */
7358 		if (all_bit) {
7359 			off_to_dup = (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) +
7360 			    sizeof(struct sctp_nr_sack_chunk);
7361 		} else {
7362 			off_to_dup = (num_seg * sizeof(struct sctp_gap_ack_block)) +
7363 			    (num_nr_seg * sizeof(struct sctp_nr_gap_ack_block)) + sizeof(struct sctp_nr_sack_chunk);
7364 		}
7365 		if ((off_to_dup + (num_dup * sizeof(uint32_t))) <= nr_sack_length) {
7366 			dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7367 			    sizeof(uint32_t), (uint8_t *) & dblock);
7368 			off_to_dup += sizeof(uint32_t);
7369 			if (dupdata) {
7370 				for (iii = 0; iii < num_dup; iii++) {
7371 					sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
7372 					dupdata = (uint32_t *) sctp_m_getptr(m, off_to_dup,
7373 					    sizeof(uint32_t), (uint8_t *) & dblock);
7374 					if (dupdata == NULL)
7375 						break;
7376 					off_to_dup += sizeof(uint32_t);
7377 				}
7378 			}
7379 		} else {
7380 			SCTP_PRINTF("Size invalid offset to dups:%d number dups:%d nr_sack_len:%d num gaps:%d num nr_gaps:%d\n",
7381 			    off_to_dup, num_dup, nr_sack_length, num_seg, num_nr_seg);
7382 		}
7383 	}
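	/*
	 * Offset arithmetic above, worked with illustrative numbers: with
	 * 3 gap blocks and 2 nr gap blocks of 4 bytes each, the duplicate
	 * TSNs begin sizeof(struct sctp_nr_sack_chunk) + 3 * 4 + 2 * 4
	 * bytes into the chunk; when the All bit is set the plain gap
	 * blocks are absent, so only the nr blocks are skipped.
	 */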
7384 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7385 		/* reality check */
7386 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
7387 			tp1 = TAILQ_LAST(&asoc->sent_queue,
7388 			    sctpchunk_listhead);
7389 			send_s = tp1->rec.data.TSN_seq + 1;
7390 		} else {
7391 			send_s = asoc->sending_seq;
7392 		}
7393 		if (cum_ack == send_s ||
7394 		    compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
7395 #ifndef INVARIANTS
7396 			struct mbuf *oper;
7397 
7398 #endif
7399 #ifdef INVARIANTS
7400 	hopeless_peer:
7401 			panic("Impossible sack 1");
7402 #else
7403 
7404 
7405 			/*
7406 			 * no way, we have not even sent this TSN out yet.
7407 			 * Peer is hopelessly messed up with us.
7408 			 */
7409 	hopeless_peer:
7410 			*abort_now = 1;
7411 			/* XXX */
7412 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7413 			    0, M_DONTWAIT, 1, MT_DATA);
7414 			if (oper) {
7415 				struct sctp_paramhdr *ph;
7416 				uint32_t *ippp;
7417 
7418 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7419 				    sizeof(uint32_t);
7420 				ph = mtod(oper, struct sctp_paramhdr *);
7421 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
7422 				ph->param_length = htons(SCTP_BUF_LEN(oper));
7423 				ippp = (uint32_t *) (ph + 1);
7424 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
7425 			}
7426 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
7427 			sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
7428 			return;
7429 #endif
7430 		}
7431 	}
7432 	/**********************/
7433 	/* 1) check the range */
7434 	/**********************/
7435 	if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
7436 		/* acking something behind */
7437 		return;
7438 	}
7439 	sav_cum_ack = asoc->last_acked_seq;
7440 
7441 	/* update the Rwnd of the peer */
7442 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
7443 	    TAILQ_EMPTY(&asoc->send_queue) &&
7444 	    (asoc->stream_queue_cnt == 0)
7445 	    ) {
7446 		/* nothing left on send/sent and strmq */
7447 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7448 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7449 			    asoc->peers_rwnd, 0, 0, a_rwnd);
7450 		}
7451 		asoc->peers_rwnd = a_rwnd;
7452 		if (asoc->sent_queue_retran_cnt) {
7453 			asoc->sent_queue_retran_cnt = 0;
7454 		}
7455 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7456 			/* SWS sender side engages */
7457 			asoc->peers_rwnd = 0;
7458 		}
7459 		/* stop any timers */
7460 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7461 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7462 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7463 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7464 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7465 					SCTP_STAT_INCR(sctps_earlyfrstpidsck1);
7466 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7467 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
7468 				}
7469 			}
7470 			net->partial_bytes_acked = 0;
7471 			net->flight_size = 0;
7472 		}
7473 		asoc->total_flight = 0;
7474 		asoc->total_flight_count = 0;
7475 		return;
7476 	}
7477 	/*
7478 	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
7479 	 * things. The total byte count acked is tracked in netAckSz AND
7480 	 * netAck2 is used to track the total bytes acked that are
7481 	 * unambiguous and were never retransmitted. We track these on a per
7482 	 * destination address basis.
7483 	 */
7484 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7485 		net->prev_cwnd = net->cwnd;
7486 		net->net_ack = 0;
7487 		net->net_ack2 = 0;
7488 
7489 		/*
7490 		 * CMT: Reset CUC and Fast recovery algo variables before
7491 		 * SACK processing
7492 		 */
7493 		net->new_pseudo_cumack = 0;
7494 		net->will_exit_fast_recovery = 0;
7495 	}
7496 	/* process the new consecutive TSN first */
7497 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7498 	while (tp1) {
7499 		if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
7500 		    MAX_TSN) ||
7501 		    last_tsn == tp1->rec.data.TSN_seq) {
7502 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
7503 				/*
7504 				 * ECN Nonce: Add the nonce to the sender's
7505 				 * nonce sum
7506 				 */
7507 				asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
7508 				accum_moved = 1;
7509 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
7510 					/*
7511 					 * If it is less than ACKED, it is
7512 					 * now no longer in flight. Higher
7513 					 * values may occur during marking
7514 					 */
7515 					if ((tp1->whoTo->dest_state &
7516 					    SCTP_ADDR_UNCONFIRMED) &&
7517 					    (tp1->snd_count < 2)) {
7518 						/*
7519 						 * If there was no retran
7520 						 * and the address is
7521 						 * un-confirmed and we sent
7522 						 * there and are now
7523 						 * sacked.. it's confirmed,
7524 						 * mark it so.
7525 						 */
7526 						tp1->whoTo->dest_state &=
7527 						    ~SCTP_ADDR_UNCONFIRMED;
7528 					}
7529 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
7530 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7531 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
7532 							    tp1->whoTo->flight_size,
7533 							    tp1->book_size,
7534 							    (uintptr_t) tp1->whoTo,
7535 							    tp1->rec.data.TSN_seq);
7536 						}
7537 						sctp_flight_size_decrease(tp1);
7538 						sctp_total_flight_decrease(stcb, tp1);
7539 					}
7540 					tp1->whoTo->net_ack += tp1->send_size;
7541 
7542 					/* CMT SFR and DAC algos */
7543 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
7544 					tp1->whoTo->saw_newack = 1;
7545 
7546 					if (tp1->snd_count < 2) {
7547 						/*
7548 						 * True non-retransmitted
7549 						 * chunk
7550 						 */
7551 						tp1->whoTo->net_ack2 +=
7552 						    tp1->send_size;
7553 
7554 						/* update RTO too? */
7555 						if (tp1->do_rtt) {
7556 							tp1->whoTo->RTO =
7557 							    sctp_calculate_rto(stcb,
7558 							    asoc, tp1->whoTo,
7559 							    &tp1->sent_rcv_time,
7560 							    sctp_align_safe_nocopy);
7561 							tp1->do_rtt = 0;
7562 						}
7563 					}
7564 					/*
7565 					 * CMT: CUCv2 algorithm. From the
7566 					 * cumack'd TSNs, for each TSN being
7567 					 * acked for the first time, set the
7568 					 * following variables for the
7569 					 * corresp destination.
7570 					 * new_pseudo_cumack will trigger a
7571 					 * cwnd update.
7572 					 * find_(rtx_)pseudo_cumack will
7573 					 * trigger search for the next
7574 					 * expected (rtx-)pseudo-cumack.
7575 					 */
7576 					tp1->whoTo->new_pseudo_cumack = 1;
7577 					tp1->whoTo->find_pseudo_cumack = 1;
7578 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
7579 
7580 
7581 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7582 						sctp_log_sack(asoc->last_acked_seq,
7583 						    cum_ack,
7584 						    tp1->rec.data.TSN_seq,
7585 						    0,
7586 						    0,
7587 						    SCTP_LOG_TSN_ACKED);
7588 					}
7589 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7590 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
7591 					}
7592 				}
7593 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
7594 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7595 #ifdef SCTP_AUDITING_ENABLED
7596 					sctp_audit_log(0xB3,
7597 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
7598 #endif
7599 				}
7600 				if (tp1->rec.data.chunk_was_revoked) {
7601 					/* deflate the cwnd */
7602 					tp1->whoTo->cwnd -= tp1->book_size;
7603 					tp1->rec.data.chunk_was_revoked = 0;
7604 				}
7605 				tp1->sent = SCTP_DATAGRAM_ACKED;
7606 			}
7607 		} else {
7608 			break;
7609 		}
7610 		tp1 = TAILQ_NEXT(tp1, sctp_next);
7611 	}
7612 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
7613 	/* always set this up to cum-ack */
7614 	asoc->this_sack_highest_gap = last_tsn;
7615 
7616 	/* Move offset up to point to gaps/dups */
7617 	offset += sizeof(struct sctp_nr_sack_chunk);
7618 	if (((num_seg * (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_nr_sack_chunk)) > nr_sack_length) {
7619 
7620 		/* skip corrupt segments */
7621 		goto skip_segments;
7622 	}
7623 	if (num_seg > 0) {
7624 
7625 		/*
7626 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
7627 		 * to be greater than the cumack. Also reset saw_newack to 0
7628 		 * for all dests.
7629 		 */
7630 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7631 			net->saw_newack = 0;
7632 			net->this_sack_highest_newack = last_tsn;
7633 		}
7634 
7635 		/*
7636 		 * thisSackHighestGap will increase while handling NEW
7637 		 * segments; this_sack_highest_newack will increase while
7638 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
7639 		 * used for CMT DAC algo. saw_newack will also change.
7640 		 */
7641 
7642 		sctp_handle_nr_sack_segments(m, &offset, stcb, asoc, ch, last_tsn,
7643 		    &biggest_tsn_acked, &biggest_tsn_newly_acked, &this_sack_lowest_newack,
7644 		    num_seg, num_nr_seg, &ecn_seg_sums);
7645 
7646 
7647 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
7648 			/*
7649 			 * validate the biggest_tsn_acked in the gap acks if
7650 			 * strict adherence is wanted.
7651 			 */
7652 			if ((biggest_tsn_acked == send_s) ||
7653 			    (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
7654 				/*
7655 				 * peer is either confused or we are under
7656 				 * attack. We must abort.
7657 				 */
7658 				goto hopeless_peer;
7659 			}
7660 		}
7661 	}
7662 skip_segments:
7663 	/*******************************************/
7664 	/* cancel ALL T3-send timer if accum moved */
7665 	/*******************************************/
7666 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7667 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7668 			if (net->new_pseudo_cumack)
7669 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7670 				    stcb, net,
7671 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
7672 
7673 		}
7674 	} else {
7675 		if (accum_moved) {
7676 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7677 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7678 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
7679 			}
7680 		}
7681 	}
7682 	/********************************************/
7683 	/* drop the acked chunks from the sendqueue */
7684 	/********************************************/
7685 	asoc->last_acked_seq = cum_ack;
7686 
7687 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
7688 	if (tp1 == NULL)
7689 		goto done_with_it;
7690 	do {
7691 		if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
7692 		    MAX_TSN)) {
7693 			break;
7694 		}
7695 		if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
7696 			/* no more sent on list */
7697 			printf("Warning, tp1->sent == %d and it's now acked?\n",
7698 			    tp1->sent);
7699 		}
7700 		tp2 = TAILQ_NEXT(tp1, sctp_next);
7701 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
7702 		if (tp1->pr_sctp_on) {
7703 			if (asoc->pr_sctp_cnt != 0)
7704 				asoc->pr_sctp_cnt--;
7705 		}
7706 		if ((TAILQ_FIRST(&asoc->sent_queue) == NULL) &&
7707 		    (asoc->total_flight > 0)) {
7708 #ifdef INVARIANTS
7709 			panic("Warning flight size is positive and should be 0");
7710 #else
7711 			SCTP_PRINTF("Warning flight size incorrect, should be 0 but is %d\n",
7712 			    asoc->total_flight);
7713 #endif
7714 			asoc->total_flight = 0;
7715 		}
7716 		if (tp1->data) {
7717 			/* sa_ignore NO_NULL_CHK */
7718 			sctp_free_bufspace(stcb, asoc, tp1, 1);
7719 			sctp_m_freem(tp1->data);
7720 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
7721 				asoc->sent_queue_cnt_removeable--;
7722 			}
7723 		}
7724 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
7725 			sctp_log_sack(asoc->last_acked_seq,
7726 			    cum_ack,
7727 			    tp1->rec.data.TSN_seq,
7728 			    0,
7729 			    0,
7730 			    SCTP_LOG_FREE_SENT);
7731 		}
7732 		tp1->data = NULL;
7733 		asoc->sent_queue_cnt--;
7734 		sctp_free_a_chunk(stcb, tp1);
7735 		wake_him++;
7736 		tp1 = tp2;
7737 	} while (tp1 != NULL);
7738 
7739 done_with_it:
7740 	/* sa_ignore NO_NULL_CHK */
7741 	if ((wake_him) && (stcb->sctp_socket)) {
7742 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7743 		struct socket *so;
7744 
7745 #endif
7746 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
7747 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7748 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_WAKESND_FROM_SACK);
7749 		}
7750 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7751 		so = SCTP_INP_SO(stcb->sctp_ep);
7752 		atomic_add_int(&stcb->asoc.refcnt, 1);
7753 		SCTP_TCB_UNLOCK(stcb);
7754 		SCTP_SOCKET_LOCK(so, 1);
7755 		SCTP_TCB_LOCK(stcb);
7756 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
7757 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7758 			/* assoc was freed while we were unlocked */
7759 			SCTP_SOCKET_UNLOCK(so, 1);
7760 			return;
7761 		}
7762 #endif
7763 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
7764 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
7765 		SCTP_SOCKET_UNLOCK(so, 1);
7766 #endif
7767 	} else {
7768 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
7769 			sctp_wakeup_log(stcb, cum_ack, wake_him, SCTP_NOWAKE_FROM_SACK);
7770 		}
7771 	}
7772 
7773 	if (asoc->fast_retran_loss_recovery && accum_moved) {
7774 		if (compare_with_wrap(asoc->last_acked_seq,
7775 		    asoc->fast_recovery_tsn, MAX_TSN) ||
7776 		    asoc->last_acked_seq == asoc->fast_recovery_tsn) {
7777 			/* Setup so we will exit RFC2582 fast recovery */
7778 			will_exit_fast_recovery = 1;
7779 		}
7780 	}
7781 	/*
7782 	 * Check for revoked fragments:
7783 	 *
7784 	 * if the previous sack had no frags, we can't have any revoked.
7785 	 * if the previous sack had frags, then: if we now have frags, aka
7786 	 * num_seg > 0, call sctp_check_for_revoked() to tell if the peer
7787 	 * revoked some of them; else the peer revoked all ACKED fragments,
7788 	 * since we had some before and now we have NONE.
7789 	 */
7790 
7791 	if (num_seg)
7792 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7793 
7794 	else if (asoc->saw_sack_with_frags) {
7795 		int cnt_revoked = 0;
7796 
7797 		tp1 = TAILQ_FIRST(&asoc->sent_queue);
7798 		if (tp1 != NULL) {
7799 			/* Peer revoked all dg's marked or acked */
7800 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
7801 				/*
7802 				 * EY- maybe check only if it is nr_acked;
7803 				 * nr_marked may not be possible
7804 				 */
7805 				if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
7806 				    (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
7807 					/*
7808 					 * EY! - TODO: Something previously
7809 					 * nr_gapped is reneged, abort the
7810 					 * association
7811 					 */
7812 					return;
7813 				}
7814 				if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
7815 				    (tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
7816 					tp1->sent = SCTP_DATAGRAM_SENT;
7817 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7818 						sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
7819 						    tp1->whoTo->flight_size,
7820 						    tp1->book_size,
7821 						    (uintptr_t) tp1->whoTo,
7822 						    tp1->rec.data.TSN_seq);
7823 					}
7824 					sctp_flight_size_increase(tp1);
7825 					sctp_total_flight_increase(stcb, tp1);
7826 					tp1->rec.data.chunk_was_revoked = 1;
7827 					/*
7828 					 * To ensure that this increase in
7829 					 * flightsize, which is artificial,
7830 					 * does not throttle the sender, we
7831 					 * also increase the cwnd
7832 					 * artificially.
7833 					 */
7834 					tp1->whoTo->cwnd += tp1->book_size;
7835 					cnt_revoked++;
7836 				}
7837 			}
7838 			if (cnt_revoked) {
7839 				reneged_all = 1;
7840 			}
7841 		}
7842 		asoc->saw_sack_with_frags = 0;
7843 	}
7844 	if (num_seg)
7845 		asoc->saw_sack_with_frags = 1;
7846 	else
7847 		asoc->saw_sack_with_frags = 0;
7848 
7849 	/* EY! - not sure whether there should be an IF here */
7850 	if (num_nr_seg)
7851 		sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
7852 	else if (asoc->saw_sack_with_nr_frags) {
7853 		/*
7854 		 * EY!- TODO: all previously nr_gapped chunks have been
7855 		 * reneged; abort the association
7856 		 */
7857 		asoc->saw_sack_with_nr_frags = 0;
7858 	}
7859 	if (num_nr_seg)
7860 		asoc->saw_sack_with_nr_frags = 1;
7861 	else
7862 		asoc->saw_sack_with_nr_frags = 0;
7863 	/* JRS - Use the congestion control given in the CC module */
7864 	asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
7865 
7866 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
7867 		/* nothing left in-flight */
7868 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7869 			/* stop all timers */
7870 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
7871 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7872 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
7873 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
7874 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
7875 				}
7876 			}
7877 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
7878 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
7879 			net->flight_size = 0;
7880 			net->partial_bytes_acked = 0;
7881 		}
7882 		asoc->total_flight = 0;
7883 		asoc->total_flight_count = 0;
7884 	}
7885 	/**********************************/
7886 	/* Now what about shutdown issues */
7887 	/**********************************/
7888 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
7889 		/* nothing left on sendqueue.. consider done */
7890 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7891 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
7892 			    asoc->peers_rwnd, 0, 0, a_rwnd);
7893 		}
7894 		asoc->peers_rwnd = a_rwnd;
7895 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7896 			/* SWS sender side engages */
7897 			asoc->peers_rwnd = 0;
7898 		}
7899 		/* clean up */
7900 		if ((asoc->stream_queue_cnt == 1) &&
7901 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7902 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
7903 		    (asoc->locked_on_sending)
7904 		    ) {
7905 			struct sctp_stream_queue_pending *sp;
7906 
7907 			/*
7908 			 * I may be in a state where we got it all across.. but
7909 			 * cannot write more due to a shutdown... we abort
7910 			 * since the user did not indicate EOR in this case.
7911 			 */
7912 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
7913 			    sctp_streamhead);
7914 			if ((sp) && (sp->length == 0)) {
7915 				asoc->locked_on_sending = NULL;
7916 				if (sp->msg_is_complete) {
7917 					asoc->stream_queue_cnt--;
7918 				} else {
7919 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
7920 					asoc->stream_queue_cnt--;
7921 				}
7922 			}
7923 		}
7924 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
7925 		    (asoc->stream_queue_cnt == 0)) {
7926 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
7927 				/* Need to abort here */
7928 				struct mbuf *oper;
7929 
7930 		abort_out_now:
7931 				*abort_now = 1;
7932 				/* XXX */
7933 				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
7934 				    0, M_DONTWAIT, 1, MT_DATA);
7935 				if (oper) {
7936 					struct sctp_paramhdr *ph;
7937 					uint32_t *ippp;
7938 
7939 					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
7940 					    sizeof(uint32_t);
7941 					ph = mtod(oper, struct sctp_paramhdr *);
7942 					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
7943 					ph->param_length = htons(SCTP_BUF_LEN(oper));
7944 					ippp = (uint32_t *) (ph + 1);
7945 					*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
7946 				}
7947 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
7948 				sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, oper, SCTP_SO_NOT_LOCKED);
7949 				return;
7950 			} else {
7951 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
7952 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
7953 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7954 				}
7955 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
7956 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
7957 				sctp_stop_timers_for_shutdown(stcb);
7958 				sctp_send_shutdown(stcb,
7959 				    stcb->asoc.primary_destination);
7960 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
7961 				    stcb->sctp_ep, stcb, asoc->primary_destination);
7962 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
7963 				    stcb->sctp_ep, stcb, asoc->primary_destination);
7964 			}
7965 			return;
7966 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
7967 		    (asoc->stream_queue_cnt == 0)) {
7968 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
7969 				goto abort_out_now;
7970 			}
7971 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
7972 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
7973 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
7974 			sctp_send_shutdown_ack(stcb,
7975 			    stcb->asoc.primary_destination);
7976 
7977 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
7978 			    stcb->sctp_ep, stcb, asoc->primary_destination);
7979 			return;
7980 		}
7981 	}
7982 	/*
7983 	/*
7984 	 * HEADS UP: from here on, net_ack is recycled for a
7985 	 * different use.
7986 	 */
7986 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7987 		net->net_ack = 0;
7988 	}
7989 
7990 	/*
7991 	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
7992 	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
7993 	 * automatically ensure that.
7994 	 */
7995 	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && (cmt_dac_flag == 0)) {
7996 		this_sack_lowest_newack = cum_ack;
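	/*
	 * Sketch of the intent (an assumption about the strike logic,
	 * which lives in sctp_strike_gap_ack_chunks()): the DAC rule only
	 * adds an extra strike to a chunk whose TSN lies below
	 * this_sack_lowest_newack, roughly
	 *
	 *   if (compare_with_wrap(this_sack_lowest_newack,
	 *           tp1->rec.data.TSN_seq, MAX_TSN))
	 *           tp1->sent++;
	 *
	 * so pinning this_sack_lowest_newack to cum_ack makes that test
	 * fail for every TSN still outstanding.
	 */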
7997 	}
7998 	if (num_seg > 0) {
7999 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
8000 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
8001 	}
8002 	/* JRS - Use the congestion control given in the CC module */
8003 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
8004 
8005 	/******************************************************************
8006 	 *  ECN Nonce checking.
8007 	 *  Check whether the nonce sum flag was incorrect or whether
8008 	 *  resynchronization is needed; if we catch a misbehaving
8009 	 *  receiver, disable ECN for this association.
8010 	 ******************************************************************/
8011 
8012 	if (asoc->ecn_nonce_allowed) {
8013 		if (asoc->nonce_sum_check) {
8014 			if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
8015 				if (asoc->nonce_wait_for_ecne == 0) {
8016 					struct sctp_tmit_chunk *lchk;
8017 
8018 					lchk = TAILQ_FIRST(&asoc->send_queue);
8019 					asoc->nonce_wait_for_ecne = 1;
8020 					if (lchk) {
8021 						asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
8022 					} else {
8023 						asoc->nonce_wait_tsn = asoc->sending_seq;
8024 					}
8025 				} else {
8026 					if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
8027 					    (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
8028 						/*
8029 						 * Misbehaving peer; disable
8030 						 * ECN for this association.
8031 						 */
8032 						asoc->ecn_allowed = 0;
8033 						asoc->ecn_nonce_allowed = 0;
8034 					}
8035 				}
8036 			}
8037 		} else {
8038 			/* See if Resynchronization Possible */
8039 			if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
8040 				asoc->nonce_sum_check = 1;
8041 				/*
8042 				 * Now we must calculate the base. We know
8043 				 * the totals for all segments gap-acked in
8044 				 * the SACK (stored in ecn_seg_sums) and the
8045 				 * SACK's nonce sum (in nonce_sum_flag), so a
8046 				 * truth table back-calculates the new value
8047 				 * of asoc->nonce_sum_expect_base:
8048 				 *
8049 				 * SACK-flag  Seg-Sums  Base
8050 				 *     0         0       0
8051 				 *     1         0       1
8052 				 *     0         1       1
8053 				 *     1         1       0
8054 				 */
8055 				asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
8056 			}
8057 		}
8058 	}
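	/*
	 * Worked example (illustrative, not in the original source): the
	 * resync path relies on base ^ seg_sums == sack_flag, so XOR
	 * recovers the base. If the SACK carried nonce_sum_flag = 1 while
	 * the gap-acked segments summed to ecn_seg_sums = 0 (mod 2), then
	 *
	 *   base = (0 ^ 1) & SCTP_SACK_NONCE_SUM = 1
	 *
	 * matching the second row of the truth table above.
	 */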
8059 	/* Now, are we exiting loss recovery? */
8060 	if (will_exit_fast_recovery) {
8061 		/* Ok, we must exit fast recovery */
8062 		asoc->fast_retran_loss_recovery = 0;
8063 	}
8064 	if ((asoc->sat_t3_loss_recovery) &&
8065 	    ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
8066 	    MAX_TSN) ||
8067 	    (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
8068 		/* end satellite t3 loss recovery */
8069 		asoc->sat_t3_loss_recovery = 0;
8070 	}
8071 	/*
8072 	 * CMT Fast recovery
8073 	 */
8074 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8075 		if (net->will_exit_fast_recovery) {
8076 			/* Ok, we must exit fast recovery */
8077 			net->fast_retran_loss_recovery = 0;
8078 		}
8079 	}
8080 
8081 	/* Adjust and set the new rwnd value */
8082 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
8083 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
8084 		    asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
8085 	}
8086 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
8087 	    (uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
8088 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
8089 		/* SWS sender side engages */
8090 		asoc->peers_rwnd = 0;
8091 	}
8092 	if (asoc->peers_rwnd > old_rwnd) {
8093 		win_probe_recovery = 1;
8094 	}
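	/*
	 * Worked example (all figures assumed): with a_rwnd = 64000,
	 * total_flight = 12000, sent_queue_cnt = 10 and a per-chunk
	 * overhead guess (sctp_peer_chunk_oh) of 256, the estimate is
	 *
	 *   peers_rwnd = 64000 - (12000 + 10 * 256) = 49440
	 *
	 * i.e. the advertised window minus everything we still count as
	 * in flight, padded by a guess at the peer's per-chunk
	 * bookkeeping overhead. sctp_sbspace_sub() clamps the
	 * subtraction at zero rather than wrapping, which keeps the SWS
	 * check above it safe.
	 */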
8095 	/*
8096 	 * Now set up a retransmission timer for every destination
8097 	 * that still has data outstanding.
8098 	 */
8099 	done_once = 0;
8100 again:
8101 	j = 0;
8102 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8103 		if (win_probe_recovery && (net->window_probe)) {
8104 			net->window_probe = 0;
8105 			win_probe_recovered = 1;
8106 			/*-
8107 			 * Find the first chunk that was used for a
8108 			 * window probe and clear the event. Put it
8109 			 * back into the send queue as if it had not
8110 			 * been sent.
8111 			 */
8112 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8113 				if (tp1->window_probe) {
8114 					sctp_window_probe_recovery(stcb, asoc, net, tp1);
8115 					break;
8116 				}
8117 			}
8118 		}
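		/*
		 * Sketch (an assumption about sctp_window_probe_recovery(),
		 * defined elsewhere): the probe chunk is pulled out of the
		 * flight accounting and requeued roughly as
		 *
		 *   tp1->sent = SCTP_DATAGRAM_UNSENT;
		 *   sctp_flight_size_decrease(tp1);
		 *
		 * so the reopened window carries fresh data instead of
		 * double-counting the probe as still outstanding.
		 */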
8119 		if (net->flight_size) {
8120 			j++;
8121 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8122 			    stcb->sctp_ep, stcb, net);
8123 		} else {
8124 			if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8125 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
8126 				    stcb, net,
8127 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
8128 			}
8129 			if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8130 				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8131 					SCTP_STAT_INCR(sctps_earlyfrstpidsck4);
8132 					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
8133 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
8134 				}
8135 			}
8136 		}
8137 	}
8138 	if ((j == 0) &&
8139 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
8140 	    (asoc->sent_queue_retran_cnt == 0) &&
8141 	    (win_probe_recovered == 0) &&
8142 	    (done_once == 0)) {
8143 		/* this should not happen; audit and rebuild the flight accounting */
8144 		sctp_fs_audit(asoc);
8145 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8146 			net->flight_size = 0;
8147 		}
8148 		asoc->total_flight = 0;
8149 		asoc->total_flight_count = 0;
8150 		asoc->sent_queue_retran_cnt = 0;
8151 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
8152 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
8153 				sctp_flight_size_increase(tp1);
8154 				sctp_total_flight_increase(stcb, tp1);
8155 			} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
8156 				asoc->sent_queue_retran_cnt++;
8157 			}
8158 		}
8159 		done_once = 1;
8160 		goto again;
8161 	}
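	/*
	 * Note (paraphrase, not original text): reaching the block above
	 * means no destination reported flight, yet the sent queue was
	 * non-empty with nothing marked for retransmit -- the flight
	 * accounting has drifted. Every counter is zeroed and rebuilt
	 * from the sent queue, and one more pass is taken through
	 * "again" so timers restart against the recomputed flight
	 * sizes; done_once guards against looping forever.
	 */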
8162 	/*********************************************/
8163 	/* Here we perform PR-SCTP procedures        */
8164 	/* (section 4.2)                             */
8165 	/*********************************************/
8166 	/* C1. update advancedPeerAckPoint */
8167 	if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
8168 		asoc->advanced_peer_ack_point = cum_ack;
8169 	}
8170 	/* C2. try to further move advancedPeerAckPoint ahead */
8171 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
8172 		struct sctp_tmit_chunk *lchk;
8173 		uint32_t old_adv_peer_ack_point;
8174 
8175 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
8176 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
8177 		/* C3. See if we need to send a Fwd-TSN */
8178 		if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
8179 		    MAX_TSN)) {
8180 			/*
8181 			 * ISSUE with ECN, see FWD-TSN processing for notes
8182 			 * on issues that will occur when the ECN NONCE
8183 			 * stuff is put into SCTP for cross checking.
8184 			 */
8185 			if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
8186 			    MAX_TSN)) {
8187 				send_forward_tsn(stcb, asoc);
8188 				/*
8189 				 * ECN Nonce: Disable Nonce Sum check when
8190 				 * FWD TSN is sent and store resync tsn
8191 				 */
8192 				asoc->nonce_sum_check = 0;
8193 				asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
8194 			}
8195 		}
8196 		if (lchk) {
8197 			/* Assure a timer is up */
8198 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
8199 			    stcb->sctp_ep, stcb, lchk->whoTo);
8200 		}
8201 	}
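	/*
	 * Worked example (TSNs assumed for illustration): with
	 * cum_ack = 1000 and TSNs 1001-1003 abandoned under PR-SCTP,
	 * sctp_try_advance_peer_ack_point() moves
	 * advanced_peer_ack_point to 1003. Since 1003 is ahead of both
	 * cum_ack and the old ack point, a FORWARD-TSN is sent telling
	 * the peer to behave as if it had cumulatively acked through
	 * 1003.
	 */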
8202 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
8203 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
8204 		    a_rwnd,
8205 		    stcb->asoc.peers_rwnd,
8206 		    stcb->asoc.total_flight,
8207 		    stcb->asoc.total_output_queue_size);
8208 	}
8209 }
8210