/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#endif

#include <netinet/sctp_os.h>
#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <sys/proc.h>
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <netinet/sctp_lock_bsd.h>
#endif
/*
 * NOTES: On the outbound side of things we need to check the SACK timer to
 * see if a SACK should be generated into the chunk queue (that is, if we
 * have data to send and will be sending it, so the SACK can be bundled).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
			struct sctp_stream_in *strm,
			struct sctp_tcb *stcb,
			struct sctp_association *asoc,
			struct sctp_tmit_chunk *chk, int hold_rlock);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket.
	 * Since the sb_cc is the count that everyone has put up, when we
	 * rewrite sctp_soreceive we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}

	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	        ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * Take out what has NOT been put on the socket queue and which we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	                                         asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	                                         asoc->cnt_on_all_streams * MSIZE));
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS avoidance engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
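
/*
 * A minimal worked example of the computation above, with illustrative
 * numbers only (nothing here comes from the surrounding sources): assume
 * 40960 bytes of free receive-buffer space, 4 chunks totalling 4096 bytes
 * held on the reassembly queue, 2 chunks totalling 2048 bytes held on the
 * stream queues, and an assumed MSIZE of 256 bytes:
 *
 *   calc = 40960
 *        - (4096 + 4 * 256)    reassembly payload plus per-mbuf overhead
 *        - (2048 + 2 * 256)    stream-queue payload plus per-mbuf overhead
 *        = 33280
 *
 * my_rwnd_control_len is then subtracted as well; should the result fall
 * below that control overhead, the advertised window is clamped to 1 byte
 * rather than 0, so the peer keeps probing instead of stalling (silly
 * window syndrome avoidance).
 */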

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		read_queue_e->do_not_ref_stcb = 1;
	}
failed_build:
	return (read_queue_e);
}

struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
#if defined(_WIN32)
	WSACMSGHDR *cmh;
#else
	struct cmsghdr *cmh;
#endif
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
#if defined(_WIN32)
	cmh = mtod(ret, WSACMSGHDR *);
#else
	cmh = mtod(ret, struct cmsghdr *);
#endif
	/*
	 * Make sure that there is no un-initialized padding between
	 * the cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
#if defined(_WIN32)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
#if defined(_WIN32)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
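
/*
 * Sketch of the resulting ancillary-data mbuf when all three features are
 * enabled (this just restates the layout produced above; actual sizes are
 * platform dependent):
 *
 *   CMSG_SPACE(struct sctp_rcvinfo)     level IPPROTO_SCTP, type SCTP_RCVINFO
 *   CMSG_SPACE(struct sctp_nxtinfo)     level IPPROTO_SCTP, type SCTP_NXTINFO
 *   CMSG_SPACE(struct sctp_sndrcvinfo)  level IPPROTO_SCTP, type SCTP_SNDRCV
 *                                       (or struct sctp_extrcvinfo with type
 *                                       SCTP_EXTRCV when EXT_RCVINFO is on)
 *
 * The whole region is zeroed up front so no uninitialized padding between
 * or after the cmsg entries can leak to user space.
 */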

static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i;
	int in_r, in_nr;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/*
		 * This TSN is behind the cum ack and thus we don't need to
		 * worry about it being moved from one map to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
	if (!in_nr) {
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
	}
	if (in_r) {
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
		if (tsn == asoc->highest_tsn_inside_map) {
			/* We must back down to see what the new highest is. */
			for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
				SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
				if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
					asoc->highest_tsn_inside_map = i;
					break;
				}
			}
			if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
				asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
			}
		}
	}
}
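
/*
 * Example of the gap computation above, with illustrative values not taken
 * from the code: if mapping_array_base_tsn is 1000 and tsn is 1003,
 * SCTP_CALC_TSN_TO_GAP yields gap = 3, i.e. bit 3 of the mapping arrays.
 * Marking a TSN non-revokable moves that bit from mapping_array over to
 * nr_mapping_array, which is why the KASSERT demands that at least one of
 * in_r/in_nr is set on entry.
 */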

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
			     struct sctp_association *asoc,
			     struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/* Only one control can be here in old style -- abort */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
		return (0);
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * The one in the queue is bigger than the
				 * new one, insert before this one.
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, the peer sent a duplicate msg id
				 * number?? Return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
								  SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
                    struct sctp_queued_to_read *control,
                    struct sctp_tmit_chunk *chk,
                    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		              opspot,
		              control->fsn_included,
		              chk->rec.data.tsn,
		              chk->rec.data.sid,
		              chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		              opspot,
		              control->fsn_included,
		              chk->rec.data.tsn,
		              chk->rec.data.sid,
		              chk->rec.data.fsn,
		              (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (chk->data)
			sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_remote_addr(control->whoFrom);
	if (control->data) {
		sctp_m_freem(control->data);
		control->data = NULL;
	}
	sctp_free_a_readq(stcb, control);
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue.  If we do
 * append to the so_buf, keep doing so until we hit an out-of-order entry,
 * as long as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
			strm->last_mid_delivered, control->mid);
		/*
		 * Throw it in the stream so it gets cleaned up in
		 * association destruction.
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			              strm->last_mid_delivered, control->sinfo_tsn,
			              control->sinfo_stream, control->mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			              (uint16_t)strm->last_mid_delivered,
			              control->sinfo_tsn,
			              control->sinfo_stream,
			              (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) && !defined(__Userspace__)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY - it won't be queued if it can be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		                  control,
		                  &stcb->sctp_socket->so_rcv, 1,
		                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					      control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
							  SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				                  control,
				                  &stcb->sctp_socket->so_rcv, 1,
				                  SCTP_READ_LOCK_NOT_HELD,
				                  SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			SCTP_SNPRINTF(msg, sizeof(msg),
			              "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}

static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has done any
			 * locking of the SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		sctp_m_freem(control->data);
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On the read queue, so we must increment the SB
			 * accounting; we assume the caller has done any
			 * locking of the SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
	nc->do_not_ref_stcb = control->do_not_ref_stcb;
}

static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
                     struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there; hopefully this will work
		 * :-)
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
                               struct sctp_association *asoc,
                               struct sctp_stream_in *strm,
                               struct sctp_queued_to_read *control,
                               uint32_t pd_point,
                               int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
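	/*
	 * Illustration (TSN values invented for this comment): under
	 * pre-I-DATA unordered delivery every fragment lands on MID 0 and
	 * the FSN is simply the TSN, so fragments with TSNs 7, 8, 9 (B bit
	 * on 7, E bit on 9) reassemble on this single unordered control; a
	 * second interleaved unordered message is distinguishable only by
	 * its higher TSNs, which is what the fsn_included walk below sorts
	 * out.
	 */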
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control.
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
							  &stcb->sctp_socket->so_rcv, control->end_added,
							  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
#if defined(__Userspace__)
				} else {
					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/* Switch to the new guy and continue */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if (cnt_added && strm->pd_api_started) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
#endif
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		                  &stcb->sctp_socket->so_rcv, control->end_added,
		                  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
                               struct sctp_association *asoc,
                               struct sctp_queued_to_read *control,
                               struct sctp_tmit_chunk *chk,
                               int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;
	/*
	 * Here we need to place the chunk into the control structure
	 * sorted in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a first fsn: %u becomes fsn_included\n",
			chk->rec.data.fsn);
		at = TAILQ_FIRST(&control->reasm);
		if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
			/*
			 * The first chunk in the reassembly is a smaller
			 * TSN than this one; even though this has a first,
			 * it must be from a subsequent msg.
			 */
			goto place_chunk;
		}
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control, as long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise).
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy beyond
				 * the lowest.
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen; if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case
				 * since I have no way to recover. This
				 * really will only happen if we can get
				 * more TSN's higher before the
				 * pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);

				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed...
			 * yuck! We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			                    abort_flag,
			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
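
/*
 * For example (FSN/TSN values invented for this comment): if the control
 * already holds a FIRST fragment with FSN 10 and a FIRST with FSN 5 then
 * arrives, the mbuf chains plus the FSN, TSN and PPID of the two are
 * swapped above, so the control always describes the lowest FIRST seen,
 * and the displaced fragment is filed back into the reasm list at its
 * proper place.
 */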

static int
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
                         struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing, if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
			       stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
			control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);

				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return (0);
	}
	if (control == NULL) {
		return (ret);
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * The guy at the top was being partially delivered and has
		 * completed, so we remove it. Note that the pd_api flag was
		 * taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
			control, control->end_added, control->mid,
			control->top_fsn, control->fsn_included,
			strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					      control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more; we must have gotten an un-ordered one
		 * above that is being partially delivered.
		 */
		return (0);
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
			control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
			next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						      control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too.
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until it is
				 * all there.
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it, or cannot add more
					 * (one being delivered that way).
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
						  control,
						  &stcb->sctp_socket->so_rcv, control->end_added,
						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return (ret);
}

uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
			struct sctp_stream_in *strm,
			struct sctp_tcb *stcb, struct sctp_association *asoc,
			struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto
	 * the control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * It's being pd-api'd so we must do some locking.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* It's complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				      control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
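/*
 * Roughly, per the logic below (TSNs invented for this comment): for a
 * message fragmented as B:100 M:101 M:102 E:103 arriving in the order
 * 100, 102, 101, 103 -- 100 seeds control->data, 102 is parked on
 * control->reasm since it is not next in sequence, 101 then merges into
 * the control, and 103 finally marks the message complete so it can be
 * handed to the read queue.
 */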
1384 static void
sctp_queue_data_for_reasm(struct sctp_tcb * stcb,struct sctp_association * asoc,struct sctp_queued_to_read * control,struct sctp_tmit_chunk * chk,int created_control,int * abort_flag,uint32_t tsn)1385 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1386 			  struct sctp_queued_to_read *control,
1387 			  struct sctp_tmit_chunk *chk,
1388 			  int created_control,
1389 			  int *abort_flag, uint32_t tsn)
1390 {
1391 	uint32_t next_fsn;
1392 	struct sctp_tmit_chunk *at, *nat;
1393 	struct sctp_stream_in *strm;
1394 	int do_wakeup, unordered;
1395 	uint32_t lenadded;
1396 
1397 	strm = &asoc->strmin[control->sinfo_stream];
1398 	/*
1399 	 * For old un-ordered data chunks.
1400 	 */
1401 	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
1402 		unordered = 1;
1403 	} else {
1404 		unordered = 0;
1405 	}
1406 	/* Must be added to the stream-in queue */
1407 	if (created_control) {
1408 		if ((unordered == 0) || (asoc->idata_supported)) {
1409 			sctp_ucount_incr(asoc->cnt_on_all_streams);
1410 		}
1411 		if (sctp_place_control_in_stream(strm, asoc, control)) {
1412 			/* Duplicate SSN? */
1413 			sctp_abort_in_reasm(stcb, control, chk,
1414 					    abort_flag,
1415 					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1416 			sctp_clean_up_control(stcb, control);
1417 			return;
1418 		}
1419 		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1420 			/* Ok we created this control and now
1421 			 * lets validate that its legal i.e. there
1422 			 * is a B bit set, if not and we have
1423 			 * up to the cum-ack then its invalid.
1424 			 */
1425 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1426 				sctp_abort_in_reasm(stcb, control, chk,
1427 				                    abort_flag,
1428 				                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1429 				return;
1430 			}
1431 		}
1432 	}
1433 	if ((asoc->idata_supported == 0) && (unordered == 1)) {
1434 		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1435 		return;
1436 	}
1437 	/*
1438 	 * Ok we must queue the chunk into the reasembly portion:
1439 	 *  o if its the first it goes to the control mbuf.
1440 	 *  o if its not first but the next in sequence it goes to the control,
1441 	 *    and each succeeding one in order also goes.
1442 	 *  o if its not in order we place it on the list in its place.
1443 	 */
1444 	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1445 		/* Its the very first one. */
1446 		SCTPDBG(SCTP_DEBUG_XXX,
1447 			"chunk is a first fsn: %u becomes fsn_included\n",
1448 			chk->rec.data.fsn);
1449 		if (control->first_frag_seen) {
1450 			/*
1451 			 * Error on senders part, they either
1452 			 * sent us two data chunks with FIRST,
1453 			 * or they sent two un-ordered chunks that
1454 			 * were fragmented at the same time in the same stream.
1455 			 */
1456 			sctp_abort_in_reasm(stcb, control, chk,
1457 			                    abort_flag,
1458 			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1459 			return;
1460 		}
1461 		control->first_frag_seen = 1;
1462 		control->sinfo_ppid = chk->rec.data.ppid;
1463 		control->sinfo_tsn = chk->rec.data.tsn;
1464 		control->fsn_included = chk->rec.data.fsn;
1465 		control->data = chk->data;
1466 		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1467 		chk->data = NULL;
1468 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1469 		sctp_setup_tail_pointer(control);
1470 		asoc->size_on_all_streams += control->length;
1471 	} else {
1472 		/* Place the chunk in our list */
1473 		int inserted=0;
		if (control->last_frag_seen == 0) {
			/* Still willing to raise the highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"We have a new top_fsn: %u\n",
					chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"The last fsn is now in place fsn: %u\n",
					chk->rec.data.fsn);
				control->last_frag_seen = 1;
				if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
					SCTPDBG(SCTP_DEBUG_XXX,
						"New fsn: %u is not at top_fsn: %u -- abort\n",
						chk->rec.data.fsn,
						control->top_fsn);
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
					return;
				}
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For I-DATA we always check, since we know
				 * that the first fragment has FSN 0. For old
				 * DATA we have to receive the first fragment
				 * before we know the first FSN (which is the
				 * TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this, so it's a duplicate */
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
					return;
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* A second LAST fragment? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate last fsn: %u (top: %u) -- abort\n",
					chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
						    chk, abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For I-DATA we always check, since we know
				 * that the first fragment has FSN 0. For old
				 * DATA we have to receive the first fragment
				 * before we know the first FSN (which is the
				 * TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/* We have already delivered up to this, so it's a duplicate */
					SCTPDBG(SCTP_DEBUG_XXX,
						"New fsn: %u is already seen in included_fsn: %u -- abort\n",
						chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
							    abort_flag,
							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
					return;
				}
			}
			/* validate it is not beyond the top FSN, since we have seen the last one */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
					"New fsn: %u is beyond or at top_fsn: %u -- abort\n",
					chk->rec.data.fsn,
					control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk
		 * in the reassembly queue for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
			"chunk is a not first fsn: %u needs to be inserted\n",
			chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
					/* A LAST fragment not at the end? huh? */
					SCTPDBG(SCTP_DEBUG_XXX,
					        "Last fragment not last in list: -- abort\n");
					sctp_abort_in_reasm(stcb, control,
					                    chk, abort_flag,
					                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
					return;
				}
				/*
				 * This one in the queue is bigger than the new
				 * one: insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Insert it before fsn: %u\n",
					at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/*
				 * The peer sent a duplicate fragment sequence
				 * number. It could also be that the FSNs have
				 * wrapped; comparing against the TSN might
				 * disambiguate that, but for now just abort
				 * the reassembly.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Duplicate to fsn: %u -- abort\n",
					at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
						    chk, abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
				chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * OK, let's see if we can pull any in-sequence fragments
	 * from the reassembly queue into the control structure.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been
	 * seen, there is no point in looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
					"Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
					control, at,
					at->rec.data.fsn,
					next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to size-on-all-streams
					 * if it is not on the read queue. The
					 * read-queue flag will cause an
					 * sballoc, so it is accounted for
					 * there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
								  control,
								  &stcb->sctp_socket->so_rcv, control->end_added,
								  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
#if defined(__Userspace__)
		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
#endif
		/* Need to wake up the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}

static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
				break;
			}
		}
	} else {
		if (idata_supported) {
			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
					break;
				}
			}
		} else {
			control = TAILQ_FIRST(&strm->uno_inqueue);
		}
	}
	return (control);
}

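/*
 * Process a single received DATA or I-DATA chunk. Returns 1 if the chunk
 * was accepted (delivered or queued for reassembly) and 0 if it was
 * dropped (duplicate, invalid stream, out of resources, ...). On a fatal
 * protocol violation the association is aborted and *abort_flag is set;
 * *break_flag is set when the receive window is exhausted and the caller
 * should stop processing further chunks from this packet.
 */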
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
			  struct mbuf **m, int offset, int chk_length,
			  struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
			  int *break_flag, int last_chunk, uint8_t chk_type)
{
	struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */
	struct sctp_stream_in *strm;
	uint32_t tsn, fsn, gap, mid;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t sid;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control, *ncontrol;
	uint32_t ppid;
	uint8_t chk_flags;
	struct sctp_stream_reset_list *liste;
	int ordered;
	size_t clen;
	int created_control = 0;

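	/*
	 * Parse the chunk header. For I-DATA the first fragment implicitly
	 * has FSN 0 and carries the PPID in that field, while non-first
	 * fragments carry the FSN and no PPID. For plain DATA the FSN is
	 * the TSN itself and the 16-bit SSN is widened into the 32-bit MID.
	 */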
	if (chk_type == SCTP_IDATA) {
		struct sctp_idata_chunk *chunk, chunk_buf;

		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
		                                                 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
		chk_flags = chunk->ch.chunk_flags;
		clen = sizeof(struct sctp_idata_chunk);
		tsn = ntohl(chunk->dp.tsn);
		sid = ntohs(chunk->dp.sid);
		mid = ntohl(chunk->dp.mid);
		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
			fsn = 0;
			ppid = chunk->dp.ppid_fsn.ppid;
		} else {
			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
			ppid = 0xffffffff; /* Use as an invalid value. */
		}
	} else {
		struct sctp_data_chunk *chunk, chunk_buf;

		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
		                                                sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
		chk_flags = chunk->ch.chunk_flags;
		clen = sizeof(struct sctp_data_chunk);
		tsn = ntohl(chunk->dp.tsn);
		sid = ntohs(chunk->dp.sid);
		mid = (uint32_t)(ntohs(chunk->dp.ssn));
		fsn = tsn;
		ppid = chunk->dp.ppid;
	}
	if ((size_t)chk_length == clen) {
		/*
		 * Need to send an abort, since we
		 * received an empty data chunk.
		 */
		op_err = sctp_generate_no_user_data_cause(tsn);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	if (stcb == NULL) {
		return (0);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSNs between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping array at its maximum size; toss it */
		return (0);
	}
	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check the GONE flag; duplicates would have caused a SACK
	 * to be sent up above.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	     (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * Wait a minute, the socket is gone; there is no longer
		 * a receiver. Send the peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* Is the stream valid? */
	if (sid >= asoc->streamincnt) {
		struct sctp_error_invalid_stream *cause;

		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
		                               0, M_NOWAIT, 1, MT_DATA);
		if (op_err != NULL) {
			/* add some space up front so prepending will work well */
			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
			cause = mtod(op_err, struct sctp_error_invalid_stream *);
			/*
			 * Error causes are just parameters, and this one
			 * has two back-to-back parameter headers: one with
			 * the error type and size, the other with the
			 * stream id and a reserved field.
			 */
			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
			cause->stream_id = htons(sid);
			cause->reserved = htons(0);
			sctp_queue_op_err(stcb, op_err);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
	/*
	 * If it's a fragmented message, let's see if we can
	 * find the control on the reassembly queues.
	 */
	if ((chk_type == SCTP_IDATA) &&
	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
	    (fsn == 0)) {
		/*
		 * The first fragment *must* have FSN 0, and other
		 * (middle/end) pieces can *not* have FSN 0.
		 * XXX: This can happen in case of a wraparound.
		 *      Ignore it for now.
		 */
		SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
		goto err_out;
	}
	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
		chk_flags, control);
	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
		/* See if we can find the reassembly entity */
		if (control != NULL) {
			/* We found something; does it belong? */
			if (ordered && (mid != control->mid)) {
				SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
			err_out:
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
				return (0);
			}
			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
				/* We can't have a switched order with an unordered chunk */
				SCTP_SNPRINTF(msg, sizeof(msg),
				              "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
				              tsn);
				goto err_out;
			}
			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
				/* We can't have a switched unordered with an ordered chunk */
				SCTP_SNPRINTF(msg, sizeof(msg),
				             "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
				             tsn);
				goto err_out;
			}
		}
	} else {
		/*
		 * It's a complete segment. Let's validate that we don't
		 * have a reassembly going on with the same stream/sequence
		 * (for ordered) or in the same stream (for unordered).
		 */
		if (control != NULL) {
			if (ordered || asoc->idata_supported) {
				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
					chk_flags, mid);
				SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
				goto err_out;
			} else {
				if ((tsn == control->fsn_included + 1) &&
				    (control->end_added == 0)) {
					SCTP_SNPRINTF(msg, sizeof(msg),
					              "Illegal message sequence, missing end for MID: %8.8x",
					              control->fsn_included);
					goto err_out;
				} else {
					control = NULL;
				}
			}
		}
	}
	/* now do the queue-limit and rwnd tests */
	if (((asoc->cnt_on_all_streams +
	      asoc->cnt_on_reasm_queue +
	      asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd, we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* something to read; wake up the reader */
#if defined(__APPLE__) && !defined(__Userspace__)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) && !defined(__Userspace__)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now, is it in the mapping array of what we have accepted? */
		if (chk_type == SCTP_DATA) {
			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
				/* Nope, not in the valid range; dump it */
			dump_packet:
				sctp_set_rwnd(stcb, asoc);
				if ((asoc->cnt_on_all_streams +
				     asoc->cnt_on_reasm_queue +
				     asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
					SCTP_STAT_INCR(sctps_datadropchklmt);
				} else {
					SCTP_STAT_INCR(sctps_datadroprwnd);
				}
				*break_flag = 1;
				return (0);
			}
		} else {
			if (control == NULL) {
				goto dump_packet;
			}
			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
				goto dump_packet;
			}
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_in_at = 0;
		asoc->tsn_in_wrapped = 1;
	}
	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
	asoc->tsn_in_at++;
#endif
	/*
	 * Before we continue, let's validate that we are not being fooled
	 * by an evil attacker. We can only have N * 8 chunks outstanding,
	 * based on the TSN spread allowed by the N-byte mapping array, so
	 * there is no way our stream sequence numbers could have wrapped.
	 * We of course only validate the FIRST fragment, so that bit must
	 * be set.
	 */
	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (TAILQ_EMPTY(&asoc->resetHead)) &&
	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
		/* The incoming SSN is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
			mid, asoc->strmin[sid].last_mid_delivered);

		if (asoc->idata_supported) {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			              asoc->strmin[sid].last_mid_delivered,
			              tsn,
			              sid,
			              mid);
		} else {
			SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			              (uint16_t)asoc->strmin[sid].last_mid_delivered,
			              tsn,
			              sid,
			              (uint16_t)mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	if (chk_type == SCTP_IDATA) {
		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
	} else {
		the_len = (chk_length - sizeof(struct sctp_data_chunk));
	}
	if (last_chunk == 0) {
		if (chk_type == SCTP_IDATA) {
			dmbuf = SCTP_M_COPYM(*m,
					     (offset + sizeof(struct sctp_idata_chunk)),
					     the_len, M_NOWAIT);
		} else {
			dmbuf = SCTP_M_COPYM(*m,
					     (offset + sizeof(struct sctp_data_chunk)),
					     the_len, M_NOWAIT);
		}
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
		}
#endif
	} else {
		/* We can steal the last chunk */
		int l_len;

		dmbuf = *m;
		/* lop off the top part */
		if (chk_type == SCTP_IDATA) {
			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
		} else {
			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		}
		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
			l_len = SCTP_BUF_LEN(dmbuf);
		} else {
			/*
			 * Need to count up the size; hopefully
			 * we do not hit this too often.
			 */
			struct mbuf *lat;

			l_len = 0;
			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
				l_len += SCTP_BUF_LEN(lat);
			}
		}
		if (l_len > the_len) {
			/* Trim the padding bytes off the end too */
			m_adj(dmbuf, -(l_len - the_len));
		}
	}
	if (dmbuf == NULL) {
		SCTP_STAT_INCR(sctps_nomem);
		return (0);
	}
	/*
	 * Now, no matter what, we need a control; get one
	 * if we don't have one (we may have gotten one
	 * above when we found the message was fragmented).
	 */
	if (control == NULL) {
		sctp_alloc_a_readq(stcb, control);
		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
					   ppid,
					   sid,
					   chk_flags,
					   NULL, fsn, mid);
		if (control == NULL) {
			SCTP_STAT_INCR(sctps_nomem);
			return (0);
		}
		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
			struct mbuf *mm;

			control->data = dmbuf;
			control->tail_mbuf = NULL;
			for (mm = control->data; mm; mm = mm->m_next) {
				control->length += SCTP_BUF_LEN(mm);
				if (SCTP_BUF_NEXT(mm) == NULL) {
					control->tail_mbuf = mm;
				}
			}
			control->end_added = 1;
			control->last_frag_seen = 1;
			control->first_frag_seen = 1;
			control->fsn_included = fsn;
			control->top_fsn = fsn;
		}
		created_control = 1;
	}
	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
		chk_flags, ordered, mid, control);
	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
	    TAILQ_EMPTY(&asoc->resetHead) &&
	    ((ordered == 0) ||
	     (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
	      TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
		/* Candidate for express delivery */
		/*
		 * It's not fragmented, no PD-API is up, nothing is in
		 * the delivery queue, it's unordered OR ordered and the
		 * next to deliver AND nothing else is stuck on the
		 * stream queue, and there is room for it in the socket
		 * buffer. Let's just stuff it up the buffer....
		 */
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
			control, mid);

		sctp_add_to_readq(stcb->sctp_ep, stcb,
		                  control, &stcb->sctp_socket->so_rcv,
		                  1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);

		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
			/* for ordered, bump what we delivered */
			asoc->strmin[sid].last_mid_delivered++;
		}
		SCTP_STAT_INCR(sctps_recvexpress);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
					      SCTP_STR_LOG_FROM_EXPRS_DEL);
		}
		control = NULL;
		goto finish_express_del;
	}

	/* Now, will we need a chunk too? */
	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
		sctp_alloc_a_chunk(stcb, chk);
		if (chk == NULL) {
			/* No memory, so we drop the chunk */
			SCTP_STAT_INCR(sctps_nomem);
			if (last_chunk == 0) {
				/* we copied it, free the copy */
				sctp_m_freem(dmbuf);
			}
			return (0);
		}
		chk->rec.data.tsn = tsn;
		chk->no_fr_allowed = 0;
		chk->rec.data.fsn = fsn;
		chk->rec.data.mid = mid;
		chk->rec.data.sid = sid;
		chk->rec.data.ppid = ppid;
		chk->rec.data.context = stcb->asoc.context;
		chk->rec.data.doing_fast_retransmit = 0;
		chk->rec.data.rcv_flags = chk_flags;
		chk->asoc = asoc;
		chk->send_size = the_len;
		chk->whoTo = net;
		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
			chk,
			control, mid);
		atomic_add_int(&net->ref_count, 1);
		chk->data = dmbuf;
	}
	/*
	 * Set the appropriate TSN mark: non-renegable when draining
	 * (reneging) is disabled, renegable otherwise.
	 */
	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
	} else {
		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
			asoc->highest_tsn_inside_map = tsn;
		}
	}
	/* Now, is it complete (i.e. not fragmented)? */
	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		/*
		 * Special check for when streams are resetting. We could
		 * be smarter about this and check the actual stream to see
		 * if it is not being reset; that way we would not create a
		 * head-of-line block between streams being reset and those
		 * not being reset.
		 */
		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
		    SCTP_TSN_GT(tsn, liste->tsn)) {
			/*
			 * Yep, it's past where we need to reset; go
			 * ahead and queue it.
			 */
			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
				/* first one on */
				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
			} else {
				struct sctp_queued_to_read *lcontrol, *nlcontrol;
				unsigned char inserted = 0;

				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
						continue;
					} else {
						/* found it */
						TAILQ_INSERT_BEFORE(lcontrol, control, next);
						inserted = 1;
						break;
					}
				}
				if (inserted == 0) {
					/*
					 * Nothing in the queue has a larger
					 * TSN, so it must be put at the end.
					 */
					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
				}
			}
			goto finish_express_del;
		}
		if (chk_flags & SCTP_DATA_UNORDERED) {
			/* queue directly into the socket buffer */
			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
				control, mid);
			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			                  control,
			                  &stcb->sctp_socket->so_rcv, 1,
			                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);

		} else {
			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
				mid);
			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
			if (*abort_flag) {
				if (last_chunk) {
					*m = NULL;
				}
				return (0);
			}
		}
		goto finish_express_del;
	}
	/* If we reach here, it's a reassembly */
	need_reasm_check = 1;
	SCTPDBG(SCTP_DEBUG_XXX,
		"Queue data to stream for reasm control: %p MID: %u\n",
		control, mid);
	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
	if (*abort_flag) {
		/*
		 * The association is now gone and chk was put onto the
		 * reassembly queue, which has all been freed.
		 */
		if (last_chunk) {
			*m = NULL;
		}
		return (0);
	}
finish_express_del:
	/* Here we tidy things up */
	if (tsn == (asoc->cumulative_tsn + 1)) {
		/* Update cum-ack */
		asoc->cumulative_tsn = tsn;
	}
	if (last_chunk) {
		*m = NULL;
	}
	if (ordered) {
		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
	} else {
		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
	}
	SCTP_STAT_INCR(sctps_recvdata);
	/* Set it present please */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
			     asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
	}
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
		need_reasm_check = 0;
	}
	/* check the special flag for stream resets */
	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
		/*
		 * We have finished working through the backlogged TSNs;
		 * now it is time to reset the streams: 1) call the reset
		 * function, 2) free the pending_reply space, 3) distribute
		 * any chunks in the pending_reply_queue.
		 */
		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
		SCTP_FREE(liste, SCTP_M_STRESET);
		/*sa_ignore FREED_MEMORY*/
		liste = TAILQ_FIRST(&asoc->resetHead);
		if (TAILQ_EMPTY(&asoc->resetHead)) {
			/* All can be removed */
			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
				strm = &asoc->strmin[control->sinfo_stream];
				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
				if (*abort_flag) {
					return (0);
				}
				if (need_reasm_check) {
					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
					need_reasm_check = 0;
				}
			}
		} else {
			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
					break;
				}
				/*
				 * If control->sinfo_tsn is <= liste->tsn we
				 * can process it, which is the negation of
				 * the loop-break test above.
				 */
				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
				strm = &asoc->strmin[control->sinfo_stream];
				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
				if (*abort_flag) {
					return (0);
				}
				if (need_reasm_check) {
					(void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
					need_reasm_check = 0;
				}
			}
		}
	}
	return (1);
}

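/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1 bits in val,
 * counted from the least significant bit, i.e. the bit position of the
 * first missing TSN within that byte. It is used when sliding the mapping
 * arrays to find how far the cumulative ack can advance.
 */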
static const int8_t sctp_map_lookup_tab[256] = {
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 5,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 6,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 5,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 7,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 5,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 6,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 5,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 4,
  0, 1, 0, 2, 0, 1, 0, 3,
  0, 1, 0, 2, 0, 1, 0, 8
};

void
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
	/*
	 * Now we also need to check the mapping array in a couple of ways.
	 * 1) Did we move the cum-ack point?
	 *
	 * When you first glance at this you might think that all entries
	 * that make up the position of the cum-ack would be in the
	 * nr-mapping array only, i.e. that things up to the cum-ack are
	 * always deliverable. That's true with one exception: when it's a
	 * fragmented message, we may not deliver the data until some
	 * threshold (or all) of it is in place. So we must OR the
	 * nr_mapping_array and mapping_array to get a true picture of the
	 * cum-ack.
	 */
	struct sctp_association *asoc;
	int at;
	uint8_t val;
	int slide_from, slide_end, lgap, distance;
	uint32_t old_cumack, old_base, old_highest, highest_tsn;

	asoc = &stcb->asoc;

	old_cumack = asoc->cumulative_tsn;
	old_base = asoc->mapping_array_base_tsn;
	old_highest = asoc->highest_tsn_inside_map;
	/*
	 * We could probably improve this a small bit by calculating the
	 * offset of the current cum-ack as the starting point.
	 */
	at = 0;
	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
		if (val == 0xff) {
			at += 8;
		} else {
			/* there is a 0 bit */
			at += sctp_map_lookup_tab[val];
			break;
		}
	}
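	/*
	 * 'at' now holds the number of consecutively received TSNs, counted
	 * from the base of the mapping array, so the cumulative ack is
	 * base + at - 1.
	 */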
	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);

	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
#ifdef INVARIANTS
		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
#else
		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
		sctp_print_mapping_array(asoc);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
#endif
	}
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
		/*
		 * The complete array was filled in a single run;
		 * the highest TSN becomes the cum-ack.
		 */
		int clr;
#ifdef INVARIANTS
		unsigned int i;
#endif

		/* clear the array */
		clr = ((at + 7) >> 3);
		if (clr > asoc->mapping_array_size) {
			clr = asoc->mapping_array_size;
		}
		memset(asoc->mapping_array, 0, clr);
		memset(asoc->nr_mapping_array, 0, clr);
#ifdef INVARIANTS
		for (i = 0; i < asoc->mapping_array_size; i++) {
			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
				sctp_print_mapping_array(asoc);
			}
		}
#endif
		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
	} else if (at >= 8) {
		/* we can slide the mapping array down */
		/* slide_from holds where we hit the first non-0xff byte */

		/*
		 * now calculate the ceiling of the move using our highest
		 * TSN value
		 */
		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
		slide_end = (lgap >> 3);
		if (slide_end < slide_from) {
			sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
			panic("impossible slide");
#else
			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
			            lgap, slide_end, slide_from, at);
			return;
#endif
		}
		if (slide_end > asoc->mapping_array_size) {
#ifdef INVARIANTS
			panic("would overrun buffer");
#else
			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
			            asoc->mapping_array_size, slide_end);
			slide_end = asoc->mapping_array_size;
#endif
		}
		distance = (slide_end - slide_from) + 1;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(old_base, old_cumack, old_highest,
				     SCTP_MAP_PREPARE_SLIDE);
			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
				     (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
		}
		if (distance + slide_from > asoc->mapping_array_size ||
		    distance < 0) {
			/*
			 * Here we do NOT slide the array forward, so that
			 * hopefully, when more data comes in to fill it up,
			 * we will be able to slide it forward. Really,
			 * this should not happen.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
					     (uint32_t) asoc->mapping_array_size,
					     SCTP_MAP_SLIDE_NONE);
			}
		} else {
			int ii;

			for (ii = 0; ii < distance; ii++) {
				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
			}
			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
				asoc->mapping_array[ii] = 0;
				asoc->nr_mapping_array[ii] = 0;
			}
			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_map += (slide_from << 3);
			}
			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
			}
			asoc->mapping_array_base_tsn += (slide_from << 3);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
				sctp_log_map(asoc->mapping_array_base_tsn,
					     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
					     SCTP_MAP_SLIDE_RESULT);
			}
		}
	}
}

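/*
 * Decide how to acknowledge the data just processed. In the SHUTDOWN-SENT
 * state a SHUTDOWN is sent immediately (plus a SACK if a gap exists);
 * otherwise a SACK is either sent right away or the delayed-SACK timer is
 * started, depending on gaps, duplicates, and the configured SACK
 * frequency.
 */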
void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
{
	struct sctp_association *asoc;
	uint32_t highest_tsn;
	int is_a_gap;

	sctp_slide_mapping_arrays(stcb);
	asoc = &stcb->asoc;
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	/* Is there a gap now? */
	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);

	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * OK, special case: in the SHUTDOWN-SENT state we make
		 * sure the SACK timer is off and instead send a SHUTDOWN
		 * and a SACK.
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			                stcb->sctp_ep, stcb, NULL,
			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
		}
		sctp_send_shutdown(stcb,
		                   ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
		if (is_a_gap) {
			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		}
	} else {
		/*
		 * CMT DAC algorithm: increase the number of packets
		 * received since the last ack
		 */
		stcb->asoc.cmt_dac_pkts_rcvd++;

		if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
		                                         * longer is one */
		    (stcb->asoc.numduptsns) ||          /* we have dup's */
		    (is_a_gap) ||                       /* is still a gap */
		    (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) {	/* hit limit of pkts */
			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
			    (stcb->asoc.send_sack == 0) &&
			    (stcb->asoc.numduptsns == 0) &&
			    (stcb->asoc.delayed_ack) &&
			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
				/*
				 * CMT DAC algorithm: With CMT, delay acks
				 * even in the face of reordering. Therefore,
				 * acks that do not have to be sent for the
				 * above reasons will be delayed; that is,
				 * acks that would have been sent due to gap
				 * reports are delayed with DAC. Start the
				 * delayed ack timer.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				                 stcb->sctp_ep, stcb, NULL);
			} else {
				/*
				 * OK, we must build a SACK, since the timer
				 * is pending, we got our first packet, OR
				 * there are gaps or duplicates.
				 */
				sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
			}
		} else {
			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				                 stcb->sctp_ep, stcb, NULL);
			}
		}
	}
}

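/*
 * Walk the DATA portion of a packet and feed every DATA or I-DATA chunk
 * to sctp_process_a_data_chunk(). Returns 0 on success, 1 if the first
 * chunk header could not be parsed, and 2 if the association was aborted
 * while processing.
 */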
int
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
                  struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                  struct sctp_nets *net, uint32_t *high_tsn)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of data chunks processed */
	int stop_proc = 0;
	int break_flag, last_chunk;
	int abort_flag = 0, was_a_gap;
	struct mbuf *m;
	uint32_t highest_tsn;
	uint16_t chk_length;

	/* set the rwnd */
	sctp_set_rwnd(stcb, &stcb->asoc);

	m = *mm;
	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
	} else {
		highest_tsn = asoc->highest_tsn_inside_map;
	}
	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
	/*
	 * Set up where we got the last DATA packet from for any SACK that
	 * may need to go out. Don't bump the net; this is done ONLY when a
	 * chunk is assigned.
	 */
	asoc->last_data_chunk_from = net;

	/*-
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster, i.e. a small packet sent in for which the driver
	 * underneath allocated a full cluster. If so we must copy it to a
	 * smaller mbuf and free up the cluster mbuf. This will help with
	 * cluster starvation.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons, not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
		if (m) {
			/* OK, let's see if we can copy the data up */
			caddr_t *from, *to;

			/* get the pointers and copy */
			to = mtod(m, caddr_t *);
			from = mtod((*mm), caddr_t *);
			memcpy(to, from, SCTP_BUF_LEN((*mm)));
			/* copy the length and free up the old mbuf */
			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
			sctp_m_freem(*mm);
			/* success, copy it back */
			*mm = m;
		} else {
			/* We are in trouble in the mbuf world; keep the original */
			m = *mm;
		}
	}
	/* get a pointer to the first chunk header */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
	                                           sizeof(struct sctp_chunkhdr),
	                                           (uint8_t *)&chunk_buf);
	if (ch == NULL) {
		return (1);
	}
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
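	/*
	 * Main chunk-processing loop. Only DATA and I-DATA chunks are
	 * expected past this point; anything else found in the data region
	 * is handled or rejected in the switch below.
	 */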
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			continue;
		}
		if ((asoc->idata_supported == 1) &&
		    (ch->chunk_type == SCTP_DATA)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return (2);
		}
		if ((asoc->idata_supported == 0) &&
		    (ch->chunk_type == SCTP_IDATA)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
			sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
			return (2);
		}
		if ((ch->chunk_type == SCTP_DATA) ||
		    (ch->chunk_type == SCTP_IDATA)) {
			uint16_t clen;

			if (ch->chunk_type == SCTP_DATA) {
				clen = sizeof(struct sctp_data_chunk);
			} else {
				clen = sizeof(struct sctp_idata_chunk);
			}
			if (chk_length < clen) {
				/*
				 * Need to send an abort, since we
				 * received an invalid data chunk.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
				              ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
				              chk_length);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
				sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
				return (2);
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
#endif
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
				last_chunk = 1;
			} else {
				last_chunk = 0;
			}
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
						      chk_length, net, high_tsn, &abort_flag, &break_flag,
						      last_chunk, ch->chunk_type)) {
				num_chunks++;
			}
			if (abort_flag)
				return (2);

			if (break_flag) {
				/*
				 * Set because we ran out of rwnd space and
				 * have no drop-report space left.
				 */
				stop_proc = 1;
				continue;
			}
		} else {
			/* not a data chunk in the data region */
			switch (ch->chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_NR_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_ECN_ECHO:
			case SCTP_ECN_CWR:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
			case SCTP_ASCONF:
			{
				/*
				 * Now, what do we do with KNOWN chunks that
				 * are NOT in the right place?
				 *
				 * We treat them as a protocol violation and
				 * abort the association.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
				              ch->chunk_type);
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
				return (2);
			}
			default:
				/*
				 * Unknown chunk type: use the upper-bit
				 * rules after checking the length
				 */
				if (chk_length < sizeof(struct sctp_chunkhdr)) {
					/*
					 * Need to send an abort, since we
					 * received an invalid chunk.
					 */
					struct mbuf *op_err;
					char msg[SCTP_DIAG_INFO_LEN];

					SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
					sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
					return (2);
				}
				if (ch->chunk_type & 0x40) {
					/* Add an error report to the queue */
					struct mbuf *op_err;
					struct sctp_gen_error_cause *cause;

					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
					                               0, M_NOWAIT, 1, MT_DATA);
					if (op_err != NULL) {
						cause = mtod(op_err, struct sctp_gen_error_cause *);
						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
						if (SCTP_BUF_NEXT(op_err) != NULL) {
							sctp_queue_op_err(stcb, op_err);
						} else {
							sctp_m_freem(op_err);
						}
					}
				}
				if ((ch->chunk_type & 0x80) == 0) {
					/* discard the rest of this packet */
					stop_proc = 1;
				}	/* else skip this bad chunk and
					 * continue... */
				break;
			}	/* switch of chunk type */
		}
		*offset += SCTP_SIZE32(chk_length);
		if ((*offset >= length) || stop_proc) {
			/* no more data left in the mbuf chain */
			stop_proc = 1;
			continue;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		                                           sizeof(struct sctp_chunkhdr),
		                                           (uint8_t *)&chunk_buf);
		if (ch == NULL) {
			*offset = length;
			stop_proc = 1;
			continue;
		}
	}
	if (break_flag) {
		/*
		 * we need to report rwnd overrun drops.
		 */
		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
	}
	if (num_chunks) {
		/*
		 * We got data, so update the time for auto-close and
		 * give the peer credit for being alive.
		 */
		SCTP_STAT_INCR(sctps_recvpktwithdata);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				       stcb->asoc.overall_error_count,
				       0,
				       SCTP_FROM_SCTP_INDATA,
				       __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
	}
	/* now service all of the reassembly queue if needed */
	if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
		/* Ensure that we ack right away */
		stcb->asoc.send_sack = 1;
	}
	/* Start a SACK timer or queue a SACK for sending */
	sctp_sack_check(stcb, was_a_gap);
	return (0);
}

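/*
 * Process one gap-ack block covering TSNs last_tsn + frag_strt through
 * last_tsn + frag_end: for each matching chunk on the sent queue, count
 * chunks marked for fast retransmit, update the CMT (rtx-)pseudo-cumack
 * trackers, and maintain the biggest/lowest newly-acked TSN bookkeeping
 * used by the caller's SACK processing.
 */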
2961 static int
sctp_process_segment_range(struct sctp_tcb * stcb,struct sctp_tmit_chunk ** p_tp1,uint32_t last_tsn,uint16_t frag_strt,uint16_t frag_end,int nr_sacking,int * num_frs,uint32_t * biggest_newly_acked_tsn,uint32_t * this_sack_lowest_newack,int * rto_ok)2962 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2963 			   uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2964 			   int *num_frs,
2965 			   uint32_t *biggest_newly_acked_tsn,
2966 			   uint32_t  *this_sack_lowest_newack,
2967 			   int *rto_ok)
2968 {
2969 	struct sctp_tmit_chunk *tp1;
2970 	unsigned int theTSN;
2971 	int j, wake_him = 0, circled = 0;
2972 
2973 	/* Recover the tp1 we last saw */
2974 	tp1 = *p_tp1;
2975 	if (tp1 == NULL) {
2976 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2977 	}
2978 	for (j = frag_strt; j <= frag_end; j++) {
2979 		theTSN = j + last_tsn;
2980 		while (tp1) {
2981 			if (tp1->rec.data.doing_fast_retransmit)
2982 				(*num_frs) += 1;
2983 
2984 			/*-
2985 			 * CMT: CUCv2 algorithm. For each TSN being
2986 			 * processed from the sent queue, track the
2987 			 * next expected pseudo-cumack, or
2988 			 * rtx_pseudo_cumack, if required. Separate
2989 			 * cumack trackers for first transmissions,
2990 			 * and retransmissions.
2991 			 */
2992 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2993 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2994 			    (tp1->snd_count == 1)) {
2995 				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2996 				tp1->whoTo->find_pseudo_cumack = 0;
2997 			}
2998 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2999 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
3000 			    (tp1->snd_count > 1)) {
3001 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
3002 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
3003 			}
3004 			if (tp1->rec.data.tsn == theTSN) {
3005 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3006 					/*-
3007 					 * must be held until
3008 					 * cum-ack passes
3009 					 */
3010 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3011 						/*-
3012 						 * If it is less than RESEND, it is
3013 						 * now no longer in flight.
3014 						 * Higher values may already be set
3015 						 * via previous Gap Ack Blocks...
3016 						 * i.e. ACKED or RESEND.
3017 						 */
3018 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3019 						                *biggest_newly_acked_tsn)) {
3020 							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
3021 						}
3022 						/*-
3023 						 * CMT: SFR algo (and HTNA) - set
3024 						 * saw_newack to 1 for dest being
3025 						 * newly acked. update
3026 						 * this_sack_highest_newack if
3027 						 * appropriate.
3028 						 */
3029 						if (tp1->rec.data.chunk_was_revoked == 0)
3030 							tp1->whoTo->saw_newack = 1;
3031 
3032 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3033 						                tp1->whoTo->this_sack_highest_newack)) {
3034 							tp1->whoTo->this_sack_highest_newack =
3035 								tp1->rec.data.tsn;
3036 						}
3037 						/*-
3038 						 * CMT DAC algo: also update
3039 						 * this_sack_lowest_newack
3040 						 */
3041 						if (*this_sack_lowest_newack == 0) {
3042 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3043 								sctp_log_sack(*this_sack_lowest_newack,
3044 									      last_tsn,
3045 									      tp1->rec.data.tsn,
3046 									      0,
3047 									      0,
3048 									      SCTP_LOG_TSN_ACKED);
3049 							}
3050 							*this_sack_lowest_newack = tp1->rec.data.tsn;
3051 						}
3052 						/*-
3053 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3054 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3055 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3056 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3057 						 * Separate pseudo_cumack trackers for first transmissions and
3058 						 * retransmissions.
3059 						 */
3060 						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3061 							if (tp1->rec.data.chunk_was_revoked == 0) {
3062 								tp1->whoTo->new_pseudo_cumack = 1;
3063 							}
3064 							tp1->whoTo->find_pseudo_cumack = 1;
3065 						}
3066 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3067 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3068 						}
3069 						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3070 							if (tp1->rec.data.chunk_was_revoked == 0) {
3071 								tp1->whoTo->new_pseudo_cumack = 1;
3072 							}
3073 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
3074 						}
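						/*
						 * Concrete example: if TSNs 10 and 11 were first
						 * transmissions to destination A and A's
						 * pseudo_cumack is 10, acking TSN 10 here sets
						 * new_pseudo_cumack (enabling a cwnd update for
						 * A) and restarts the search, which then settles
						 * on TSN 11.
						 */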
3075 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3076 							sctp_log_sack(*biggest_newly_acked_tsn,
3077 								      last_tsn,
3078 								      tp1->rec.data.tsn,
3079 								      frag_strt,
3080 								      frag_end,
3081 								      SCTP_LOG_TSN_ACKED);
3082 						}
3083 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3084 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3085 								       tp1->whoTo->flight_size,
3086 								       tp1->book_size,
3087 								       (uint32_t)(uintptr_t)tp1->whoTo,
3088 								       tp1->rec.data.tsn);
3089 						}
3090 						sctp_flight_size_decrease(tp1);
3091 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3092 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3093 														     tp1);
3094 						}
3095 						sctp_total_flight_decrease(stcb, tp1);
3096 
3097 						tp1->whoTo->net_ack += tp1->send_size;
3098 						if (tp1->snd_count < 2) {
3099 							/*-
3100 							 * True non-retransmitted chunk
3101 							 */
3102 							tp1->whoTo->net_ack2 += tp1->send_size;
3103 
3104 							/*-
3105 							 * update RTO too?
3106 							 */
3107 							if (tp1->do_rtt) {
3108 								if (*rto_ok &&
3109 								    sctp_calculate_rto(stcb,
3110 								                       &stcb->asoc,
3111 								                       tp1->whoTo,
3112 								                       &tp1->sent_rcv_time,
3113 								                       SCTP_RTT_FROM_DATA)) {
3114 									*rto_ok = 0;
3115 								}
3116 								if (tp1->whoTo->rto_needed == 0) {
3117 									tp1->whoTo->rto_needed = 1;
3118 								}
3119 								tp1->do_rtt = 0;
3120 							}
3121 						}
3122 					}
3123 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3124 						if (SCTP_TSN_GT(tp1->rec.data.tsn,
3125 						                stcb->asoc.this_sack_highest_gap)) {
3126 							stcb->asoc.this_sack_highest_gap =
3127 								tp1->rec.data.tsn;
3128 						}
3129 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3130 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3131 #ifdef SCTP_AUDITING_ENABLED
3132 							sctp_audit_log(0xB2,
3133 								       (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3134 #endif
3135 						}
3136 					}
3137 					/*-
3138 					 * All chunks NOT UNSENT fall through here and are marked
3139 					 * (but leave alone PR-SCTP ones that are marked to skip)
3140 					 */
3141 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3142 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3143 						tp1->sent = SCTP_DATAGRAM_MARKED;
3144 					}
3145 					if (tp1->rec.data.chunk_was_revoked) {
3146 						/* deflate the cwnd */
3147 						tp1->whoTo->cwnd -= tp1->book_size;
3148 						tp1->rec.data.chunk_was_revoked = 0;
3149 					}
3150 					/* NR Sack code here */
3151 					if (nr_sacking &&
3152 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3153 						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3154 							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3155 #ifdef INVARIANTS
3156 						} else {
3157 							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3158 #endif
3159 						}
3160 						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3161 						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3162 						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3163 							stcb->asoc.trigger_reset = 1;
3164 						}
3165 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3166 						if (tp1->data) {
3167 							/* sa_ignore NO_NULL_CHK */
3168 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3169 							sctp_m_freem(tp1->data);
3170 							tp1->data = NULL;
3171 						}
3172 						wake_him++;
3173 					}
3174 				}
3175 				break;
3176 			}	/* if (tp1->tsn == theTSN) */
3177 			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3178 				break;
3179 			}
3180 			tp1 = TAILQ_NEXT(tp1, sctp_next);
3181 			if ((tp1 == NULL) && (circled == 0)) {
3182 				circled++;
3183 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3184 			}
3185 		}	/* end while (tp1) */
3186 		if (tp1 == NULL) {
3187 			circled = 0;
3188 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3189 		}
3190 		/* In case the fragments were not in order we must reset */
3191 	} /* end for (j = fragStart */
3192 	*p_tp1 = tp1;
3193 	return (wake_him);	/* Return value only used for nr-sack */
3194 }
3195 
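/*
 * Worked example for the gap mapping above: with cum-ack (last_tsn) 1000
 * and a gap ack block of [start = 2, end = 4], the loop visits
 * theTSN = 1002, 1003 and 1004. TSN comparisons use serial number
 * arithmetic (SCTP_TSN_GT/GE), so the mapping also holds across 32-bit
 * TSN wraparound.
 */
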
3196 static int
3197 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3198 		uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3199 		uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3200 		int num_seg, int num_nr_seg, int *rto_ok)
3201 {
3202 	struct sctp_gap_ack_block *frag, block;
3203 	struct sctp_tmit_chunk *tp1;
3204 	int i;
3205 	int num_frs = 0;
3206 	int chunk_freed;
3207 	int non_revocable;
3208 	uint16_t frag_strt, frag_end, prev_frag_end;
3209 
3210 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
3211 	prev_frag_end = 0;
3212 	chunk_freed = 0;
3213 
3214 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
3215 		if (i == num_seg) {
3216 			prev_frag_end = 0;
3217 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3218 		}
3219 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3220 		                                                  sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
3221 		*offset += sizeof(block);
3222 		if (frag == NULL) {
3223 			return (chunk_freed);
3224 		}
3225 		frag_strt = ntohs(frag->start);
3226 		frag_end = ntohs(frag->end);
3227 
3228 		if (frag_strt > frag_end) {
3229 			/* This gap report is malformed, skip it. */
3230 			continue;
3231 		}
3232 		if (frag_strt <= prev_frag_end) {
3233 			/* This gap report is not in order, so restart. */
3234 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
3235 		}
3236 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3237 			*biggest_tsn_acked = last_tsn + frag_end;
3238 		}
3239 		if (i < num_seg) {
3240 			non_revocable = 0;
3241 		} else {
3242 			non_revocable = 1;
3243 		}
3244 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3245 		                               non_revocable, &num_frs, biggest_newly_acked_tsn,
3246 		                               this_sack_lowest_newack, rto_ok)) {
3247 			chunk_freed = 1;
3248 		}
3249 		prev_frag_end = frag_end;
3250 	}
3251 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3252 		if (num_frs)
3253 			sctp_log_fr(*biggest_tsn_acked,
3254 			            *biggest_newly_acked_tsn,
3255 			            last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3256 	}
3257 	return (chunk_freed);
3258 }
3259 
3260 static void
3261 sctp_check_for_revoked(struct sctp_tcb *stcb,
3262 		       struct sctp_association *asoc, uint32_t cumack,
3263 		       uint32_t biggest_tsn_acked)
3264 {
3265 	struct sctp_tmit_chunk *tp1;
3266 
3267 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3268 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3269 			/*
3270 			 * This chunk is either ACKED or MARKED. If it is
3271 			 * ACKED it was acked by an earlier SACK but not by
3272 			 * this one, i.e. it has been revoked. If it is
3273 			 * MARKED it was ACK'ed again.
3274 			 */
3275 			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3276 				break;
3277 			}
3278 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3279 				/* it has been revoked */
3280 				tp1->sent = SCTP_DATAGRAM_SENT;
3281 				tp1->rec.data.chunk_was_revoked = 1;
3282 				/* We must add this stuff back in to
3283 				 * assure timers and such get started.
3284 				 */
3285 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3286 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3287 						       tp1->whoTo->flight_size,
3288 						       tp1->book_size,
3289 						       (uint32_t)(uintptr_t)tp1->whoTo,
3290 						       tp1->rec.data.tsn);
3291 				}
3292 				sctp_flight_size_increase(tp1);
3293 				sctp_total_flight_increase(stcb, tp1);
3294 				/* We inflate the cwnd to compensate for our
3295 				 * artificial inflation of the flight_size.
3296 				 */
3297 				tp1->whoTo->cwnd += tp1->book_size;
3298 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3299 					sctp_log_sack(asoc->last_acked_seq,
3300 						      cumack,
3301 						      tp1->rec.data.tsn,
3302 						      0,
3303 						      0,
3304 						      SCTP_LOG_TSN_REVOKED);
3305 				}
3306 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3307 				/* it has been re-acked in this SACK */
3308 				tp1->sent = SCTP_DATAGRAM_ACKED;
3309 			}
3310 		}
3311 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3312 			break;
3313 	}
3314 }
3315 
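/*
 * Revocation example for sctp_check_for_revoked() above: suppose TSN 12
 * was covered by the gap blocks of an earlier SACK (so it sits here as
 * ACKED) but the current SACK no longer covers it. The chunk is moved
 * back to SENT, flagged chunk_was_revoked, and its bytes are added back
 * to the flight size (with a matching temporary cwnd inflation) so that
 * timers and congestion control treat it as outstanding again.
 */
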
3316 static void
3317 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3318 			   uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3319 {
3320 	struct sctp_tmit_chunk *tp1;
3321 	int strike_flag = 0;
3322 	struct timeval now;
3323 	int tot_retrans = 0;
3324 	uint32_t sending_seq;
3325 	struct sctp_nets *net;
3326 	int num_dests_sacked = 0;
3327 
3328 	/*
3329 	 * Select the sending_seq: this is either the next thing ready to be
3330 	 * sent but not yet transmitted, OR, the next seq we will assign.
3331 	 */
3332 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3333 	if (tp1 == NULL) {
3334 		sending_seq = asoc->sending_seq;
3335 	} else {
3336 		sending_seq = tp1->rec.data.tsn;
3337 	}
3338 
3339 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
3340 	if ((asoc->sctp_cmt_on_off > 0) &&
3341 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3342 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3343 			if (net->saw_newack)
3344 				num_dests_sacked++;
3345 		}
3346 	}
3347 	if (stcb->asoc.prsctp_supported) {
3348 		(void)SCTP_GETTIME_TIMEVAL(&now);
3349 	}
3350 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3351 		strike_flag = 0;
3352 		if (tp1->no_fr_allowed) {
3353 			/* this one had a timeout or something */
3354 			continue;
3355 		}
3356 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3357 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
3358 				sctp_log_fr(biggest_tsn_newly_acked,
3359 					    tp1->rec.data.tsn,
3360 					    tp1->sent,
3361 					    SCTP_FR_LOG_CHECK_STRIKE);
3362 		}
3363 		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3364 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
3365 			/* done */
3366 			break;
3367 		}
3368 		if (stcb->asoc.prsctp_supported) {
3369 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3370 				/* Is it expired? */
3371 #if !(defined(__FreeBSD__) && !defined(__Userspace__))
3372 				if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3373 #else
3374 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3375 #endif
3376 					/* Yes so drop it */
3377 					if (tp1->data != NULL) {
3378 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3379 										 SCTP_SO_NOT_LOCKED);
3380 					}
3381 					continue;
3382 				}
3383 			}
3384 		}
3385 		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3386 		                !(accum_moved && asoc->fast_retran_loss_recovery)) {
3387 			/* we are beyond the tsn in the sack  */
3388 			break;
3389 		}
3390 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3391 			/* either a RESEND, ACKED, or MARKED */
3392 			/* skip */
3393 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3394 				/* Continue striking FWD-TSN chunks */
3395 				tp1->rec.data.fwd_tsn_cnt++;
3396 			}
3397 			continue;
3398 		}
3399 		/*
3400 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3401 		 */
3402 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3403 			/*
3404 			 * No new acks were received for data sent to this
3405 			 * dest. Therefore, according to the SFR algo for
3406 			 * CMT, no data sent to this dest can be marked for
3407 			 * FR using this SACK.
3408 			 */
3409 			continue;
3410 		} else if (tp1->whoTo &&
3411 		           SCTP_TSN_GT(tp1->rec.data.tsn,
3412 		                       tp1->whoTo->this_sack_highest_newack) &&
3413 		           !(accum_moved && asoc->fast_retran_loss_recovery)) {
3414 			/*
3415 			 * CMT: New acks were received for data sent to
3416 			 * this dest. But no new acks were seen for data
3417 			 * sent after tp1. Therefore, according to the SFR
3418 			 * algo for CMT, tp1 cannot be marked for FR using
3419 			 * this SACK. This step covers part of the DAC algo
3420 			 * and the HTNA algo as well.
3421 			 */
3422 			continue;
3423 		}
3424 		/*
3425 		 * Here we check to see if we have already done a FR
3426 		 * and if so we see if the biggest TSN we saw in the sack is
3427 		 * smaller than the recovery point. If so we don't strike
3428 		 * the tsn... otherwise we CAN strike the TSN.
3429 		 */
3430 		/*
3431 		 * @@@ JRI: Check for CMT
3432 		 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
3433 		 */
3434 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3435 			/*
3436 			 * Strike the TSN if in fast-recovery and cum-ack
3437 			 * moved.
3438 			 */
3439 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3440 				sctp_log_fr(biggest_tsn_newly_acked,
3441 					    tp1->rec.data.tsn,
3442 					    tp1->sent,
3443 					    SCTP_FR_LOG_STRIKE_CHUNK);
3444 			}
3445 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3446 				tp1->sent++;
3447 			}
3448 			if ((asoc->sctp_cmt_on_off > 0) &&
3449 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3450 				/*
3451 				 * CMT DAC algorithm: If the SACK DAC flag is
3452 				 * set to 0, then the lowest_newack test will
3453 				 * not pass because it would have been set to
3454 				 * the cumack earlier. If the chunk is not
3455 				 * already marked for rtx, the SACK is not a
3456 				 * mixed SACK, and tp1 is not between two
3457 				 * sacked TSNs, then mark it by one more.
3458 				 * NOTE that we mark by one additional time since the SACK DAC flag
3459 				 * indicates that two packets have been received after this missing TSN.
3460 				 */
3461 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3462 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3463 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3464 						sctp_log_fr(16 + num_dests_sacked,
3465 							    tp1->rec.data.tsn,
3466 							    tp1->sent,
3467 							    SCTP_FR_LOG_STRIKE_CHUNK);
3468 					}
3469 					tp1->sent++;
3470 				}
3471 			}
3472 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3473 		           (asoc->sctp_cmt_on_off == 0)) {
3474 			/*
3475 			 * For those that have done a FR we must take
3476 			 * special consideration if we strike, i.e. the
3477 			 * biggest_newly_acked must be higher than the
3478 			 * sending_seq at the time we did the FR.
3479 			 */
3480 			if (
3481 #ifdef SCTP_FR_TO_ALTERNATE
3482 				/*
3483 				 * If FR's go to new networks, then we must only do
3484 				 * this for singly homed asoc's. However if the FR's
3485 				 * go to the same network (Armando's work) then it's
3486 				 * ok to FR multiple times.
3487 				 */
3488 				(asoc->numnets < 2)
3489 #else
3490 				(1)
3491 #endif
3492 				) {
3493 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3494 				                tp1->rec.data.fast_retran_tsn)) {
3495 					/*
3496 					 * Strike the TSN, since this ack is
3497 					 * beyond where things were when we
3498 					 * did a FR.
3499 					 */
3500 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3501 						sctp_log_fr(biggest_tsn_newly_acked,
3502 							    tp1->rec.data.tsn,
3503 							    tp1->sent,
3504 							    SCTP_FR_LOG_STRIKE_CHUNK);
3505 					}
3506 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3507 						tp1->sent++;
3508 					}
3509 					strike_flag = 1;
3510 					if ((asoc->sctp_cmt_on_off > 0) &&
3511 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3512 						/*
3513 						 * CMT DAC algorithm: If the
3514 						 * SACK DAC flag is set to 0,
3515 						 * then the lowest_newack test
3516 						 * will not pass because it
3517 						 * would have been set to
3518 						 * the cumack earlier. If the
3519 						 * chunk is not already marked
3520 						 * for rtx, the SACK is not a
3521 						 * mixed SACK, and tp1 is not
3522 						 * between two sacked TSNs,
3523 						 * then mark it by one more.
3524 						 * NOTE that we mark by one additional time since the SACK DAC flag
3525 						 * indicates that two packets have been received after this missing TSN.
3526 						 */
3527 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3528 						    (num_dests_sacked == 1) &&
3529 						    SCTP_TSN_GT(this_sack_lowest_newack,
3530 						                tp1->rec.data.tsn)) {
3531 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3532 								sctp_log_fr(32 + num_dests_sacked,
3533 									    tp1->rec.data.tsn,
3534 									    tp1->sent,
3535 									    SCTP_FR_LOG_STRIKE_CHUNK);
3536 							}
3537 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3538 								tp1->sent++;
3539 							}
3540 						}
3541 					}
3542 				}
3543 			}
3544 			/*
3545 			 * JRI: TODO: remove code for HTNA algo. CMT's
3546 			 * SFR algo covers HTNA.
3547 			 */
3548 		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3549 		                       biggest_tsn_newly_acked)) {
3550 			/*
3551 			 * We don't strike these: this is the HTNA
3552 			 * algorithm, i.e. we don't strike if our TSN is
3553 			 * larger than the Highest TSN Newly Acked.
3554 			 */
3555 			;
3556 		} else {
3557 			/* Strike the TSN */
3558 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3559 				sctp_log_fr(biggest_tsn_newly_acked,
3560 					    tp1->rec.data.tsn,
3561 					    tp1->sent,
3562 					    SCTP_FR_LOG_STRIKE_CHUNK);
3563 			}
3564 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3565 				tp1->sent++;
3566 			}
3567 			if ((asoc->sctp_cmt_on_off > 0) &&
3568 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3569 				/*
3570 				 * CMT DAC algorithm: If the SACK DAC flag is
3571 				 * set to 0, then the lowest_newack test will
3572 				 * not pass because it would have been set to
3573 				 * the cumack earlier. If the chunk is not
3574 				 * already marked for rtx, the SACK is not a
3575 				 * mixed SACK, and tp1 is not between two
3576 				 * sacked TSNs, then mark it by one more.
3577 				 * NOTE that we mark by one additional time since the SACK DAC flag
3578 				 * indicates that two packets have been received after this missing TSN.
3579 				 */
3580 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3581 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3582 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3583 						sctp_log_fr(48 + num_dests_sacked,
3584 							    tp1->rec.data.tsn,
3585 							    tp1->sent,
3586 							    SCTP_FR_LOG_STRIKE_CHUNK);
3587 					}
3588 					tp1->sent++;
3589 				}
3590 			}
3591 		}
3592 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3593 			struct sctp_nets *alt;
3594 
3595 			/* fix counts and things */
3596 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3597 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3598 					       (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3599 					       tp1->book_size,
3600 					       (uint32_t)(uintptr_t)tp1->whoTo,
3601 					       tp1->rec.data.tsn);
3602 			}
3603 			if (tp1->whoTo) {
3604 				tp1->whoTo->net_ack++;
3605 				sctp_flight_size_decrease(tp1);
3606 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3607 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3608 												     tp1);
3609 				}
3610 			}
3611 
3612 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3613 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3614 					      asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3615 			}
3616 			/* add back to the rwnd */
3617 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3618 
3619 			/* remove from the total flight */
3620 			sctp_total_flight_decrease(stcb, tp1);
3621 
3622 			if ((stcb->asoc.prsctp_supported) &&
3623 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3624 				/* Has it been retransmitted tv_sec times? (The retransmission limit is stored in tv_sec.) */
3625 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3626 					/* Yes, so drop it */
3627 					if (tp1->data != NULL) {
3628 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3629 										 SCTP_SO_NOT_LOCKED);
3630 					}
3631 					/* Make sure to flag we had a FR */
3632 					if (tp1->whoTo != NULL) {
3633 						tp1->whoTo->net_ack++;
3634 					}
3635 					continue;
3636 				}
3637 			}
3638 			/* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
3639 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3640 				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3641 					    0, SCTP_FR_MARKED);
3642 			}
3643 			if (strike_flag) {
3644 				/* This is a subsequent FR */
3645 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3646 			}
3647 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3648 			if (asoc->sctp_cmt_on_off > 0) {
3649 				/*
3650 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3651 				 * If CMT is being used, then pick dest with
3652 				 * largest ssthresh for any retransmission.
3653 				 */
3654 				tp1->no_fr_allowed = 1;
3655 				alt = tp1->whoTo;
3656 				/*sa_ignore NO_NULL_CHK*/
3657 				if (asoc->sctp_cmt_pf > 0) {
3658 					/* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
3659 					alt = sctp_find_alternate_net(stcb, alt, 2);
3660 				} else {
3661 					/* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
3662                                         /*sa_ignore NO_NULL_CHK*/
3663 					alt = sctp_find_alternate_net(stcb, alt, 1);
3664 				}
3665 				if (alt == NULL) {
3666 					alt = tp1->whoTo;
3667 				}
3668 				/*
3669 				 * CUCv2: If a different dest is picked for
3670 				 * the retransmission, then new
3671 				 * (rtx-)pseudo_cumack needs to be tracked
3672 				 * for orig dest. Let CUCv2 track new (rtx-)
3673 				 * pseudo-cumack always.
3674 				 */
3675 				if (tp1->whoTo) {
3676 					tp1->whoTo->find_pseudo_cumack = 1;
3677 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3678 				}
3679 			} else {/* CMT is OFF */
3680 #ifdef SCTP_FR_TO_ALTERNATE
3681 				/* Can we find an alternate? */
3682 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3683 #else
3684 				/*
3685 				 * default behavior is to NOT retransmit
3686 				 * FR's to an alternate. Armando Caro's
3687 				 * paper details why.
3688 				 */
3689 				alt = tp1->whoTo;
3690 #endif
3691 			}
3692 
3693 			tp1->rec.data.doing_fast_retransmit = 1;
3694 			tot_retrans++;
3695 			/* mark the sending seq for possible subsequent FR's */
3696 			/*
3697 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3698 			 * (uint32_t)tp1->rec.data.tsn);
3699 			 */
3700 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3701 				/*
3702 				 * If the send queue is empty then it's
3703 				 * the next sequence number that will be
3704 				 * assigned, so we subtract one from this to
3705 				 * get the one we last sent.
3706 				 */
3707 				tp1->rec.data.fast_retran_tsn = sending_seq;
3708 			} else {
3709 				/*
3710 				 * If there are chunks on the send queue
3711 				 * (unsent data that has made it from the
3712 				 * stream queues but not out the door), we
3713 				 * take the first one (which will have the
3714 				 * lowest TSN) and subtract one to get the
3715 				 * one we last sent.
3716 				 */
3717 				struct sctp_tmit_chunk *ttt;
3718 
3719 				ttt = TAILQ_FIRST(&asoc->send_queue);
3720 				tp1->rec.data.fast_retran_tsn =
3721 					ttt->rec.data.tsn;
3722 			}
3723 
3724 			if (tp1->do_rtt) {
3725 				/*
3726 				 * this guy had a RTO calculation pending on
3727 				 * it, cancel it
3728 				 */
3729 				if ((tp1->whoTo != NULL) &&
3730 				    (tp1->whoTo->rto_needed == 0)) {
3731 					tp1->whoTo->rto_needed = 1;
3732 				}
3733 				tp1->do_rtt = 0;
3734 			}
3735 			if (alt != tp1->whoTo) {
3736 				/* yes, there is an alternate. */
3737 				sctp_free_remote_addr(tp1->whoTo);
3738 				/*sa_ignore FREED_MEMORY*/
3739 				tp1->whoTo = alt;
3740 				atomic_add_int(&alt->ref_count, 1);
3741 			}
3742 		}
3743 	}
3744 }
3745 
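/*
 * In sctp_strike_gap_ack_chunks() above, the tp1->sent field doubles as a
 * strike counter: each qualifying SACK increments it until it reaches
 * SCTP_DATAGRAM_RESEND (three strikes), at which point the chunk leaves
 * flight accounting, its bytes are credited back to the peer's rwnd, and
 * it is marked for fast retransmission. With CMT on, the retransmission
 * destination is chosen by the RTX_SSTHRESH policy; with CMT off it stays
 * on the same destination unless SCTP_FR_TO_ALTERNATE is defined. The DAC
 * variant can add one extra strike per SACK because the DAC flag means two
 * packets arrived at the peer after the missing TSN.
 */
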
3746 struct sctp_tmit_chunk *
3747 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3748     struct sctp_association *asoc)
3749 {
3750 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3751 	struct timeval now;
3752 	int now_filled = 0;
3753 
3754 	if (asoc->prsctp_supported == 0) {
3755 		return (NULL);
3756 	}
3757 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3758 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3759 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3760 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3761 			/* no chance to advance, out of here */
3762 			break;
3763 		}
3764 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3765 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3766 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3767 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3768 					       asoc->advanced_peer_ack_point,
3769 					       tp1->rec.data.tsn, 0, 0);
3770 			}
3771 		}
3772 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3773 			/*
3774 			 * We can't fwd-tsn past any that are reliable aka
3775 			 * retransmitted until the asoc fails.
3776 			 */
3777 			break;
3778 		}
3779 		if (!now_filled) {
3780 			(void)SCTP_GETTIME_TIMEVAL(&now);
3781 			now_filled = 1;
3782 		}
3783 		/*
3784 		 * Now we have a chunk which is marked for another
3785 		 * retransmission to a PR-stream but has either run out of
3786 		 * its chances already OR has been marked to skip now. Can we
3787 		 * skip it if it's a resend?
3788 		 */
3789 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3790 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3791 			/*
3792 			 * Now is this one marked for resend and its time is
3793 			 * now up?
3794 			 */
3795 #if !(defined(__FreeBSD__)  && !defined(__Userspace__))
3796 			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
3797 #else
3798 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3799 #endif
3800 				/* Yes so drop it */
3801 				if (tp1->data) {
3802 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3803 					    1, SCTP_SO_NOT_LOCKED);
3804 				}
3805 			} else {
3806 				/*
3807 				 * No, we are done when we hit one marked for
3808 				 * resend whose time has not expired.
3809 				 */
3810 				break;
3811 			}
3812 		}
3813 		/*
3814 		 * Ok, now if this chunk is marked to be dropped we can clean
3815 		 * up the chunk, advance our peer ack point, and check
3816 		 * the next chunk.
3817 		 */
3818 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3819 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3820 			/* advancedPeerAckPoint goes forward */
3821 			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3822 				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3823 				a_adv = tp1;
3824 			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3825 				/* No update but we do save the chk */
3826 				a_adv = tp1;
3827 			}
3828 		} else {
3829 			/*
3830 			 * If it is still in RESEND we can advance no
3831 			 * further
3832 			 */
3833 			break;
3834 		}
3835 	}
3836 	return (a_adv);
3837 }
3838 
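/*
 * Example for sctp_try_advance_peer_ack_point() above: with a sent queue
 * of PR-SCTP TSN 5 (SCTP_FORWARD_TSN_SKIP), PR-SCTP TSN 6
 * (SCTP_DATAGRAM_NR_ACKED) and TSN 7 (still in flight),
 * advanced_peer_ack_point moves to 6 and the chunk for TSN 6 is returned;
 * the walk stops at TSN 7 because only abandoned or NR-acked PR-SCTP
 * chunks may be skipped over.
 */
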
3839 static int
3840 sctp_fs_audit(struct sctp_association *asoc)
3841 {
3842 	struct sctp_tmit_chunk *chk;
3843 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3844 	int ret;
3845 #ifndef INVARIANTS
3846 	int entry_flight, entry_cnt;
3847 #endif
3848 
3849 	ret = 0;
3850 #ifndef INVARIANTS
3851 	entry_flight = asoc->total_flight;
3852 	entry_cnt = asoc->total_flight_count;
3853 #endif
3854 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3855 		return (0);
3856 
3857 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3858 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3859 			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3860 			            chk->rec.data.tsn,
3861 			            chk->send_size,
3862 			            chk->snd_count);
3863 			inflight++;
3864 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3865 			resend++;
3866 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3867 			inbetween++;
3868 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3869 			above++;
3870 		} else {
3871 			acked++;
3872 		}
3873 	}
3874 
3875 	if ((inflight > 0) || (inbetween > 0)) {
3876 #ifdef INVARIANTS
3877 		panic("Flight size-express incorrect? \n");
3878 #else
3879 		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3880 		            entry_flight, entry_cnt);
3881 
3882 		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3883 			    inflight, inbetween, resend, above, acked);
3884 		ret = 1;
3885 #endif
3886 	}
3887 	return (ret);
3888 }
3889 
3890 static void
3891 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3892                            struct sctp_association *asoc,
3893                            struct sctp_tmit_chunk *tp1)
3894 {
3895 	tp1->window_probe = 0;
3896 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3897 		/* TSNs skipped; we do NOT move back. */
3898 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3899 			       tp1->whoTo ? tp1->whoTo->flight_size : 0,
3900 			       tp1->book_size,
3901 			       (uint32_t)(uintptr_t)tp1->whoTo,
3902 			       tp1->rec.data.tsn);
3903 		return;
3904 	}
3905 	/* First setup this by shrinking flight */
3906 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3907 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
3908 									     tp1);
3909 	}
3910 	sctp_flight_size_decrease(tp1);
3911 	sctp_total_flight_decrease(stcb, tp1);
3912 	/* Now mark for resend */
3913 	tp1->sent = SCTP_DATAGRAM_RESEND;
3914 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3915 
3916 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3917 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3918 			       tp1->whoTo->flight_size,
3919 			       tp1->book_size,
3920 			       (uint32_t)(uintptr_t)tp1->whoTo,
3921 			       tp1->rec.data.tsn);
3922 	}
3923 }
3924 
3925 void
3926 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3927                          uint32_t rwnd, int *abort_now, int ecne_seen)
3928 {
3929 	struct sctp_nets *net;
3930 	struct sctp_association *asoc;
3931 	struct sctp_tmit_chunk *tp1, *tp2;
3932 	uint32_t old_rwnd;
3933 	int win_probe_recovery = 0;
3934 	int win_probe_recovered = 0;
3935 	int j, done_once = 0;
3936 	int rto_ok = 1;
3937 	uint32_t send_s;
3938 
3939 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3940 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3941 		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3942 	}
3943 	SCTP_TCB_LOCK_ASSERT(stcb);
3944 #ifdef SCTP_ASOCLOG_OF_TSNS
3945 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3946 	stcb->asoc.cumack_log_at++;
3947 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3948 		stcb->asoc.cumack_log_at = 0;
3949 	}
3950 #endif
3951 	asoc = &stcb->asoc;
3952 	old_rwnd = asoc->peers_rwnd;
3953 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3954 		/* old ack */
3955 		return;
3956 	} else if (asoc->last_acked_seq == cumack) {
3957 		/* Window update sack */
3958 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3959 						    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3960 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3961 			/* SWS sender side engages */
3962 			asoc->peers_rwnd = 0;
3963 		}
3964 		if (asoc->peers_rwnd > old_rwnd) {
3965 			goto again;
3966 		}
3967 		return;
3968 	}
3969 
3970 	/* First setup for CC stuff */
3971 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3972 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3973 			/* Drag along the window_tsn for cwr's */
3974 			net->cwr_window_tsn = cumack;
3975 		}
3976 		net->prev_cwnd = net->cwnd;
3977 		net->net_ack = 0;
3978 		net->net_ack2 = 0;
3979 
3980 		/*
3981 		 * CMT: Reset CUC and Fast recovery algo variables before
3982 		 * SACK processing
3983 		 */
3984 		net->new_pseudo_cumack = 0;
3985 		net->will_exit_fast_recovery = 0;
3986 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3987 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
3988 		}
3989 	}
3990 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3991 		tp1 = TAILQ_LAST(&asoc->sent_queue,
3992 				 sctpchunk_listhead);
3993 		send_s = tp1->rec.data.tsn + 1;
3994 	} else {
3995 		send_s = asoc->sending_seq;
3996 	}
3997 	if (SCTP_TSN_GE(cumack, send_s)) {
3998 		struct mbuf *op_err;
3999 		char msg[SCTP_DIAG_INFO_LEN];
4000 
4001 		*abort_now = 1;
4002 		/* XXX */
4003 		SCTP_SNPRINTF(msg, sizeof(msg),
4004 		              "Cum ack %8.8x greater or equal than TSN %8.8x",
4005 		              cumack, send_s);
4006 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4007 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4008 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4009 		return;
4010 	}
4011 	asoc->this_sack_highest_gap = cumack;
4012 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4013 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4014 			       stcb->asoc.overall_error_count,
4015 			       0,
4016 			       SCTP_FROM_SCTP_INDATA,
4017 			       __LINE__);
4018 	}
4019 	stcb->asoc.overall_error_count = 0;
4020 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4021 		/* process the new consecutive TSN first */
4022 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4023 			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4024 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4025 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
4026 				}
4027 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4028 					/*
4029 					 * If it is less than ACKED, it is
4030 					 * now no longer in flight. Higher
4031 					 * values may occur during marking
4032 					 */
4033 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4034 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4035 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4036 								       tp1->whoTo->flight_size,
4037 								       tp1->book_size,
4038 								       (uint32_t)(uintptr_t)tp1->whoTo,
4039 								       tp1->rec.data.tsn);
4040 						}
4041 						sctp_flight_size_decrease(tp1);
4042 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4043 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4044 														     tp1);
4045 						}
4046 						/* sa_ignore NO_NULL_CHK */
4047 						sctp_total_flight_decrease(stcb, tp1);
4048 					}
4049 					tp1->whoTo->net_ack += tp1->send_size;
4050 					if (tp1->snd_count < 2) {
4051 						/*
4052 						 * True non-retransmitted
4053 						 * chunk
4054 						 */
4055 						tp1->whoTo->net_ack2 +=
4056 							tp1->send_size;
4057 
4058 						/* update RTO too? */
4059 						if (tp1->do_rtt) {
4060 							if (rto_ok &&
4061 							    sctp_calculate_rto(stcb,
4062 									       &stcb->asoc,
4063 									       tp1->whoTo,
4064 									       &tp1->sent_rcv_time,
4065 									       SCTP_RTT_FROM_DATA)) {
4066 								rto_ok = 0;
4067 							}
4068 							if (tp1->whoTo->rto_needed == 0) {
4069 								tp1->whoTo->rto_needed = 1;
4070 							}
4071 							tp1->do_rtt = 0;
4072 						}
4073 					}
4074 					/*
4075 					 * CMT: CUCv2 algorithm. From the
4076 					 * cumack'd TSNs, for each TSN being
4077 					 * acked for the first time, set the
4078 					 * following variables for the
4079 					 * corresp destination.
4080 					 * new_pseudo_cumack will trigger a
4081 					 * cwnd update.
4082 					 * find_(rtx_)pseudo_cumack will
4083 					 * trigger search for the next
4084 					 * expected (rtx-)pseudo-cumack.
4085 					 */
4086 					tp1->whoTo->new_pseudo_cumack = 1;
4087 					tp1->whoTo->find_pseudo_cumack = 1;
4088 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4089 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4090 						/* sa_ignore NO_NULL_CHK */
4091 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4092 					}
4093 				}
4094 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4095 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4096 				}
4097 				if (tp1->rec.data.chunk_was_revoked) {
4098 					/* deflate the cwnd */
4099 					tp1->whoTo->cwnd -= tp1->book_size;
4100 					tp1->rec.data.chunk_was_revoked = 0;
4101 				}
4102 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4103 					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4104 						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4105 #ifdef INVARIANTS
4106 					} else {
4107 						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4108 #endif
4109 					}
4110 				}
4111 				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4112 				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4113 				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4114 					asoc->trigger_reset = 1;
4115 				}
4116 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4117 				if (tp1->data) {
4118 					/* sa_ignore NO_NULL_CHK */
4119 					sctp_free_bufspace(stcb, asoc, tp1, 1);
4120 					sctp_m_freem(tp1->data);
4121 					tp1->data = NULL;
4122 				}
4123 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4124 					sctp_log_sack(asoc->last_acked_seq,
4125 						      cumack,
4126 						      tp1->rec.data.tsn,
4127 						      0,
4128 						      0,
4129 						      SCTP_LOG_FREE_SENT);
4130 				}
4131 				asoc->sent_queue_cnt--;
4132 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4133 			} else {
4134 				break;
4135 			}
4136 		}
4137 	}
4138 #if defined(__Userspace__)
4139 	if (stcb->sctp_ep->recv_callback) {
4140 		if (stcb->sctp_socket) {
4141 			uint32_t inqueue_bytes, sb_free_now;
4142 			struct sctp_inpcb *inp;
4143 
4144 			inp = stcb->sctp_ep;
4145 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4146 			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4147 
4148 			/* check if the amount free in the send socket buffer crossed the threshold */
4149 			if (inp->send_callback &&
4150 			    (((inp->send_sb_threshold > 0) &&
4151 			      (sb_free_now >= inp->send_sb_threshold) &&
4152 			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
4153 			     (inp->send_sb_threshold == 0))) {
4154 				atomic_add_int(&stcb->asoc.refcnt, 1);
4155 				SCTP_TCB_UNLOCK(stcb);
4156 				inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
4157 				SCTP_TCB_LOCK(stcb);
4158 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4159 			}
4160 		}
4161 	} else if (stcb->sctp_socket) {
4162 #else
4163 	/* sa_ignore NO_NULL_CHK */
4164 	if (stcb->sctp_socket) {
4165 #endif
4166 #if defined(__APPLE__) && !defined(__Userspace__)
4167 		struct socket *so;
4168 
4169 #endif
4170 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4171 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4172 			/* sa_ignore NO_NULL_CHK */
4173 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4174 		}
4175 #if defined(__APPLE__) && !defined(__Userspace__)
4176 		so = SCTP_INP_SO(stcb->sctp_ep);
4177 		atomic_add_int(&stcb->asoc.refcnt, 1);
4178 		SCTP_TCB_UNLOCK(stcb);
4179 		SCTP_SOCKET_LOCK(so, 1);
4180 		SCTP_TCB_LOCK(stcb);
4181 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4182 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4183 			/* assoc was freed while we were unlocked */
4184 			SCTP_SOCKET_UNLOCK(so, 1);
4185 			return;
4186 		}
4187 #endif
4188 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4189 #if defined(__APPLE__) && !defined(__Userspace__)
4190 		SCTP_SOCKET_UNLOCK(so, 1);
4191 #endif
4192 	} else {
4193 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4194 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4195 		}
4196 	}
4197 
4198 	/* JRS - Use the congestion control given in the CC module */
4199 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4200 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4201 			if (net->net_ack2 > 0) {
4202 				/*
4203 				 * Karn's rule applies to clearing error count, this
4204 				 * is optional.
4205 				 */
4206 				net->error_count = 0;
4207 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4208 					/* addr came good */
4209 					net->dest_state |= SCTP_ADDR_REACHABLE;
4210 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4211 					                0, (void *)net, SCTP_SO_NOT_LOCKED);
4212 				}
4213 				if (net == stcb->asoc.primary_destination) {
4214 					if (stcb->asoc.alternate) {
4215 						/* release the alternate, primary is good */
4216 						sctp_free_remote_addr(stcb->asoc.alternate);
4217 						stcb->asoc.alternate = NULL;
4218 					}
4219 				}
4220 				if (net->dest_state & SCTP_ADDR_PF) {
4221 					net->dest_state &= ~SCTP_ADDR_PF;
4222 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4223 					                stcb->sctp_ep, stcb, net,
4224 					                SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4225 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4226 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4227 					/* Done with this net */
4228 					net->net_ack = 0;
4229 				}
4230 				/* restore any doubled timers */
4231 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
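				/*
				 * The assignment above rebuilds the RTO from the
				 * smoothed state kept in fixed point: assuming
				 * lastsa holds SRTT scaled by 8 (hence the shift
				 * by SCTP_RTT_SHIFT) and lastsv holds the scaled
				 * variance term, this is the classic
				 * RTO = SRTT + 4 * RTTVAR estimator, clamped to
				 * [minrto, maxrto] just below.
				 */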
4232 				if (net->RTO < stcb->asoc.minrto) {
4233 					net->RTO = stcb->asoc.minrto;
4234 				}
4235 				if (net->RTO > stcb->asoc.maxrto) {
4236 					net->RTO = stcb->asoc.maxrto;
4237 				}
4238 			}
4239 		}
4240 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4241 	}
4242 	asoc->last_acked_seq = cumack;
4243 
4244 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4245 		/* nothing left in-flight */
4246 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4247 			net->flight_size = 0;
4248 			net->partial_bytes_acked = 0;
4249 		}
4250 		asoc->total_flight = 0;
4251 		asoc->total_flight_count = 0;
4252 	}
4253 
4254 	/* RWND update */
4255 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4256 					    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4257 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4258 		/* SWS sender side engages */
4259 		asoc->peers_rwnd = 0;
4260 	}
4261 	if (asoc->peers_rwnd > old_rwnd) {
4262 		win_probe_recovery = 1;
4263 	}
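	/*
	 * Worked example for the rwnd update above: if the peer advertises
	 * rwnd = 64000 while total_flight = 3000 bytes across 10 chunks and
	 * the per-chunk overhead sctp_peer_chunk_oh is 256, then
	 * peers_rwnd = 64000 - (3000 + 10 * 256) = 58440. A result below the
	 * sctp_sws_sender threshold is forced to 0, engaging sender-side
	 * silly window syndrome avoidance.
	 */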
4264 	/* Now assure a timer is running where data is queued */
4265 again:
4266 	j = 0;
4267 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4268 		if (win_probe_recovery && (net->window_probe)) {
4269 			win_probe_recovered = 1;
4270 			/*
4271 			 * Find the first chunk that was used for a window
4272 			 * probe and clear its window_probe flag.
4273 			 */
4274 			/* sa_ignore FREED_MEMORY */
4275 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4276 				if (tp1->window_probe) {
4277 					/* move back to data send queue */
4278 					sctp_window_probe_recovery(stcb, asoc, tp1);
4279 					break;
4280 				}
4281 			}
4282 		}
4283 		if (net->flight_size) {
4284 			j++;
4285 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4286 			if (net->window_probe) {
4287 				net->window_probe = 0;
4288 			}
4289 		} else {
4290 			if (net->window_probe) {
4291 				/* In window probes we must assure a timer is still running there */
4292 				net->window_probe = 0;
4293 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4294 					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4295 				}
4296 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4297 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4298 				                stcb, net,
4299 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4300 			}
4301 		}
4302 	}
4303 	if ((j == 0) &&
4304 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4305 	    (asoc->sent_queue_retran_cnt == 0) &&
4306 	    (win_probe_recovered == 0) &&
4307 	    (done_once == 0)) {
4308 		/* huh, this should not happen unless all packets
4309 		 * are PR-SCTP and marked to skip of course.
4310 		 */
4311 		if (sctp_fs_audit(asoc)) {
4312 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4313 				net->flight_size = 0;
4314 			}
4315 			asoc->total_flight = 0;
4316 			asoc->total_flight_count = 0;
4317 			asoc->sent_queue_retran_cnt = 0;
4318 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4319 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4320 					sctp_flight_size_increase(tp1);
4321 					sctp_total_flight_increase(stcb, tp1);
4322 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4323 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4324 				}
4325 			}
4326 		}
4327 		done_once = 1;
4328 		goto again;
4329 	}
4330 	/**********************************/
4331 	/* Now what about shutdown issues */
4332 	/**********************************/
4333 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4334 		/* nothing left on the send queue, consider done */
4335 		/* clean up */
4336 		if ((asoc->stream_queue_cnt == 1) &&
4337 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4338 		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4339 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
4340 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4341 		}
4342 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4343 		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4344 		    (asoc->stream_queue_cnt == 1) &&
4345 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4346 			struct mbuf *op_err;
4347 
4348 			*abort_now = 1;
4349 			/* XXX */
4350 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4351 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4352 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4353 			return;
4354 		}
4355 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4356 		    (asoc->stream_queue_cnt == 0)) {
4357 			struct sctp_nets *netp;
4358 
4359 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4360 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4361 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4362 			}
4363 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4364 			sctp_stop_timers_for_shutdown(stcb);
4365 			if (asoc->alternate) {
4366 				netp = asoc->alternate;
4367 			} else {
4368 				netp = asoc->primary_destination;
4369 			}
4370 			sctp_send_shutdown(stcb, netp);
4371 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4372 					 stcb->sctp_ep, stcb, netp);
4373 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4374 					 stcb->sctp_ep, stcb, NULL);
4375 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4376 			   (asoc->stream_queue_cnt == 0)) {
4377 			struct sctp_nets *netp;
4378 
4379 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4380 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4381 			sctp_stop_timers_for_shutdown(stcb);
4382 			if (asoc->alternate) {
4383 				netp = asoc->alternate;
4384 			} else {
4385 				netp = asoc->primary_destination;
4386 			}
4387 			sctp_send_shutdown_ack(stcb, netp);
4388 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4389 					 stcb->sctp_ep, stcb, netp);
4390 		}
4391 	}
4392 	/*********************************************/
4393 	/* Here we perform PR-SCTP procedures        */
4394 	/* (section 4.2)                             */
4395 	/*********************************************/
4396 	/* C1. update advancedPeerAckPoint */
4397 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4398 		asoc->advanced_peer_ack_point = cumack;
4399 	}
4400 	/* PR-SCTP issues need to be addressed too */
4401 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4402 		struct sctp_tmit_chunk *lchk;
4403 		uint32_t old_adv_peer_ack_point;
4404 
4405 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4406 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4407 		/* C3. See if we need to send a Fwd-TSN */
4408 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4409 			/*
4410 			 * ISSUE with ECN, see FWD-TSN processing.
4411 			 */
4412 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4413 				send_forward_tsn(stcb, asoc);
4414 			} else if (lchk) {
4415 				/* try to FR fwd-tsn's that get lost too */
4416 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4417 					send_forward_tsn(stcb, asoc);
4418 				}
4419 			}
4420 		}
4421 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4422 			if (lchk->whoTo != NULL) {
4423 				break;
4424 			}
4425 		}
4426 		if (lchk != NULL) {
4427 			/* Assure a timer is up */
4428 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4429 			                 stcb->sctp_ep, stcb, lchk->whoTo);
4430 		}
4431 	}
4432 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4433 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4434 			       rwnd,
4435 			       stcb->asoc.peers_rwnd,
4436 			       stcb->asoc.total_flight,
4437 			       stcb->asoc.total_output_queue_size);
4438 	}
4439 }
4440 
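/*
 * sctp_express_handle_sack() above is the fast path for SACKs that only
 * move the cumulative ack (no gap ack blocks and no duplicate TSNs to
 * process); sctp_handle_sack() below is the slow path that additionally
 * walks the gap ack blocks, checks for revoked chunks, and runs the
 * fast-retransmit strike logic.
 */
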
4441 void
4442 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4443                  struct sctp_tcb *stcb,
4444                  uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4445                  int *abort_now, uint8_t flags,
4446                  uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4447 {
4448 	struct sctp_association *asoc;
4449 	struct sctp_tmit_chunk *tp1, *tp2;
4450 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4451 	uint16_t wake_him = 0;
4452 	uint32_t send_s = 0;
4453 	long j;
4454 	int accum_moved = 0;
4455 	int will_exit_fast_recovery = 0;
4456 	uint32_t a_rwnd, old_rwnd;
4457 	int win_probe_recovery = 0;
4458 	int win_probe_recovered = 0;
4459 	struct sctp_nets *net = NULL;
4460 	int done_once;
4461 	int rto_ok = 1;
4462 	uint8_t reneged_all = 0;
4463 	uint8_t cmt_dac_flag;
4464 	/*
4465 	 * we take any chance we can to service our queues since we cannot
4466 	 * get awoken when the socket is read from :<
4467 	 */
4468 	/*
4469 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4470 	 * old sack, if so discard. 2) If there is nothing left in the send
4471 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4472 	 * too, update any rwnd change and verify no timers are running.
4473 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4474 	 * moved process these first and note that it moved. 4) Process any
4475 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4476 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4477 	 * sync up flightsizes and things, stop all timers and also check
4478 	 * for shutdown_pending state. If so then go ahead and send off the
4479 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4480 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4481 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4482 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4483 	 * if in shutdown_recv state.
4484 	 */
4485 	SCTP_TCB_LOCK_ASSERT(stcb);
4486 	/* CMT DAC algo */
4487 	this_sack_lowest_newack = 0;
4488 	SCTP_STAT_INCR(sctps_slowpath_sack);
4489 	last_tsn = cum_ack;
4490 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4491 #ifdef SCTP_ASOCLOG_OF_TSNS
4492 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4493 	stcb->asoc.cumack_log_at++;
4494 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4495 		stcb->asoc.cumack_log_at = 0;
4496 	}
4497 #endif
4498 	a_rwnd = rwnd;
4499 
4500 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4501 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4502 		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4503 	}
4504 
4505 	old_rwnd = stcb->asoc.peers_rwnd;
4506 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4507 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4508 		               stcb->asoc.overall_error_count,
4509 		               0,
4510 		               SCTP_FROM_SCTP_INDATA,
4511 		               __LINE__);
4512 	}
4513 	stcb->asoc.overall_error_count = 0;
4514 	asoc = &stcb->asoc;
4515 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4516 		sctp_log_sack(asoc->last_acked_seq,
4517 		              cum_ack,
4518 		              0,
4519 		              num_seg,
4520 		              num_dup,
4521 		              SCTP_LOG_NEW_SACK);
4522 	}
4523 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4524 		uint16_t i;
4525 		uint32_t *dupdata, dblock;
4526 
4527 		for (i = 0; i < num_dup; i++) {
4528 			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4529 			                                    sizeof(uint32_t), (uint8_t *)&dblock);
4530 			if (dupdata == NULL) {
4531 				break;
4532 			}
4533 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4534 		}
4535 	}
4536 	/* reality check */
4537 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4538 		tp1 = TAILQ_LAST(&asoc->sent_queue,
4539 				 sctpchunk_listhead);
4540 		send_s = tp1->rec.data.tsn + 1;
4541 	} else {
4542 		tp1 = NULL;
4543 		send_s = asoc->sending_seq;
4544 	}
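	/*
	 * Illustrative example: if the highest TSN on the sent queue is
	 * 0x63, then send_s is 0x64, the first TSN we have *not* sent.  A
	 * SACK whose cum-ack is >= 0x64 would acknowledge data that was
	 * never transmitted, which is exactly what the check below
	 * rejects.
	 */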
4545 	if (SCTP_TSN_GE(cum_ack, send_s)) {
4546 		struct mbuf *op_err;
4547 		char msg[SCTP_DIAG_INFO_LEN];
4548 
4549 		/*
4550 		 * no way, we have not even sent this TSN out yet.
4551 		 * Peer is hopelessly messed up with us.
4552 		 */
4553 		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4554 			    cum_ack, send_s);
4555 		if (tp1) {
4556 			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4557 				    tp1->rec.data.tsn, (void *)tp1);
4558 		}
4559 	hopeless_peer:
4560 		*abort_now = 1;
4561 		/* XXX */
		SCTP_SNPRINTF(msg, sizeof(msg),
		              "Cum ack %8.8x greater than or equal to TSN %8.8x",
		              cum_ack, send_s);
4565 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4566 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4567 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4568 		return;
4569 	}
4570 	/**********************/
4571 	/* 1) check the range */
4572 	/**********************/
4573 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4574 		/* acking something behind */
4575 		return;
4576 	}
4577 
4578 	/* update the Rwnd of the peer */
4579 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4580 	    TAILQ_EMPTY(&asoc->send_queue) &&
4581 	    (asoc->stream_queue_cnt == 0)) {
4582 		/* nothing left on send/sent and strmq */
4583 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4584 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4585 			                  asoc->peers_rwnd, 0, 0, a_rwnd);
4586 		}
4587 		asoc->peers_rwnd = a_rwnd;
4588 		if (asoc->sent_queue_retran_cnt) {
4589 			asoc->sent_queue_retran_cnt = 0;
4590 		}
4591 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4592 			/* SWS sender side engages */
4593 			asoc->peers_rwnd = 0;
4594 		}
4595 		/* stop any timers */
4596 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4597 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4598 			                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4599 			net->partial_bytes_acked = 0;
4600 			net->flight_size = 0;
4601 		}
4602 		asoc->total_flight = 0;
4603 		asoc->total_flight_count = 0;
4604 		return;
4605 	}
	/*
	 * We init net_ack and net_ack2 to 0.  These are used to track two
	 * things: net_ack accumulates the total bytes acked, while
	 * net_ack2 tracks the total bytes acked that are unambiguous,
	 * i.e. were never retransmitted.  We track both on a
	 * per-destination-address basis.
	 */
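	/*
	 * For example, a chunk with snd_count == 1 (never retransmitted)
	 * contributes its size to both net_ack and net_ack2 of its
	 * destination, while a retransmitted chunk (snd_count >= 2) only
	 * contributes to net_ack: its acknowledgment is ambiguous, so it
	 * is also excluded from the RTT measurement below (Karn's rule).
	 */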
4613 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4614 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4615 			/* Drag along the window_tsn for cwr's */
4616 			net->cwr_window_tsn = cum_ack;
4617 		}
4618 		net->prev_cwnd = net->cwnd;
4619 		net->net_ack = 0;
4620 		net->net_ack2 = 0;
4621 
4622 		/*
4623 		 * CMT: Reset CUC and Fast recovery algo variables before
4624 		 * SACK processing
4625 		 */
4626 		net->new_pseudo_cumack = 0;
4627 		net->will_exit_fast_recovery = 0;
4628 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4629 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
4630 		}
4631 
4632 		/*
4633 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4634 		 * to be greater than the cumack. Also reset saw_newack to 0
4635 		 * for all dests.
4636 		 */
4637 		net->saw_newack = 0;
4638 		net->this_sack_highest_newack = last_tsn;
4639 	}
4640 	/* process the new consecutive TSN first */
4641 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4642 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4643 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4644 				accum_moved = 1;
4645 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
					/*
					 * If it is less than ACKED, it is
					 * now no longer in flight.  Higher
					 * values may occur during marking.
					 */
4651 					if ((tp1->whoTo->dest_state &
4652 					     SCTP_ADDR_UNCONFIRMED) &&
4653 					    (tp1->snd_count < 2)) {
						/*
						 * If there was no retran,
						 * the address is
						 * unconfirmed, we sent to
						 * it, and it is now being
						 * SACKed: it is confirmed,
						 * so mark it so.
						 */
4662 						tp1->whoTo->dest_state &=
4663 							~SCTP_ADDR_UNCONFIRMED;
4664 					}
4665 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4666 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4667 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4668 							               tp1->whoTo->flight_size,
4669 							               tp1->book_size,
4670 							               (uint32_t)(uintptr_t)tp1->whoTo,
4671 							               tp1->rec.data.tsn);
4672 						}
4673 						sctp_flight_size_decrease(tp1);
4674 						sctp_total_flight_decrease(stcb, tp1);
4675 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4676 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
4677 														     tp1);
4678 						}
4679 					}
4680 					tp1->whoTo->net_ack += tp1->send_size;
4681 
4682 					/* CMT SFR and DAC algos */
4683 					this_sack_lowest_newack = tp1->rec.data.tsn;
4684 					tp1->whoTo->saw_newack = 1;
4685 
4686 					if (tp1->snd_count < 2) {
4687 						/*
4688 						 * True non-retransmitted
4689 						 * chunk
4690 						 */
4691 						tp1->whoTo->net_ack2 +=
4692 							tp1->send_size;
4693 
4694 						/* update RTO too? */
4695 						if (tp1->do_rtt) {
4696 							if (rto_ok &&
4697 							    sctp_calculate_rto(stcb,
4698 									       &stcb->asoc,
4699 									       tp1->whoTo,
4700 									       &tp1->sent_rcv_time,
4701 									       SCTP_RTT_FROM_DATA)) {
4702 								rto_ok = 0;
4703 							}
4704 							if (tp1->whoTo->rto_needed == 0) {
4705 								tp1->whoTo->rto_needed = 1;
4706 							}
4707 							tp1->do_rtt = 0;
4708 						}
4709 					}
					/*
					 * CMT: CUCv2 algorithm.  From the
					 * cumack'd TSNs, for each TSN being
					 * acked for the first time, set the
					 * following variables for the
					 * corresponding destination.
					 * new_pseudo_cumack will trigger a
					 * cwnd update.
					 * find_(rtx_)pseudo_cumack will
					 * trigger a search for the next
					 * expected (rtx-)pseudo-cumack.
					 */
4722 					tp1->whoTo->new_pseudo_cumack = 1;
4723 					tp1->whoTo->find_pseudo_cumack = 1;
4724 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4725 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4726 						sctp_log_sack(asoc->last_acked_seq,
4727 						              cum_ack,
4728 						              tp1->rec.data.tsn,
4729 						              0,
4730 						              0,
4731 						              SCTP_LOG_TSN_ACKED);
4732 					}
4733 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4734 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4735 					}
4736 				}
4737 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4738 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4739 #ifdef SCTP_AUDITING_ENABLED
4740 					sctp_audit_log(0xB3,
4741 					               (asoc->sent_queue_retran_cnt & 0x000000ff));
4742 #endif
4743 				}
4744 				if (tp1->rec.data.chunk_was_revoked) {
4745 					/* deflate the cwnd */
4746 					tp1->whoTo->cwnd -= tp1->book_size;
4747 					tp1->rec.data.chunk_was_revoked = 0;
4748 				}
4749 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4750 					tp1->sent = SCTP_DATAGRAM_ACKED;
4751 				}
4752 			}
4753 		} else {
4754 			break;
4755 		}
4756 	}
4757 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4758 	/* always set this up to cum-ack */
4759 	asoc->this_sack_highest_gap = last_tsn;
4760 
4761 	if ((num_seg > 0) || (num_nr_seg > 0)) {
		/*
		 * this_sack_highest_gap will increase while handling NEW
		 * segments; this_sack_highest_newack will increase while
		 * handling NEWLY ACKED chunks.  this_sack_lowest_newack is
		 * used for the CMT DAC algo.  saw_newack will also change.
		 */
4768 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4769 			&biggest_tsn_newly_acked, &this_sack_lowest_newack,
4770 			num_seg, num_nr_seg, &rto_ok)) {
4771 			wake_him++;
4772 		}
4773 		/*
4774 		 * validate the biggest_tsn_acked in the gap acks if
4775 		 * strict adherence is wanted.
4776 		 */
4777 		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4778 			/*
4779 			 * peer is either confused or we are under
4780 			 * attack. We must abort.
4781 			 */
4782 			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4783 				    biggest_tsn_acked, send_s);
4784 			goto hopeless_peer;
4785 		}
4786 	}
4787 	/*******************************************/
4788 	/* cancel ALL T3-send timer if accum moved */
4789 	/*******************************************/
4790 	if (asoc->sctp_cmt_on_off > 0) {
4791 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4792 			if (net->new_pseudo_cumack)
4793 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4794 				                stcb, net,
4795 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4796 		}
4797 	} else {
4798 		if (accum_moved) {
4799 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4800 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4801 				                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4802 			}
4803 		}
4804 	}
4805 	/********************************************/
4806 	/* drop the acked chunks from the sentqueue */
4807 	/********************************************/
4808 	asoc->last_acked_seq = cum_ack;
4809 
4810 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4811 		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4812 			break;
4813 		}
4814 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4815 			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4816 				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4817 #ifdef INVARIANTS
4818 			} else {
4819 				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4820 #endif
4821 			}
4822 		}
4823 		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4824 		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4825 		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4826 			asoc->trigger_reset = 1;
4827 		}
4828 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4829 		if (PR_SCTP_ENABLED(tp1->flags)) {
4830 			if (asoc->pr_sctp_cnt != 0)
4831 				asoc->pr_sctp_cnt--;
4832 		}
4833 		asoc->sent_queue_cnt--;
4834 		if (tp1->data) {
4835 			/* sa_ignore NO_NULL_CHK */
4836 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4837 			sctp_m_freem(tp1->data);
4838 			tp1->data = NULL;
4839 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4840 				asoc->sent_queue_cnt_removeable--;
4841 			}
4842 		}
4843 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4844 			sctp_log_sack(asoc->last_acked_seq,
4845 			              cum_ack,
4846 			              tp1->rec.data.tsn,
4847 			              0,
4848 			              0,
4849 			              SCTP_LOG_FREE_SENT);
4850 		}
4851 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4852 		wake_him++;
4853 	}
4854 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4855 #ifdef INVARIANTS
4856 		panic("Warning flight size is positive and should be 0");
4857 #else
		SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
		            asoc->total_flight);
4860 #endif
4861 		asoc->total_flight = 0;
4862 	}
4863 
4864 #if defined(__Userspace__)
4865 	if (stcb->sctp_ep->recv_callback) {
4866 		if (stcb->sctp_socket) {
4867 			uint32_t inqueue_bytes, sb_free_now;
4868 			struct sctp_inpcb *inp;
4869 
4870 			inp = stcb->sctp_ep;
4871 			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
4872 			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
4873 
4874 			/* check if the amount free in the send socket buffer crossed the threshold */
4875 			if (inp->send_callback &&
4876 			   (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
4877 			    (inp->send_sb_threshold == 0))) {
4878 				atomic_add_int(&stcb->asoc.refcnt, 1);
4879 				SCTP_TCB_UNLOCK(stcb);
4880 				inp->send_callback(stcb->sctp_socket, sb_free_now, inp->ulp_info);
4881 				SCTP_TCB_LOCK(stcb);
4882 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4883 			}
4884 		}
4885 	} else if ((wake_him) && (stcb->sctp_socket)) {
4886 #else
4887 	/* sa_ignore NO_NULL_CHK */
4888 	if ((wake_him) && (stcb->sctp_socket)) {
4889 #endif
4890 #if defined(__APPLE__) && !defined(__Userspace__)
4891 		struct socket *so;
4892 
4893 #endif
4894 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4895 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4896 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4897 		}
4898 #if defined(__APPLE__) && !defined(__Userspace__)
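		/*
		 * Lock-order dance: the socket lock must be taken before
		 * the TCB lock, so hold a reference on the association,
		 * drop the TCB lock, take the socket lock, and re-take
		 * the TCB lock.  The reference keeps the assoc from being
		 * freed while unlocked; the CLOSED_SOCKET check below
		 * catches the case where it was torn down anyway.
		 */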
4899 		so = SCTP_INP_SO(stcb->sctp_ep);
4900 		atomic_add_int(&stcb->asoc.refcnt, 1);
4901 		SCTP_TCB_UNLOCK(stcb);
4902 		SCTP_SOCKET_LOCK(so, 1);
4903 		SCTP_TCB_LOCK(stcb);
4904 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4905 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4906 			/* assoc was freed while we were unlocked */
4907 			SCTP_SOCKET_UNLOCK(so, 1);
4908 			return;
4909 		}
4910 #endif
4911 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4912 #if defined(__APPLE__) && !defined(__Userspace__)
4913 		SCTP_SOCKET_UNLOCK(so, 1);
4914 #endif
4915 	} else {
4916 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4917 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4918 		}
4919 	}
4920 
4921 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4922 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4923 			/* Setup so we will exit RFC2582 fast recovery */
4924 			will_exit_fast_recovery = 1;
4925 		}
4926 	}
	/*
	 * Check for revoked fragments:
	 *
	 * If the previous SACK had no frags, then nothing can have been
	 * revoked.  If the previous SACK had frags and we now have frags
	 * too (num_seg > 0), call sctp_check_for_revoked() to tell
	 * whether the peer revoked some of them.  Otherwise the peer
	 * revoked all ACKED fragments, since we had some before and now
	 * have NONE.
	 */
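	/*
	 * Worked example (illustrative): suppose the previous SACK
	 * gap-acked TSN 12 with cum-ack 10, so TSN 12 was marked ACKED.
	 * If this SACK still has cum-ack 10 but carries no gap blocks,
	 * the peer has reneged on TSN 12: below it is put back to
	 * SCTP_DATAGRAM_SENT and its size re-added to the flight.
	 */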
4936 
4937 	if (num_seg) {
4938 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4939 		asoc->saw_sack_with_frags = 1;
4940 	} else if (asoc->saw_sack_with_frags) {
4941 		int cnt_revoked = 0;
4942 
4943 		/* Peer revoked all dg's marked or acked */
4944 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4945 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4946 				tp1->sent = SCTP_DATAGRAM_SENT;
4947 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4948 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4949 					               tp1->whoTo->flight_size,
4950 					               tp1->book_size,
4951 					               (uint32_t)(uintptr_t)tp1->whoTo,
4952 					               tp1->rec.data.tsn);
4953 				}
4954 				sctp_flight_size_increase(tp1);
4955 				sctp_total_flight_increase(stcb, tp1);
4956 				tp1->rec.data.chunk_was_revoked = 1;
4957 				/*
4958 				 * To ensure that this increase in
4959 				 * flightsize, which is artificial,
4960 				 * does not throttle the sender, we
4961 				 * also increase the cwnd
4962 				 * artificially.
4963 				 */
4964 				tp1->whoTo->cwnd += tp1->book_size;
4965 				cnt_revoked++;
4966 			}
4967 		}
4968 		if (cnt_revoked) {
4969 			reneged_all = 1;
4970 		}
4971 		asoc->saw_sack_with_frags = 0;
4972 	}
4973 	if (num_nr_seg > 0)
4974 		asoc->saw_sack_with_nr_frags = 1;
4975 	else
4976 		asoc->saw_sack_with_nr_frags = 0;
4977 
4978 	/* JRS - Use the congestion control given in the CC module */
4979 	if (ecne_seen == 0) {
4980 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4981 			if (net->net_ack2 > 0) {
				/*
				 * Karn's rule applies to clearing the error
				 * count; this is optional.
				 */
4986 				net->error_count = 0;
4987 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4988 					/* addr came good */
4989 					net->dest_state |= SCTP_ADDR_REACHABLE;
4990 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4991 					                0, (void *)net, SCTP_SO_NOT_LOCKED);
4992 				}
4993 
4994 				if (net == stcb->asoc.primary_destination) {
4995 					if (stcb->asoc.alternate) {
4996 						/* release the alternate, primary is good */
4997 						sctp_free_remote_addr(stcb->asoc.alternate);
4998 						stcb->asoc.alternate = NULL;
4999 					}
5000 				}
5001 
5002 				if (net->dest_state & SCTP_ADDR_PF) {
5003 					net->dest_state &= ~SCTP_ADDR_PF;
5004 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
5005 					                stcb->sctp_ep, stcb, net,
5006 					                SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5007 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
5008 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
5009 					/* Done with this net */
5010 					net->net_ack = 0;
5011 				}
5012 				/* restore any doubled timers */
5013 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
5014 				if (net->RTO < stcb->asoc.minrto) {
5015 					net->RTO = stcb->asoc.minrto;
5016 				}
5017 				if (net->RTO > stcb->asoc.maxrto) {
5018 					net->RTO = stcb->asoc.maxrto;
5019 				}
5020 			}
5021 		}
5022 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
5023 	}
5024 
5025 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
5026 		/* nothing left in-flight */
5027 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5028 			/* stop all timers */
5029 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5030 			                stcb, net,
5031 			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
5032 			net->flight_size = 0;
5033 			net->partial_bytes_acked = 0;
5034 		}
5035 		asoc->total_flight = 0;
5036 		asoc->total_flight_count = 0;
5037 	}
5038 
5039 	/**********************************/
5040 	/* Now what about shutdown issues */
5041 	/**********************************/
5042 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
5043 		/* nothing left on sendqueue.. consider done */
5044 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5045 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5046 			                  asoc->peers_rwnd, 0, 0, a_rwnd);
5047 		}
5048 		asoc->peers_rwnd = a_rwnd;
5049 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5050 			/* SWS sender side engages */
5051 			asoc->peers_rwnd = 0;
5052 		}
5053 		/* clean up */
5054 		if ((asoc->stream_queue_cnt == 1) &&
5055 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5056 		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5057 		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
5058 			SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
5059 		}
5060 		if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5061 		     (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5062 		    (asoc->stream_queue_cnt == 1) &&
5063 		    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5064 			struct mbuf *op_err;
5065 
5066 			*abort_now = 1;
5067 			/* XXX */
5068 			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5069 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5070 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5071 			return;
5072 		}
5073 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5074 		    (asoc->stream_queue_cnt == 0)) {
5075 			struct sctp_nets *netp;
5076 
5077 			if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
5078 			    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5079 				SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5080 			}
5081 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
5082 			sctp_stop_timers_for_shutdown(stcb);
5083 			if (asoc->alternate) {
5084 				netp = asoc->alternate;
5085 			} else {
5086 				netp = asoc->primary_destination;
5087 			}
5088 			sctp_send_shutdown(stcb, netp);
5089 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5090 					 stcb->sctp_ep, stcb, netp);
5091 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5092 					 stcb->sctp_ep, stcb, NULL);
5093 			return;
5094 		} else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5095 			   (asoc->stream_queue_cnt == 0)) {
5096 			struct sctp_nets *netp;
5097 
5098 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5099 			SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5100 			sctp_stop_timers_for_shutdown(stcb);
5101 			if (asoc->alternate) {
5102 				netp = asoc->alternate;
5103 			} else {
5104 				netp = asoc->primary_destination;
5105 			}
5106 			sctp_send_shutdown_ack(stcb, netp);
5107 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5108 			                 stcb->sctp_ep, stcb, netp);
5109 			return;
5110 		}
5111 	}
5112 	/*
5113 	 * Now here we are going to recycle net_ack for a different use...
5114 	 * HEADS UP.
5115 	 */
5116 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5117 		net->net_ack = 0;
5118 	}
5119 
	/*
	 * CMT DAC algorithm: If the SACK's DAC flag was 0, then there is
	 * no extra marking to be done.  Setting this_sack_lowest_newack
	 * to the cum_ack will automatically ensure that.
	 */
5125 	if ((asoc->sctp_cmt_on_off > 0) &&
5126 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5127 	    (cmt_dac_flag == 0)) {
5128 		this_sack_lowest_newack = cum_ack;
5129 	}
5130 	if ((num_seg > 0) || (num_nr_seg > 0)) {
5131 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5132 		                           biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5133 	}
5134 	/* JRS - Use the congestion control given in the CC module */
5135 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5136 
5137 	/* Now are we exiting loss recovery ? */
5138 	if (will_exit_fast_recovery) {
5139 		/* Ok, we must exit fast recovery */
5140 		asoc->fast_retran_loss_recovery = 0;
5141 	}
5142 	if ((asoc->sat_t3_loss_recovery) &&
5143 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5144 		/* end satellite t3 loss recovery */
5145 		asoc->sat_t3_loss_recovery = 0;
5146 	}
5147 	/*
5148 	 * CMT Fast recovery
5149 	 */
5150 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5151 		if (net->will_exit_fast_recovery) {
5152 			/* Ok, we must exit fast recovery */
5153 			net->fast_retran_loss_recovery = 0;
5154 		}
5155 	}
5156 
5157 	/* Adjust and set the new rwnd value */
5158 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5159 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5160 		                  asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5161 	}
5162 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5163 	                                    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5164 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5165 		/* SWS sender side engages */
5166 		asoc->peers_rwnd = 0;
5167 	}
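	/*
	 * Illustrative numbers: with a_rwnd = 64000, total_flight = 8000,
	 * two chunks in flight and a per-chunk overhead of 256 bytes
	 * (the sctp_peer_chunk_oh sysctl), this yields
	 * peers_rwnd = 64000 - (8000 + 2 * 256) = 55488.
	 */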
5168 	if (asoc->peers_rwnd > old_rwnd) {
5169 		win_probe_recovery = 1;
5170 	}
5171 
	/*
	 * Now we must set things up so that a timer is running for every
	 * destination with outstanding data.
	 */
5176 	done_once = 0;
5177 again:
5178 	j = 0;
5179 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5180 		if (win_probe_recovery && (net->window_probe)) {
5181 			win_probe_recovered = 1;
			/*-
			 * Find the first chunk that was used for a
			 * window probe and clear the event.  Put it
			 * back into the send queue as if it had not
			 * been sent.
			 */
5188 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5189 				if (tp1->window_probe) {
5190 					sctp_window_probe_recovery(stcb, asoc, tp1);
5191 					break;
5192 				}
5193 			}
5194 		}
5195 		if (net->flight_size) {
5196 			j++;
5197 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5198 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5199 				                 stcb->sctp_ep, stcb, net);
5200 			}
5201 			if (net->window_probe) {
5202 				net->window_probe = 0;
5203 			}
5204 		} else {
5205 			if (net->window_probe) {
				/* For window probes we must ensure a timer is still running there */
5207 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5208 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5209 					                 stcb->sctp_ep, stcb, net);
5210 				}
5211 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5212 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5213 				                stcb, net,
5214 				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5215 			}
5216 		}
5217 	}
5218 	if ((j == 0) &&
5219 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5220 	    (asoc->sent_queue_retran_cnt == 0) &&
5221 	    (win_probe_recovered == 0) &&
5222 	    (done_once == 0)) {
		/* Huh, this should not happen unless all packets
		 * are PR-SCTP and marked to be skipped, of course.
		 */
5226 		if (sctp_fs_audit(asoc)) {
5227 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5228 				net->flight_size = 0;
5229 			}
5230 			asoc->total_flight = 0;
5231 			asoc->total_flight_count = 0;
5232 			asoc->sent_queue_retran_cnt = 0;
5233 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5234 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5235 					sctp_flight_size_increase(tp1);
5236 					sctp_total_flight_increase(stcb, tp1);
5237 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5238 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5239 				}
5240 			}
5241 		}
5242 		done_once = 1;
5243 		goto again;
5244 	}
5245 	/*********************************************/
5246 	/* Here we perform PR-SCTP procedures        */
5247 	/* (section 4.2)                             */
5248 	/*********************************************/
5249 	/* C1. update advancedPeerAckPoint */
5250 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5251 		asoc->advanced_peer_ack_point = cum_ack;
5252 	}
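	/*
	 * Illustrative example: with cum_ack = 10 and abandoned PR-SCTP
	 * chunks occupying TSNs 11-13 at the head of the sent queue, the
	 * call below can move advanced_peer_ack_point up to 13, and a
	 * FORWARD-TSN then tells the receiver to treat 11-13 as delivered
	 * and move its own cum-ack forward.
	 */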
5253 	/* C2. try to further move advancedPeerAckPoint ahead */
5254 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5255 		struct sctp_tmit_chunk *lchk;
5256 		uint32_t old_adv_peer_ack_point;
5257 
5258 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5259 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5260 		/* C3. See if we need to send a Fwd-TSN */
5261 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5262 			/*
5263 			 * ISSUE with ECN, see FWD-TSN processing.
5264 			 */
5265 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5266 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5267 				               0xee, cum_ack, asoc->advanced_peer_ack_point,
5268 				               old_adv_peer_ack_point);
5269 			}
5270 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5271 				send_forward_tsn(stcb, asoc);
5272 			} else if (lchk) {
5273 				/* try to FR fwd-tsn's that get lost too */
5274 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5275 					send_forward_tsn(stcb, asoc);
5276 				}
5277 			}
5278 		}
5279 		for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5280 			if (lchk->whoTo != NULL) {
5281 				break;
5282 			}
5283 		}
5284 		if (lchk != NULL) {
5285 			/* Assure a timer is up */
5286 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5287 			                 stcb->sctp_ep, stcb, lchk->whoTo);
5288 		}
5289 	}
5290 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5291 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5292 		               a_rwnd,
5293 		               stcb->asoc.peers_rwnd,
5294 		               stcb->asoc.total_flight,
5295 		               stcb->asoc.total_output_queue_size);
5296 	}
5297 }
5298 
5299 void
5300 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5301 {
5302 	/* Copy cum-ack */
5303 	uint32_t cum_ack, a_rwnd;
5304 
5305 	cum_ack = ntohl(cp->cumulative_tsn_ack);
5306 	/* Arrange so a_rwnd does NOT change */
5307 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
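	/*
	 * The SACK code recomputes peers_rwnd as roughly
	 * a_rwnd - total_flight, so passing peers_rwnd + total_flight
	 * makes that subtraction a no-op and leaves the peer's rwnd as-is.
	 */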
5308 
5309 	/* Now call the express sack handling */
5310 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5311 }
5312 
5313 static void
5314 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5315 			       struct sctp_stream_in *strmin)
5316 {
5317 	struct sctp_queued_to_read *control, *ncontrol;
5318 	struct sctp_association *asoc;
5319 	uint32_t mid;
5320 	int need_reasm_check = 0;
5321 
5322 	asoc = &stcb->asoc;
5323 	mid = strmin->last_mid_delivered;
	/*
	 * First deliver anything prior to, and including, the message ID
	 * that came in.
	 */
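	/*
	 * Note: mid is the 32-bit message identifier when I-DATA is in
	 * use, and the 16-bit SSN widened to 32 bits otherwise; the
	 * SCTP_MID_* comparison macros take asoc->idata_supported so the
	 * serial-number comparison wraps at the right width either way.
	 */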
5328 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5329 		if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5330 			/* this is deliverable now */
5331 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG)  == SCTP_DATA_NOT_FRAG) {
5332 				if (control->on_strm_q) {
5333 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5334 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5335 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5336 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5337 #ifdef INVARIANTS
5338 					} else {
5339 						panic("strmin: %p ctl: %p unknown %d",
5340 						      strmin, control, control->on_strm_q);
5341 #endif
5342 					}
5343 					control->on_strm_q = 0;
5344 				}
5345 				/* subtract pending on streams */
5346 				if (asoc->size_on_all_streams >= control->length) {
5347 					asoc->size_on_all_streams -= control->length;
5348 				} else {
5349 #ifdef INVARIANTS
5350 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5351 #else
5352 					asoc->size_on_all_streams = 0;
5353 #endif
5354 				}
5355 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5356 				/* deliver it to at least the delivery-q */
5357 				if (stcb->sctp_socket) {
5358 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5359 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5360 							  control,
5361 							  &stcb->sctp_socket->so_rcv,
5362 							  1, SCTP_READ_LOCK_HELD,
5363 							  SCTP_SO_NOT_LOCKED);
5364 				}
5365 			} else {
				/* It's a fragmented message */
5367 				if (control->first_frag_seen) {
					/* Make it so this is next to deliver; we restore later */
5369 					strmin->last_mid_delivered = control->mid - 1;
5370 					need_reasm_check = 1;
5371 					break;
5372 				}
5373 			}
5374 		} else {
5375 			/* no more delivery now. */
5376 			break;
5377 		}
5378 	}
5379 	if (need_reasm_check) {
5380 		int ret;
5381 		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5382 		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5383 			/* Restore the next to deliver unless we are ahead */
5384 			strmin->last_mid_delivered = mid;
5385 		}
5386 		if (ret == 0) {
			/* Left the front partially-delivered message on */
5388 			return;
5389 		}
5390 		need_reasm_check = 0;
5391 	}
	/*
	 * Now we must deliver things in the queue the normal way, if any
	 * are now ready.
	 */
5396 	mid = strmin->last_mid_delivered + 1;
5397 	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5398 		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5399 			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5400 				/* this is deliverable now */
5401 				if (control->on_strm_q) {
5402 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5403 						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5404 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5405 						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5406 #ifdef INVARIANTS
5407 					} else {
5408 						panic("strmin: %p ctl: %p unknown %d",
5409 						      strmin, control, control->on_strm_q);
5410 #endif
5411 					}
5412 					control->on_strm_q = 0;
5413 				}
5414 				/* subtract pending on streams */
5415 				if (asoc->size_on_all_streams >= control->length) {
5416 					asoc->size_on_all_streams -= control->length;
5417 				} else {
5418 #ifdef INVARIANTS
5419 					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5420 #else
5421 					asoc->size_on_all_streams = 0;
5422 #endif
5423 				}
5424 				sctp_ucount_decr(asoc->cnt_on_all_streams);
5425 				/* deliver it to at least the delivery-q */
5426 				strmin->last_mid_delivered = control->mid;
5427 				if (stcb->sctp_socket) {
5428 					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5429 					sctp_add_to_readq(stcb->sctp_ep, stcb,
5430 							  control,
5431 							  &stcb->sctp_socket->so_rcv, 1,
5432 							  SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5433 				}
5434 				mid = strmin->last_mid_delivered + 1;
5435 			} else {
				/* It's a fragmented message */
5437 				if (control->first_frag_seen) {
5438 					/* Make it so this is next to deliver */
5439 					strmin->last_mid_delivered = control->mid - 1;
5440 					need_reasm_check = 1;
5441 					break;
5442 				}
5443 			}
5444 		} else {
5445 			break;
5446 		}
5447 	}
5448 	if (need_reasm_check) {
5449 		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5450 	}
5451 }
5452 
5453 static void
5454 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5455 	struct sctp_association *asoc, struct sctp_stream_in *strm,
5456 	struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5457 {
5458 	struct sctp_tmit_chunk *chk, *nchk;
5459 
	/*
	 * For now, large messages held on the stream reassembly queue
	 * that are complete will be tossed too.  We could in theory do
	 * more work: spin through, stop after dumping one message (i.e.
	 * on seeing the start of a new message at the head), and call
	 * the delivery function to see if it can be delivered.  But for
	 * now we just dump everything on the queue.
	 */
5468 	if (!asoc->idata_supported && !ordered &&
5469 	    control->first_frag_seen &&
5470 	    SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5471 		return;
5472 	}
5473 	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5474 		/* Purge hanging chunks */
5475 		if (!asoc->idata_supported && !ordered) {
5476 			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5477 				break;
5478 			}
5479 		}
5480 		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5481 		if (asoc->size_on_reasm_queue >= chk->send_size) {
5482 			asoc->size_on_reasm_queue -= chk->send_size;
5483 		} else {
5484 #ifdef INVARIANTS
5485 			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5486 #else
5487 			asoc->size_on_reasm_queue = 0;
5488 #endif
5489 		}
5490 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5491 		if (chk->data) {
5492 			sctp_m_freem(chk->data);
5493 			chk->data = NULL;
5494 		}
5495 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5496 	}
5497 	if (!TAILQ_EMPTY(&control->reasm)) {
5498 		/* This has to be old data, unordered */
5499 		if (control->data) {
5500 			sctp_m_freem(control->data);
5501 			control->data = NULL;
5502 		}
5503 		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5504 		chk = TAILQ_FIRST(&control->reasm);
5505 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5506 			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5507 			sctp_add_chk_to_control(control, strm, stcb, asoc,
5508 						chk, SCTP_READ_LOCK_HELD);
5509 		}
5510 		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5511 		return;
5512 	}
5513 	if (control->on_strm_q == SCTP_ON_ORDERED) {
5514 		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5515 		if (asoc->size_on_all_streams >= control->length) {
5516 			asoc->size_on_all_streams -= control->length;
5517 		} else {
5518 #ifdef INVARIANTS
5519 			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5520 #else
5521 			asoc->size_on_all_streams = 0;
5522 #endif
5523 		}
5524 		sctp_ucount_decr(asoc->cnt_on_all_streams);
5525 		control->on_strm_q = 0;
5526 	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5527 		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5528 		control->on_strm_q = 0;
5529 #ifdef INVARIANTS
5530 	} else if (control->on_strm_q) {
5531 		panic("strm: %p ctl: %p unknown %d",
5532 		    strm, control, control->on_strm_q);
5533 #endif
5534 	}
5535 	control->on_strm_q = 0;
5536 	if (control->on_read_q == 0) {
5537 		sctp_free_remote_addr(control->whoFrom);
5538 		if (control->data) {
5539 			sctp_m_freem(control->data);
5540 			control->data = NULL;
5541 		}
5542 		sctp_free_a_readq(stcb, control);
5543 	}
5544 }
5545 
5546 void
5547 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5548                         struct sctp_forward_tsn_chunk *fwd,
                        int *abort_flag, struct mbuf *m, int offset)
5550 {
5551 	/* The pr-sctp fwd tsn */
5552 	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the PR-SCTP draft:
5555 	 *
5556 	 * Assume we get FwdTSN(x):
5557 	 *
5558 	 * 1) update local cumTSN to x
5559 	 * 2) try to further advance cumTSN to x + others we have
5560 	 * 3) examine and update re-ordering queue on pr-in-streams
5561 	 * 4) clean up re-assembly queue
5562 	 * 5) Send a sack to report where we are.
5563 	 */
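	/*
	 * Worked example (illustrative): our cumTSN is 10 and TSNs 12 and
	 * 13 have already arrived.  FwdTSN(x = 11) moves the cumTSN to 11
	 * (step 1), which the mapping array then advances to 13 (step 2);
	 * the per-stream entries in the chunk let us flush any partially
	 * reassembled messages the skipped TSNs belonged to (steps 3 and
	 * 4), and a SACK reports the new state to the peer (step 5).
	 */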
5564 	struct sctp_association *asoc;
5565 	uint32_t new_cum_tsn, gap;
5566 	unsigned int i, fwd_sz, m_size;
5567 	uint32_t str_seq;
5568 	struct sctp_stream_in *strm;
5569 	struct sctp_queued_to_read *control, *ncontrol, *sv;
5570 
5571 	asoc = &stcb->asoc;
5572 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
			"Bad size, fwd-tsn chunk too small\n");
5575 		return;
5576 	}
5577 	m_size = (stcb->asoc.mapping_array_size << 3);
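	/*
	 * mapping_array_size is in bytes; shifting left by 3 converts it
	 * to bits, i.e. the number of TSNs the mapping array can cover.
	 */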
5578 	/*************************************************************/
5579 	/* 1. Here we update local cumTSN and shift the bitmap array */
5580 	/*************************************************************/
5581 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5582 
5583 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5584 		/* Already got there ... */
5585 		return;
5586 	}
5587 	/*
5588 	 * now we know the new TSN is more advanced, let's find the actual
5589 	 * gap
5590 	 */
5591 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5592 	asoc->cumulative_tsn = new_cum_tsn;
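	/*
	 * gap is the distance, in serial arithmetic, from the base TSN of
	 * the mapping array to the new cumulative TSN.  If it is at least
	 * the size of the map, every TSN the map covers is now acked, so
	 * the maps are simply reset below instead of marked bit by bit.
	 */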
5593 	if (gap >= m_size) {
5594 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5595 			struct mbuf *op_err;
5596 			char msg[SCTP_DIAG_INFO_LEN];
5597 
			/*
			 * out of range (of the single-byte chunks in the
			 * rwnd I give out).  This must be an attacker.
			 */
5602 			*abort_flag = 1;
5603 			SCTP_SNPRINTF(msg, sizeof(msg),
5604 			              "New cum ack %8.8x too high, highest TSN %8.8x",
5605 			              new_cum_tsn, asoc->highest_tsn_inside_map);
5606 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5607 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5608 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5609 			return;
5610 		}
5611 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5612 
5613 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5614 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5615 		asoc->highest_tsn_inside_map = new_cum_tsn;
5616 
5617 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5618 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5619 
5620 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5621 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5622 		}
5623 	} else {
5624 		SCTP_TCB_LOCK_ASSERT(stcb);
5625 		for (i = 0; i <= gap; i++) {
5626 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5627 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5628 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5629 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5630 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5631 				}
5632 			}
5633 		}
5634 	}
5635 	/*************************************************************/
5636 	/* 2. Clear up re-assembly queue                             */
5637 	/*************************************************************/
5638 
5639 	/* This is now done as part of clearing up the stream/seq */
5640 	if (asoc->idata_supported == 0) {
5641 		uint16_t sid;
5642 
5643 		/* Flush all the un-ordered data based on cum-tsn */
5644 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5645 		for (sid = 0; sid < asoc->streamincnt; sid++) {
5646 			strm = &asoc->strmin[sid];
5647 			if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5648 				sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5649 			}
5650 		}
5651 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5652 	}
5653 	/*******************************************************/
5654 	/* 3. Update the PR-stream re-ordering queues and fix  */
5655 	/*    delivery issues as needed.                       */
5656 	/*******************************************************/
5657 	fwd_sz -= sizeof(*fwd);
5658 	if (m && fwd_sz) {
5659 		/* New method. */
5660 		unsigned int num_str;
5661 		uint32_t mid;
5662 		uint16_t sid;
5663 		uint16_t ordered, flags;
5664 		struct sctp_strseq *stseq, strseqbuf;
5665 		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5666 		offset += sizeof(*fwd);
5667 
5668 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5669 		if (asoc->idata_supported) {
5670 			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5671 		} else {
5672 			num_str = fwd_sz / sizeof(struct sctp_strseq);
5673 		}
5674 		for (i = 0; i < num_str; i++) {
5675 			if (asoc->idata_supported) {
5676 				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5677 									    sizeof(struct sctp_strseq_mid),
5678 									    (uint8_t *)&strseqbuf_m);
5679 				offset += sizeof(struct sctp_strseq_mid);
5680 				if (stseq_m == NULL) {
5681 					break;
5682 				}
5683 				sid = ntohs(stseq_m->sid);
5684 				mid = ntohl(stseq_m->mid);
5685 				flags = ntohs(stseq_m->flags);
5686 				if (flags & PR_SCTP_UNORDERED_FLAG) {
5687 					ordered = 0;
5688 				} else {
5689 					ordered = 1;
5690 				}
5691 			} else {
5692 				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5693 									    sizeof(struct sctp_strseq),
5694 									    (uint8_t *)&strseqbuf);
5695 				offset += sizeof(struct sctp_strseq);
5696 				if (stseq == NULL) {
5697 					break;
5698 				}
5699 				sid = ntohs(stseq->sid);
5700 				mid = (uint32_t)ntohs(stseq->ssn);
5701 				ordered = 1;
5702 			}
			/* conversion done; now process this entry */

			/*
			 * OK, we now look for the stream/seq on the read
			 * queue where it is not all delivered.  If we find
			 * it, we transmute the read entry into a
			 * PDI_ABORTED.
			 */
5712 			if (sid >= asoc->streamincnt) {
5713 				/* screwed up streams, stop!  */
5714 				break;
5715 			}
5716 			if ((asoc->str_of_pdapi == sid) &&
5717 			    (asoc->ssn_of_pdapi == mid)) {
			/* If this is the one we were partially
			 * delivering, then we no longer are.  Note this
			 * will change with the reassembly re-write.
			 */
5722 				asoc->fragmented_delivery_inprogress = 0;
5723 			}
5724 			strm = &asoc->strmin[sid];
5725 			if (ordered) {
5726 				TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
5727 					if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5728 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5729 					}
5730 				}
5731 			} else {
5732 				if (asoc->idata_supported) {
5733 					TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
5734 						if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5735 							sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5736 						}
5737 					}
5738 				} else {
5739 					if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5740 						sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5741 					}
5742 				}
5743 			}
5744 			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5745 				if ((control->sinfo_stream == sid) &&
5746 				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5747 					str_seq = (sid << 16) | (0x0000ffff & mid);
5748 					control->pdapi_aborted = 1;
5749 					sv = stcb->asoc.control_pdapi;
5750 					control->end_added = 1;
5751 					if (control->on_strm_q == SCTP_ON_ORDERED) {
5752 						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5753 						if (asoc->size_on_all_streams >= control->length) {
5754 							asoc->size_on_all_streams -= control->length;
5755 						} else {
5756 #ifdef INVARIANTS
5757 							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5758 #else
5759 							asoc->size_on_all_streams = 0;
5760 #endif
5761 						}
5762 						sctp_ucount_decr(asoc->cnt_on_all_streams);
5763 					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5764 						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5765 #ifdef INVARIANTS
5766 					} else if (control->on_strm_q) {
5767 						panic("strm: %p ctl: %p unknown %d",
5768 						      strm, control, control->on_strm_q);
5769 #endif
5770 					}
5771 					control->on_strm_q = 0;
5772 					stcb->asoc.control_pdapi = control;
5773 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5774 					                stcb,
5775 					                SCTP_PARTIAL_DELIVERY_ABORTED,
5776 					                (void *)&str_seq,
5777 							SCTP_SO_NOT_LOCKED);
5778 					stcb->asoc.control_pdapi = sv;
5779 					break;
5780 				} else if ((control->sinfo_stream == sid) &&
5781 					   SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5782 					/* We are past our victim SSN */
5783 					break;
5784 				}
5785 			}
5786 			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5787 				/* Update the sequence number */
5788 				strm->last_mid_delivered = mid;
5789 			}
5790 			/* now kick the stream the new way */
5791 			/*sa_ignore NO_NULL_CHK*/
5792 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5793 		}
5794 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5795 	}
	/*
	 * Now slide the mapping arrays forward.
	 */
5799 	sctp_slide_mapping_arrays(stcb);
5800 }
5801