1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/stream.h>
32 #include <sys/strsubr.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/kmem.h>
36 #include <sys/socket.h>
37 #include <sys/random.h>
38 #include <sys/tsol/tndb.h>
39 #include <sys/tsol/tnet.h>
40 
41 #include <netinet/in.h>
42 #include <netinet/ip6.h>
43 #include <netinet/sctp.h>
44 
45 #include <inet/common.h>
46 #include <inet/ip.h>
47 #include <inet/ip6.h>
48 #include <inet/ip_ire.h>
49 #include <inet/mib2.h>
50 #include <inet/nd.h>
51 #include <inet/optcom.h>
52 #include <inet/sctp_ip.h>
53 #include <inet/ipclassifier.h>
54 
55 #include "sctp_impl.h"
56 #include "sctp_addr.h"
57 #include "sctp_asconf.h"
58 
59 static struct kmem_cache *sctp_kmem_faddr_cache;
60 static void sctp_init_faddr(sctp_t *, sctp_faddr_t *, in6_addr_t *, mblk_t *);
61 
62 /* Set the source address.  Refer to comments in sctp_get_ire(). */
63 void
64 sctp_set_saddr(sctp_t *sctp, sctp_faddr_t *fp)
65 {
66 	boolean_t v6 = !fp->isv4;
67 
68 	if (sctp->sctp_bound_to_all) {
69 		V6_SET_ZERO(fp->saddr);
70 	} else {
71 		fp->saddr = sctp_get_valid_addr(sctp, v6);
		if ((!v6 && IN6_IS_ADDR_V4MAPPED_ANY(&fp->saddr)) ||
		    (v6 && IN6_IS_ADDR_UNSPECIFIED(&fp->saddr))) {
74 			fp->state = SCTP_FADDRS_UNREACH;
75 			/* Disable heartbeat. */
76 			fp->hb_expiry = 0;
77 			fp->hb_pending = B_FALSE;
78 			fp->strikes = 0;
79 		}
80 	}
81 }
82 
83 /*
84  * Call this function to update the cached IRE of a peer addr fp.
85  */
86 void
87 sctp_get_ire(sctp_t *sctp, sctp_faddr_t *fp)
88 {
89 	ire_t		*ire;
90 	ipaddr_t	addr4;
91 	in6_addr_t	laddr;
92 	sctp_saddr_ipif_t *sp;
93 	uint_t		ipif_seqid;
94 	int		hdrlen;
95 	ts_label_t	*tsl;
96 
97 	/* Remove the previous cache IRE */
98 	if ((ire = fp->ire) != NULL) {
99 		IRE_REFRELE_NOTR(ire);
100 		fp->ire = NULL;
101 	}
102 
103 	/*
	 * If this addr is not reachable, mark it as unconfirmed for now;
	 * the state will be changed back to unreachable later in this
	 * function if that is still the case.
107 	 */
108 	if (fp->state == SCTP_FADDRS_UNREACH) {
109 		fp->state = SCTP_FADDRS_UNCONFIRMED;
110 	}
111 
112 	tsl = crgetlabel(CONN_CRED(sctp->sctp_connp));
113 
114 	if (fp->isv4) {
115 		IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
116 		ire = ire_cache_lookup(addr4, sctp->sctp_zoneid, tsl);
117 		if (ire != NULL)
118 			IN6_IPADDR_TO_V4MAPPED(ire->ire_src_addr, &laddr);
119 	} else {
120 		ire = ire_cache_lookup_v6(&fp->faddr, sctp->sctp_zoneid, tsl);
121 		if (ire != NULL)
122 			laddr = ire->ire_src_addr_v6;
123 	}
124 
125 	if (ire == NULL) {
126 		dprint(3, ("ire2faddr: no ire for %x:%x:%x:%x\n",
127 		    SCTP_PRINTADDR(fp->faddr)));
128 		/*
129 		 * It is tempting to just leave the src addr
130 		 * unspecified and let IP figure it out, but we
131 		 * *cannot* do this, since IP may choose a src addr
132 		 * that is not part of this association... unless
133 		 * this sctp has bound to all addrs.  So if the ire
134 		 * lookup fails, try to find one in our src addr
135 		 * list, unless the sctp has bound to all addrs, in
136 		 * which case we change the src addr to unspec.
137 		 *
138 		 * Note that if this is a v6 endpoint but it does
		 * not have any v4 address at this point (e.g. it may
		 * have been deleted), sctp_get_valid_addr() will
141 		 * return mapped INADDR_ANY.  In this case, this
142 		 * address should be marked not reachable so that
143 		 * it won't be used to send data.
144 		 */
145 		sctp_set_saddr(sctp, fp);
146 		if (fp->state == SCTP_FADDRS_UNREACH)
147 			return;
148 		goto check_current;
149 	}
150 
151 	ipif_seqid = ire->ire_ipif->ipif_seqid;
152 	dprint(2, ("ire2faddr: got ire for %x:%x:%x:%x, ",
153 	    SCTP_PRINTADDR(fp->faddr)));
154 	if (fp->isv4) {
155 		dprint(2, ("src = %x\n", ire->ire_src_addr));
156 	} else {
157 		dprint(2, ("src=%x:%x:%x:%x\n",
158 		    SCTP_PRINTADDR(ire->ire_src_addr_v6)));
159 	}
160 
161 	/* Make sure the laddr is part of this association */
162 	if ((sp = sctp_ipif_lookup(sctp, ipif_seqid)) != NULL &&
163 	    !sp->saddr_ipif_dontsrc) {
164 		if (sp->saddr_ipif_unconfirmed == 1)
165 			sp->saddr_ipif_unconfirmed = 0;
166 		fp->saddr = laddr;
167 	} else {
168 		dprint(2, ("ire2faddr: src addr is not part of assc\n"));
169 
170 		/*
171 		 * Set the src to the first saddr and hope for the best.
172 		 * Note that we will still do the ire caching below.
173 		 * Otherwise, whenever we send a packet, we need to do
		 * the ire lookup again and still may not get the correct
		 * source address.  Note that this case should happen very
		 * seldom.  One scenario in which it can happen is when an
		 * app explicitly binds to an address that is not the
		 * preferred source address for reaching the peer.
179 		 */
180 		sctp_set_saddr(sctp, fp);
181 		if (fp->state == SCTP_FADDRS_UNREACH) {
182 			IRE_REFRELE(ire);
183 			return;
184 		}
185 	}
186 
187 	/*
188 	 * Note that ire_cache_lookup_*() returns an ire with the tracing
189 	 * bits enabled.  This requires the thread holding the ire also
190 	 * do the IRE_REFRELE().  Thus we need to do IRE_REFHOLD_NOTR()
191 	 * and then IRE_REFRELE() the ire here to make the tracing bits
192 	 * work.
193 	 */
194 	IRE_REFHOLD_NOTR(ire);
195 	IRE_REFRELE(ire);
196 
197 	/* Cache the IRE */
198 	fp->ire = ire;
199 	if (fp->ire->ire_type == IRE_LOOPBACK && !sctp->sctp_loopback)
200 		sctp->sctp_loopback = 1;
201 
202 	/*
203 	 * Pull out RTO information for this faddr and use it if we don't
204 	 * have any yet.
205 	 */
206 	if (fp->srtt == -1 && ire->ire_uinfo.iulp_rtt != 0) {
207 		/* The cached value is in ms. */
208 		fp->srtt = MSEC_TO_TICK(ire->ire_uinfo.iulp_rtt);
209 		fp->rttvar = MSEC_TO_TICK(ire->ire_uinfo.iulp_rtt_sd);
210 		fp->rto = 3 * fp->srtt;
211 
212 		/* Bound the RTO by configured min and max values */
213 		if (fp->rto < sctp->sctp_rto_min) {
214 			fp->rto = sctp->sctp_rto_min;
215 		}
216 		if (fp->rto > sctp->sctp_rto_max) {
217 			fp->rto = sctp->sctp_rto_max;
218 		}
219 	}
220 
221 	/*
222 	 * Record the MTU for this faddr. If the MTU for this faddr has
223 	 * changed, check if the assc MTU will also change.
224 	 */
225 	if (fp->isv4) {
226 		hdrlen = sctp->sctp_hdr_len;
227 	} else {
228 		hdrlen = sctp->sctp_hdr6_len;
229 	}
230 	if ((fp->sfa_pmss + hdrlen) != ire->ire_max_frag) {
231 		/* Make sure that sfa_pmss is a multiple of SCTP_ALIGN. */
232 		fp->sfa_pmss = (ire->ire_max_frag - hdrlen) & ~(SCTP_ALIGN - 1);
233 		if (fp->cwnd < (fp->sfa_pmss * 2)) {
234 			fp->cwnd = fp->sfa_pmss * sctp_slow_start_initial;
235 		}
236 	}
237 
238 check_current:
239 	if (fp == sctp->sctp_current)
240 		sctp_set_faddr_current(sctp, fp);
241 }
242 
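/*
 * Push the RTT and PMTU information accumulated for each peer address
 * back into its cached IRE so that other connections may benefit from
 * it.  Condemned IREs are released instead of being updated.
 */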
243 void
244 sctp_update_ire(sctp_t *sctp)
245 {
246 	ire_t		*ire;
247 	sctp_faddr_t	*fp;
248 
249 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
250 		if ((ire = fp->ire) == NULL)
251 			continue;
252 		mutex_enter(&ire->ire_lock);
253 
254 		/*
		 * If the cached IRE is going away, there is no point in
		 * updating it.
257 		 */
258 		if (ire->ire_marks & IRE_MARK_CONDEMNED) {
259 			mutex_exit(&ire->ire_lock);
260 			IRE_REFRELE_NOTR(ire);
261 			fp->ire = NULL;
262 			continue;
263 		}
264 
265 		/*
		 * Only record the PMTU for this faddr if we have actually
		 * done PMTU discovery.  This prevents the initialized
		 * default from clobbering any real info that IP may have.
269 		 */
270 		if (fp->pmtu_discovered) {
271 			if (fp->isv4) {
272 				ire->ire_max_frag = fp->sfa_pmss +
273 				    sctp->sctp_hdr_len;
274 			} else {
275 				ire->ire_max_frag = fp->sfa_pmss +
276 				    sctp->sctp_hdr6_len;
277 			}
278 		}
279 
280 		if (sctp_rtt_updates != 0 &&
281 		    fp->rtt_updates >= sctp_rtt_updates) {
282 			/*
			 * If there are no old cached values, initialize them
			 * conservatively to (1.5 * new value).  This code is
			 * copied from ip_ire_advise().  The cached value is
			 * in ms.
287 			 */
288 			if (ire->ire_uinfo.iulp_rtt != 0) {
289 				ire->ire_uinfo.iulp_rtt =
290 				    (ire->ire_uinfo.iulp_rtt +
291 				    TICK_TO_MSEC(fp->srtt)) >> 1;
292 			} else {
293 				ire->ire_uinfo.iulp_rtt =
294 				    TICK_TO_MSEC(fp->srtt + (fp->srtt >> 1));
295 			}
296 			if (ire->ire_uinfo.iulp_rtt_sd != 0) {
297 				ire->ire_uinfo.iulp_rtt_sd =
				    (ire->ire_uinfo.iulp_rtt_sd +
				    TICK_TO_MSEC(fp->rttvar)) >> 1;
300 			} else {
301 				ire->ire_uinfo.iulp_rtt_sd =
302 				    TICK_TO_MSEC(fp->rttvar +
303 				    (fp->rttvar >> 1));
304 			}
305 			fp->rtt_updates = 0;
306 		}
307 		mutex_exit(&ire->ire_lock);
308 	}
309 }
310 
311 /*
312  * The sender must set the total length in the IP header.
 * If sendto == NULL, the current peer address will be used.
314  */
315 mblk_t *
316 sctp_make_mp(sctp_t *sctp, sctp_faddr_t *sendto, int trailer)
317 {
318 	mblk_t *mp;
319 	size_t ipsctplen;
320 	int isv4;
321 	sctp_faddr_t *fp;
322 
323 	ASSERT(sctp->sctp_current != NULL || sendto != NULL);
324 	if (sendto == NULL) {
325 		fp = sctp->sctp_current;
326 	} else {
327 		fp = sendto;
328 	}
329 	isv4 = fp->isv4;
330 
331 	/* Try to look for another IRE again. */
332 	if (fp->ire == NULL)
333 		sctp_get_ire(sctp, fp);
334 
335 	/* There is no suitable source address to use, return. */
336 	if (fp->state == SCTP_FADDRS_UNREACH)
337 		return (NULL);
338 
339 	if (isv4) {
340 		ipsctplen = sctp->sctp_hdr_len;
341 	} else {
342 		ipsctplen = sctp->sctp_hdr6_len;
343 	}
344 
345 	mp = allocb_cred(ipsctplen + sctp_wroff_xtra + trailer,
346 	    CONN_CRED(sctp->sctp_connp));
347 	if (mp == NULL) {
348 		ip1dbg(("sctp_make_mp: error making mp..\n"));
349 		return (NULL);
350 	}
351 	mp->b_rptr += sctp_wroff_xtra;
352 	mp->b_wptr = mp->b_rptr + ipsctplen;
353 
354 	ASSERT(OK_32PTR(mp->b_wptr));
355 
356 	if (isv4) {
357 		ipha_t *iph = (ipha_t *)mp->b_rptr;
358 
359 		bcopy(sctp->sctp_iphc, mp->b_rptr, ipsctplen);
360 		if (fp != sctp->sctp_current) {
361 			/* fiddle with the dst addr */
362 			IN6_V4MAPPED_TO_IPADDR(&fp->faddr, iph->ipha_dst);
363 			/* fix up src addr */
364 			if (!IN6_IS_ADDR_V4MAPPED_ANY(&fp->saddr)) {
365 				IN6_V4MAPPED_TO_IPADDR(&fp->saddr,
366 				    iph->ipha_src);
367 			} else if (sctp->sctp_bound_to_all) {
368 				iph->ipha_src = INADDR_ANY;
369 			}
370 		}
371 		/* set or clear the don't fragment bit */
372 		if (fp->df) {
373 			iph->ipha_fragment_offset_and_flags = htons(IPH_DF);
374 		} else {
375 			iph->ipha_fragment_offset_and_flags = 0;
376 		}
377 	} else {
378 		bcopy(sctp->sctp_iphc6, mp->b_rptr, ipsctplen);
379 		if (fp != sctp->sctp_current) {
380 			/* fiddle with the dst addr */
381 			((ip6_t *)(mp->b_rptr))->ip6_dst = fp->faddr;
382 			/* fix up src addr */
383 			if (!IN6_IS_ADDR_UNSPECIFIED(&fp->saddr)) {
384 				((ip6_t *)(mp->b_rptr))->ip6_src = fp->saddr;
385 			} else if (sctp->sctp_bound_to_all) {
386 				bzero(&((ip6_t *)(mp->b_rptr))->ip6_src,
387 				    sizeof (in6_addr_t));
388 			}
389 		}
390 	}
391 	ASSERT(sctp->sctp_connp != NULL);
392 
393 	/*
394 	 * IP will not free this IRE if it is condemned.  SCTP needs to
395 	 * free it.
396 	 */
397 	if ((fp->ire != NULL) && (fp->ire->ire_marks & IRE_MARK_CONDEMNED)) {
398 		IRE_REFRELE_NOTR(fp->ire);
399 		fp->ire = NULL;
400 	}
	/* Stash the conn and ire ptr info for IP */
402 	SCTP_STASH_IPINFO(mp, fp->ire);
403 
404 	return (mp);
405 }
406 
407 /*
408  * Notify upper layers about preferred write offset, write size.
409  */
410 void
411 sctp_set_ulp_prop(sctp_t *sctp)
412 {
413 	int hdrlen;
414 
415 	if (sctp->sctp_current->isv4) {
416 		hdrlen = sctp->sctp_hdr_len;
417 	} else {
418 		hdrlen = sctp->sctp_hdr6_len;
419 	}
420 	ASSERT(sctp->sctp_ulpd);
421 
422 	ASSERT(sctp->sctp_current->sfa_pmss == sctp->sctp_mss);
423 	sctp->sctp_ulp_prop(sctp->sctp_ulpd,
424 	    sctp_wroff_xtra + hdrlen + sizeof (sctp_data_hdr_t),
425 	    sctp->sctp_mss - sizeof (sctp_data_hdr_t));
426 }
427 
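/*
 * Set the total/payload length field in the IP header of an outgoing
 * packet to the length of the mblk chain.  For IPv6 the length of the
 * IPv6 header itself (and any leading ip6i_t) is excluded.
 */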
428 void
429 sctp_set_iplen(sctp_t *sctp, mblk_t *mp)
430 {
431 	uint16_t	sum = 0;
432 	ipha_t		*iph;
433 	ip6_t		*ip6h;
434 	mblk_t		*pmp = mp;
435 	boolean_t	isv4;
436 
437 	isv4 = (IPH_HDR_VERSION(mp->b_rptr) == IPV4_VERSION);
438 	for (; pmp; pmp = pmp->b_cont)
439 		sum += pmp->b_wptr - pmp->b_rptr;
440 
441 	if (isv4) {
442 		iph = (ipha_t *)mp->b_rptr;
443 		iph->ipha_length = htons(sum);
444 	} else {
445 		ip6h = (ip6_t *)mp->b_rptr;
446 		/*
447 		 * If an ip6i_t is present, the real IPv6 header
448 		 * immediately follows.
449 		 */
450 		if (ip6h->ip6_nxt == IPPROTO_RAW)
451 			ip6h = (ip6_t *)&ip6h[1];
452 		ip6h->ip6_plen = htons(sum - ((char *)&sctp->sctp_ip6h[1] -
453 		    sctp->sctp_iphc6));
454 	}
455 }
456 
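/*
 * Compare the peer address list a1 against a2 and classify the
 * relationship as SCTP_ADDR_EQUAL, SCTP_ADDR_SUBSET (all of a1's
 * addresses appear in a2), SCTP_ADDR_OVERLAP (only some are shared)
 * or SCTP_ADDR_DISJOINT (none are shared).
 */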
457 int
458 sctp_compare_faddrsets(sctp_faddr_t *a1, sctp_faddr_t *a2)
459 {
460 	int na1 = 0;
461 	int overlap = 0;
462 	int equal = 1;
463 	int onematch;
464 	sctp_faddr_t *fp1, *fp2;
465 
466 	for (fp1 = a1; fp1; fp1 = fp1->next) {
467 		onematch = 0;
468 		for (fp2 = a2; fp2; fp2 = fp2->next) {
469 			if (IN6_ARE_ADDR_EQUAL(&fp1->faddr, &fp2->faddr)) {
470 				overlap++;
471 				onematch = 1;
472 				break;
473 			}
474 			if (!onematch) {
475 				equal = 0;
476 			}
477 		}
478 		na1++;
479 	}
480 
481 	if (equal) {
482 		return (SCTP_ADDR_EQUAL);
483 	}
484 	if (overlap == na1) {
485 		return (SCTP_ADDR_SUBSET);
486 	}
487 	if (overlap) {
488 		return (SCTP_ADDR_OVERLAP);
489 	}
490 	return (SCTP_ADDR_DISJOINT);
491 }
492 
493 /*
 * Returns 0 on success, ENOMEM on memory allocation failure, and EACCES
 * if the remote host fails label accreditation on a labeled system.  If
 * sleep is KM_SLEEP, this function should never fail due to memory
 * allocation.  The boolean parameter first decides whether the newly
 * created faddr structure should be added at the beginning of the list
 * or at the end.
498  *
499  * Note: caller must hold conn fanout lock.
500  */
501 int
502 sctp_add_faddr(sctp_t *sctp, in6_addr_t *addr, int sleep, boolean_t first)
503 {
504 	sctp_faddr_t	*faddr;
505 	mblk_t		*timer_mp;
506 
507 	if (is_system_labeled()) {
508 		ts_label_t *tsl;
509 		tsol_tpc_t *rhtp;
510 		int retv;
511 
512 		tsl = crgetlabel(CONN_CRED(sctp->sctp_connp));
513 		ASSERT(tsl != NULL);
514 
515 		/* find_tpc automatically does the right thing with IPv4 */
516 		rhtp = find_tpc(addr, IPV6_VERSION, B_FALSE);
517 		if (rhtp == NULL)
518 			return (EACCES);
519 
520 		retv = EACCES;
521 		if (tsl->tsl_doi == rhtp->tpc_tp.tp_doi) {
522 			switch (rhtp->tpc_tp.host_type) {
523 			case UNLABELED:
524 				/*
525 				 * Can talk to unlabeled hosts if any of the
526 				 * following are true:
527 				 *   1. zone's label matches the remote host's
528 				 *	default label,
529 				 *   2. mac_exempt is on and the zone dominates
530 				 *	the remote host's label, or
531 				 *   3. mac_exempt is on and the socket is from
532 				 *	the global zone.
533 				 */
534 				if (blequal(&rhtp->tpc_tp.tp_def_label,
535 				    &tsl->tsl_label) ||
536 				    (sctp->sctp_mac_exempt &&
537 				    (sctp->sctp_zoneid == GLOBAL_ZONEID ||
538 				    bldominates(&tsl->tsl_label,
539 				    &rhtp->tpc_tp.tp_def_label))))
540 					retv = 0;
541 				break;
542 			case SUN_CIPSO:
543 				if (_blinrange(&tsl->tsl_label,
544 				    &rhtp->tpc_tp.tp_sl_range_cipso) ||
545 				    blinlset(&tsl->tsl_label,
546 				    rhtp->tpc_tp.tp_sl_set_cipso))
547 					retv = 0;
548 				break;
549 			}
550 		}
551 		TPC_RELE(rhtp);
552 		if (retv != 0)
553 			return (retv);
554 	}
555 
556 	if ((faddr = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep)) == NULL)
557 		return (ENOMEM);
558 	timer_mp = sctp_timer_alloc((sctp), sctp_rexmit_timer);
559 	if (timer_mp == NULL) {
560 		kmem_cache_free(sctp_kmem_faddr_cache, faddr);
561 		return (ENOMEM);
562 	}
563 	((sctpt_t *)(timer_mp->b_rptr))->sctpt_faddr = faddr;
564 
565 	sctp_init_faddr(sctp, faddr, addr, timer_mp);
566 	ASSERT(faddr->next == NULL);
567 
568 	if (sctp->sctp_faddrs == NULL) {
569 		ASSERT(sctp->sctp_lastfaddr == NULL);
570 		/* only element on list; first and last are same */
571 		sctp->sctp_faddrs = sctp->sctp_lastfaddr = faddr;
572 	} else if (first) {
573 		ASSERT(sctp->sctp_lastfaddr != NULL);
574 		faddr->next = sctp->sctp_faddrs;
575 		sctp->sctp_faddrs = faddr;
576 	} else {
577 		sctp->sctp_lastfaddr->next = faddr;
578 		sctp->sctp_lastfaddr = faddr;
579 	}
580 	sctp->sctp_nfaddrs++;
581 
582 	return (0);
583 }
584 
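/*
 * Return the sctp_faddr_t of this association that matches addr, or
 * NULL if there is no such peer address.
 */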
585 sctp_faddr_t *
586 sctp_lookup_faddr(sctp_t *sctp, in6_addr_t *addr)
587 {
588 	sctp_faddr_t *fp;
589 
590 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
591 		if (IN6_ARE_ADDR_EQUAL(&fp->faddr, addr))
592 			break;
593 	}
594 
595 	return (fp);
596 }
597 
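/*
 * Same as sctp_lookup_faddr() except that the search starts from an
 * arbitrary faddr list instead of an sctp_t.
 */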
598 sctp_faddr_t *
599 sctp_lookup_faddr_nosctp(sctp_faddr_t *fp, in6_addr_t *addr)
600 {
601 	for (; fp; fp = fp->next) {
602 		if (IN6_ARE_ADDR_EQUAL(&fp->faddr, addr)) {
603 			break;
604 		}
605 	}
606 
607 	return (fp);
608 }
609 
610 /*
611  * To change the currently used peer address to the specified one.
612  */
613 void
614 sctp_set_faddr_current(sctp_t *sctp, sctp_faddr_t *fp)
615 {
616 	/* Now setup the composite header. */
617 	if (fp->isv4) {
618 		IN6_V4MAPPED_TO_IPADDR(&fp->faddr,
619 		    sctp->sctp_ipha->ipha_dst);
620 		IN6_V4MAPPED_TO_IPADDR(&fp->saddr, sctp->sctp_ipha->ipha_src);
621 		/* update don't fragment bit */
622 		if (fp->df) {
623 			sctp->sctp_ipha->ipha_fragment_offset_and_flags =
624 			    htons(IPH_DF);
625 		} else {
626 			sctp->sctp_ipha->ipha_fragment_offset_and_flags = 0;
627 		}
628 	} else {
629 		sctp->sctp_ip6h->ip6_dst = fp->faddr;
630 		sctp->sctp_ip6h->ip6_src = fp->saddr;
631 	}
632 
633 	sctp->sctp_current = fp;
634 	sctp->sctp_mss = fp->sfa_pmss;
635 
	/* Update the upper layer for the change. */
637 	if (!SCTP_IS_DETACHED(sctp))
638 		sctp_set_ulp_prop(sctp);
639 }
640 
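/*
 * Refresh the cached IRE (and hence the source address) of every peer
 * address of the association.
 */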
641 void
642 sctp_redo_faddr_srcs(sctp_t *sctp)
643 {
644 	sctp_faddr_t *fp;
645 
646 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
647 		sctp_get_ire(sctp, fp);
648 	}
649 }
650 
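/*
 * Mark the peer address fp as alive: reset the strike counts, restart
 * the heartbeat schedule and, if the address was previously down,
 * generate an SCTP_ADDR_AVAILABLE event.  If the recovered address is
 * the primary, switch the association back to it.
 */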
651 void
652 sctp_faddr_alive(sctp_t *sctp, sctp_faddr_t *fp)
653 {
654 	int64_t now = lbolt64;
655 
656 	fp->strikes = 0;
657 	sctp->sctp_strikes = 0;
658 	fp->lastactive = now;
659 	fp->hb_expiry = now + SET_HB_INTVL(fp);
660 	fp->hb_pending = B_FALSE;
661 	if (fp->state != SCTP_FADDRS_ALIVE) {
662 		fp->state = SCTP_FADDRS_ALIVE;
663 		sctp_intf_event(sctp, fp->faddr, SCTP_ADDR_AVAILABLE, 0);
664 
665 		/*
666 		 * If this is the primary, switch back to it now.  And
667 		 * we probably want to reset the source addr used to reach
668 		 * it.
669 		 */
670 		if (fp == sctp->sctp_primary) {
671 			sctp_set_faddr_current(sctp, fp);
672 			sctp_get_ire(sctp, fp);
673 			return;
674 		}
675 	}
676 	if (fp->ire == NULL) {
677 		/* Should have a full IRE now */
678 		sctp_get_ire(sctp, fp);
679 	}
680 }
681 
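/*
 * Return 1 if at least one peer address is alive and has no strikes
 * against it, 0 otherwise.
 */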
682 int
683 sctp_is_a_faddr_clean(sctp_t *sctp)
684 {
685 	sctp_faddr_t *fp;
686 
687 	for (fp = sctp->sctp_faddrs; fp; fp = fp->next) {
688 		if (fp->state == SCTP_FADDRS_ALIVE && fp->strikes == 0) {
689 			return (1);
690 		}
691 	}
692 
693 	return (0);
694 }
695 
696 /*
 * Returns 0 if there is at least one other active faddr, -1 if there
698  * are none. If there are none left, faddr_dead() will start killing the
699  * association.
700  * If the downed faddr was the current faddr, a new current faddr
701  * will be chosen.
702  */
703 int
704 sctp_faddr_dead(sctp_t *sctp, sctp_faddr_t *fp, int newstate)
705 {
706 	sctp_faddr_t *ofp;
707 
708 	if (fp->state == SCTP_FADDRS_ALIVE) {
709 		sctp_intf_event(sctp, fp->faddr, SCTP_ADDR_UNREACHABLE, 0);
710 	}
711 	fp->state = newstate;
712 
713 	dprint(1, ("sctp_faddr_dead: %x:%x:%x:%x down (state=%d)\n",
714 	    SCTP_PRINTADDR(fp->faddr), newstate));
715 
716 	if (fp == sctp->sctp_current) {
717 		/* Current faddr down; need to switch it */
718 		sctp->sctp_current = NULL;
719 	}
720 
721 	/* Find next alive faddr */
722 	ofp = fp;
723 	for (fp = fp->next; fp != NULL; fp = fp->next) {
724 		if (fp->state == SCTP_FADDRS_ALIVE) {
725 			break;
726 		}
727 	}
728 
729 	if (fp == NULL) {
730 		/* Continue from beginning of list */
731 		for (fp = sctp->sctp_faddrs; fp != ofp; fp = fp->next) {
732 			if (fp->state == SCTP_FADDRS_ALIVE) {
733 				break;
734 			}
735 		}
736 	}
737 
738 	/*
739 	 * Find a new fp, so if the current faddr is dead, use the new fp
740 	 * as the current one.
741 	 */
742 	if (fp != ofp) {
743 		if (sctp->sctp_current == NULL) {
744 			dprint(1, ("sctp_faddr_dead: failover->%x:%x:%x:%x\n",
745 			    SCTP_PRINTADDR(fp->faddr)));
746 			/*
747 			 * Note that we don't need to reset the source addr
748 			 * of the new fp.
749 			 */
750 			sctp_set_faddr_current(sctp, fp);
751 		}
752 		return (0);
753 	}
754 
756 	/* All faddrs are down; kill the association */
757 	dprint(1, ("sctp_faddr_dead: all faddrs down, killing assoc\n"));
758 	BUMP_MIB(&sctp_mib, sctpAborted);
759 	sctp_assoc_event(sctp, sctp->sctp_state < SCTPS_ESTABLISHED ?
760 	    SCTP_CANT_STR_ASSOC : SCTP_COMM_LOST, 0, NULL);
761 	sctp_clean_death(sctp, sctp->sctp_client_errno ?
762 	    sctp->sctp_client_errno : ETIMEDOUT);
763 
764 	return (-1);
765 }
766 
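/*
 * Return the next alive peer address after ofp (or after the current
 * address if ofp is NULL), wrapping around to the head of the list if
 * needed.
 */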
767 sctp_faddr_t *
768 sctp_rotate_faddr(sctp_t *sctp, sctp_faddr_t *ofp)
769 {
770 	sctp_faddr_t *nfp = NULL;
771 
772 	if (ofp == NULL) {
773 		ofp = sctp->sctp_current;
774 	}
775 
776 	/* Find the next live one */
777 	for (nfp = ofp->next; nfp != NULL; nfp = nfp->next) {
778 		if (nfp->state == SCTP_FADDRS_ALIVE) {
779 			break;
780 		}
781 	}
782 
783 	if (nfp == NULL) {
784 		/* Continue from beginning of list */
785 		for (nfp = sctp->sctp_faddrs; nfp != ofp; nfp = nfp->next) {
786 			if (nfp->state == SCTP_FADDRS_ALIVE) {
787 				break;
788 			}
789 		}
790 	}
791 
792 	/*
793 	 * nfp could only be NULL if all faddrs are down, and when
794 	 * this happens, faddr_dead() should have killed the
795 	 * association. Hence this assertion...
796 	 */
797 	ASSERT(nfp != NULL);
798 	return (nfp);
799 }
800 
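/*
 * Remove fp from the association's peer address list, releasing its
 * timers and cached IRE, and free it back to the faddr kmem cache.
 */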
801 void
802 sctp_unlink_faddr(sctp_t *sctp, sctp_faddr_t *fp)
803 {
804 	sctp_faddr_t *fpp;
805 
806 	if (!sctp->sctp_faddrs) {
807 		return;
808 	}
809 
810 	if (fp->timer_mp != NULL) {
811 		sctp_timer_free(fp->timer_mp);
812 		fp->timer_mp = NULL;
813 		fp->timer_running = 0;
814 	}
815 	if (fp->rc_timer_mp != NULL) {
816 		sctp_timer_free(fp->rc_timer_mp);
817 		fp->rc_timer_mp = NULL;
818 		fp->rc_timer_running = 0;
819 	}
820 	if (fp->ire != NULL) {
821 		IRE_REFRELE_NOTR(fp->ire);
822 		fp->ire = NULL;
823 	}
824 
825 	if (fp == sctp->sctp_faddrs) {
826 		goto gotit;
827 	}
828 
829 	for (fpp = sctp->sctp_faddrs; fpp->next != fp; fpp = fpp->next)
830 		;
831 
832 gotit:
833 	ASSERT(sctp->sctp_conn_tfp != NULL);
834 	mutex_enter(&sctp->sctp_conn_tfp->tf_lock);
835 	if (fp == sctp->sctp_faddrs) {
836 		sctp->sctp_faddrs = fp->next;
837 	} else {
838 		fpp->next = fp->next;
839 	}
840 	mutex_exit(&sctp->sctp_conn_tfp->tf_lock);
841 	/* XXX faddr2ire? */
842 	kmem_cache_free(sctp_kmem_faddr_cache, fp);
843 	sctp->sctp_nfaddrs--;
844 }
845 
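/*
 * Free the association's entire peer address list.  The conn fanout
 * lock is held while the list is torn down unless the caller indicates
 * that it already holds it.
 */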
846 void
847 sctp_zap_faddrs(sctp_t *sctp, int caller_holds_lock)
848 {
849 	sctp_faddr_t *fp, *fpn;
850 
851 	if (sctp->sctp_faddrs == NULL) {
852 		ASSERT(sctp->sctp_lastfaddr == NULL);
853 		return;
854 	}
855 
856 	ASSERT(sctp->sctp_lastfaddr != NULL);
857 	sctp->sctp_lastfaddr = NULL;
858 	sctp->sctp_current = NULL;
859 	sctp->sctp_primary = NULL;
860 
861 	sctp_free_faddr_timers(sctp);
862 
863 	if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) {
864 		/* in conn fanout; need to hold lock */
865 		mutex_enter(&sctp->sctp_conn_tfp->tf_lock);
866 	}
867 
868 	for (fp = sctp->sctp_faddrs; fp; fp = fpn) {
869 		fpn = fp->next;
870 		if (fp->ire != NULL)
871 			IRE_REFRELE_NOTR(fp->ire);
872 		kmem_cache_free(sctp_kmem_faddr_cache, fp);
873 		sctp->sctp_nfaddrs--;
874 	}
875 
876 	sctp->sctp_faddrs = NULL;
877 	ASSERT(sctp->sctp_nfaddrs == 0);
878 	if (sctp->sctp_conn_tfp != NULL && !caller_holds_lock) {
879 		mutex_exit(&sctp->sctp_conn_tfp->tf_lock);
880 	}
882 }
883 
884 void
885 sctp_zap_addrs(sctp_t *sctp)
886 {
887 	sctp_zap_faddrs(sctp, 0);
888 	sctp_free_saddrs(sctp);
889 }
890 
891 /*
892  * Initialize the IPv4 header. Loses any record of any IP options.
893  */
894 int
895 sctp_header_init_ipv4(sctp_t *sctp, int sleep)
896 {
897 	sctp_hdr_t	*sctph;
898 
899 	/*
900 	 * This is a simple initialization. If there's
901 	 * already a template, it should never be too small,
902 	 * so reuse it.  Otherwise, allocate space for the new one.
903 	 */
904 	if (sctp->sctp_iphc != NULL) {
905 		ASSERT(sctp->sctp_iphc_len >= SCTP_MAX_COMBINED_HEADER_LENGTH);
906 		bzero(sctp->sctp_iphc, sctp->sctp_iphc_len);
907 	} else {
908 		sctp->sctp_iphc_len = SCTP_MAX_COMBINED_HEADER_LENGTH;
909 		sctp->sctp_iphc = kmem_zalloc(sctp->sctp_iphc_len, sleep);
910 		if (sctp->sctp_iphc == NULL) {
911 			sctp->sctp_iphc_len = 0;
912 			return (ENOMEM);
913 		}
914 	}
915 
916 	sctp->sctp_ipha = (ipha_t *)sctp->sctp_iphc;
917 
918 	sctp->sctp_hdr_len = sizeof (ipha_t) + sizeof (sctp_hdr_t);
919 	sctp->sctp_ip_hdr_len = sizeof (ipha_t);
920 	sctp->sctp_ipha->ipha_length = htons(sizeof (ipha_t) +
921 	    sizeof (sctp_hdr_t));
	sctp->sctp_ipha->ipha_version_and_hdr_length =
	    (IP_VERSION << 4) | IP_SIMPLE_HDR_LENGTH_IN_WORDS;
924 
925 	/*
926 	 * These two fields should be zero, and are already set above.
927 	 *
928 	 * sctp->sctp_ipha->ipha_ident,
929 	 * sctp->sctp_ipha->ipha_fragment_offset_and_flags.
930 	 */
931 
932 	sctp->sctp_ipha->ipha_ttl = sctp_ipv4_ttl;
933 	sctp->sctp_ipha->ipha_protocol = IPPROTO_SCTP;
934 
935 	sctph = (sctp_hdr_t *)(sctp->sctp_iphc + sizeof (ipha_t));
936 	sctp->sctp_sctph = sctph;
937 
938 	return (0);
939 }
940 
941 /*
942  * Update sctp_sticky_hdrs based on sctp_sticky_ipp.
943  * The headers include ip6i_t (if needed), ip6_t, any sticky extension
944  * headers, and the maximum size sctp header (to avoid reallocation
945  * on the fly for additional sctp options).
 * Returns failure if it can't allocate memory.
947  */
948 int
949 sctp_build_hdrs(sctp_t *sctp)
950 {
951 	char		*hdrs;
952 	uint_t		hdrs_len;
953 	ip6i_t		*ip6i;
954 	char		buf[SCTP_MAX_HDR_LENGTH];
955 	ip6_pkt_t	*ipp = &sctp->sctp_sticky_ipp;
956 	in6_addr_t	src;
957 	in6_addr_t	dst;
958 
959 	/*
960 	 * save the existing sctp header and source/dest IP addresses
961 	 */
962 	bcopy(sctp->sctp_sctph6, buf, sizeof (sctp_hdr_t));
963 	src = sctp->sctp_ip6h->ip6_src;
964 	dst = sctp->sctp_ip6h->ip6_dst;
965 	hdrs_len = ip_total_hdrs_len_v6(ipp) + SCTP_MAX_HDR_LENGTH;
966 	ASSERT(hdrs_len != 0);
967 	if (hdrs_len > sctp->sctp_iphc6_len) {
968 		/* Need to reallocate */
969 		hdrs = kmem_zalloc(hdrs_len, KM_NOSLEEP);
970 		if (hdrs == NULL)
971 			return (ENOMEM);
972 
973 		if (sctp->sctp_iphc6_len != 0)
974 			kmem_free(sctp->sctp_iphc6, sctp->sctp_iphc6_len);
975 		sctp->sctp_iphc6 = hdrs;
976 		sctp->sctp_iphc6_len = hdrs_len;
977 	}
978 	ip_build_hdrs_v6((uchar_t *)sctp->sctp_iphc6,
979 	    hdrs_len - SCTP_MAX_HDR_LENGTH, ipp, IPPROTO_SCTP);
980 
981 	/* Set header fields not in ipp */
982 	if (ipp->ipp_fields & IPPF_HAS_IP6I) {
983 		ip6i = (ip6i_t *)sctp->sctp_iphc6;
984 		sctp->sctp_ip6h = (ip6_t *)&ip6i[1];
985 	} else {
986 		sctp->sctp_ip6h = (ip6_t *)sctp->sctp_iphc6;
987 	}
988 	/*
	 * sctp->sctp_ip_hdr6_len will include the ip6i_t if there is one.
990 	 */
991 	sctp->sctp_ip_hdr6_len = hdrs_len - SCTP_MAX_HDR_LENGTH;
992 	sctp->sctp_sctph6 = (sctp_hdr_t *)(sctp->sctp_iphc6 +
993 	    sctp->sctp_ip_hdr6_len);
994 	sctp->sctp_hdr6_len = sctp->sctp_ip_hdr6_len + sizeof (sctp_hdr_t);
995 
996 	bcopy(buf, sctp->sctp_sctph6, sizeof (sctp_hdr_t));
997 
998 	sctp->sctp_ip6h->ip6_src = src;
999 	sctp->sctp_ip6h->ip6_dst = dst;
1000 	/*
1001 	 * If the hoplimit was not set by ip_build_hdrs_v6(), we need to
1002 	 * set it to the default value for SCTP.
1003 	 */
1004 	if (!(ipp->ipp_fields & IPPF_UNICAST_HOPS))
1005 		sctp->sctp_ip6h->ip6_hops = sctp_ipv6_hoplimit;
1006 	/*
1007 	 * If we're setting extension headers after a connection
1008 	 * has been established, and if we have a routing header
1009 	 * among the extension headers, call ip_massage_options_v6 to
	 * manipulate the routing header/ip6_dst and set the checksum
1011 	 * difference in the sctp header template.
1012 	 * (This happens in sctp_connect_ipv6 if the routing header
1013 	 * is set prior to the connect.)
1014 	 */
1015 
1016 	if ((sctp->sctp_state >= SCTPS_COOKIE_WAIT) &&
1017 	    (sctp->sctp_sticky_ipp.ipp_fields & IPPF_RTHDR)) {
1018 		ip6_rthdr_t *rth;
1019 
1020 		rth = ip_find_rthdr_v6(sctp->sctp_ip6h,
1021 		    (uint8_t *)sctp->sctp_sctph6);
1022 		if (rth != NULL)
1023 			(void) ip_massage_options_v6(sctp->sctp_ip6h, rth);
1024 	}
1025 	return (0);
1026 }
1027 
1028 /*
1029  * Initialize the IPv6 header. Loses any record of any IPv6 extension headers.
1030  */
1031 int
1032 sctp_header_init_ipv6(sctp_t *sctp, int sleep)
1033 {
1034 	sctp_hdr_t	*sctph;
1035 
1036 	/*
1037 	 * This is a simple initialization. If there's
1038 	 * already a template, it should never be too small,
1039 	 * so reuse it. Otherwise, allocate space for the new one.
1040 	 * Ensure that there is enough space to "downgrade" the sctp_t
1041 	 * to an IPv4 sctp_t. This requires having space for a full load
 * of IPv4 options.
1043 	 */
1044 	if (sctp->sctp_iphc6 != NULL) {
1045 		ASSERT(sctp->sctp_iphc6_len >=
1046 		    SCTP_MAX_COMBINED_HEADER_LENGTH);
1047 		bzero(sctp->sctp_iphc6, sctp->sctp_iphc6_len);
1048 	} else {
1049 		sctp->sctp_iphc6_len = SCTP_MAX_COMBINED_HEADER_LENGTH;
		sctp->sctp_iphc6 = kmem_zalloc(sctp->sctp_iphc6_len, sleep);
1051 		if (sctp->sctp_iphc6 == NULL) {
1052 			sctp->sctp_iphc6_len = 0;
1053 			return (ENOMEM);
1054 		}
1055 	}
1056 	sctp->sctp_hdr6_len = IPV6_HDR_LEN + sizeof (sctp_hdr_t);
1057 	sctp->sctp_ip_hdr6_len = IPV6_HDR_LEN;
1058 	sctp->sctp_ip6h = (ip6_t *)sctp->sctp_iphc6;
1059 
1060 	/* Initialize the header template */
1061 
1062 	sctp->sctp_ip6h->ip6_vcf = IPV6_DEFAULT_VERS_AND_FLOW;
1063 	sctp->sctp_ip6h->ip6_plen = ntohs(sizeof (sctp_hdr_t));
1064 	sctp->sctp_ip6h->ip6_nxt = IPPROTO_SCTP;
1065 	sctp->sctp_ip6h->ip6_hops = sctp_ipv6_hoplimit;
1066 
1067 	sctph = (sctp_hdr_t *)(sctp->sctp_iphc6 + IPV6_HDR_LEN);
1068 	sctp->sctp_sctph6 = sctph;
1069 
1070 	return (0);
1071 }
1072 
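/*
 * On a labeled system, replace any existing security option in the
 * IPv4 header template with the label computed for the current
 * destination, adjusting the cached header lengths and sctp header
 * pointer accordingly.
 */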
1073 static int
1074 sctp_v4_label(sctp_t *sctp)
1075 {
1076 	uchar_t optbuf[IP_MAX_OPT_LENGTH];
1077 	const cred_t *cr = CONN_CRED(sctp->sctp_connp);
1078 	int added;
1079 
1080 	if (tsol_compute_label(cr, sctp->sctp_ipha->ipha_dst, optbuf,
1081 	    sctp->sctp_mac_exempt) != 0)
1082 		return (EACCES);
1083 
1084 	added = tsol_remove_secopt(sctp->sctp_ipha, sctp->sctp_hdr_len);
1085 	if (added == -1)
1086 		return (EACCES);
1087 	sctp->sctp_hdr_len += added;
1088 	sctp->sctp_sctph = (sctp_hdr_t *)((uchar_t *)sctp->sctp_sctph + added);
1089 	sctp->sctp_ip_hdr_len += added;
1090 	if ((sctp->sctp_v4label_len = optbuf[IPOPT_OLEN]) != 0) {
1091 		sctp->sctp_v4label_len = (sctp->sctp_v4label_len + 3) & ~3;
1092 		added = tsol_prepend_option(optbuf, sctp->sctp_ipha,
1093 		    sctp->sctp_hdr_len);
1094 		if (added == -1)
1095 			return (EACCES);
1096 		sctp->sctp_hdr_len += added;
1097 		sctp->sctp_sctph = (sctp_hdr_t *)((uchar_t *)sctp->sctp_sctph +
1098 		    added);
1099 		sctp->sctp_ip_hdr_len += added;
1100 	}
1101 	return (0);
1102 }
1103 
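/*
 * On a labeled system, compute the security label for the current IPv6
 * destination, record it in the sticky IPv6 options and rebuild the
 * IPv6 header template.
 */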
1104 static int
1105 sctp_v6_label(sctp_t *sctp)
1106 {
1107 	uchar_t optbuf[TSOL_MAX_IPV6_OPTION];
1108 	const cred_t *cr = CONN_CRED(sctp->sctp_connp);
1109 
1110 	if (tsol_compute_label_v6(cr, &sctp->sctp_ip6h->ip6_dst, optbuf,
1111 	    sctp->sctp_mac_exempt) != 0)
1112 		return (EACCES);
1113 	if (tsol_update_sticky(&sctp->sctp_sticky_ipp, &sctp->sctp_v6label_len,
1114 	    optbuf) != 0)
1115 		return (EACCES);
1116 	if (sctp_build_hdrs(sctp) != 0)
1117 		return (EACCES);
1118 	return (0);
1119 }
1120 
1121 /*
1122  * XXX implement more sophisticated logic
1123  */
1124 int
1125 sctp_set_hdraddrs(sctp_t *sctp)
1126 {
1127 	sctp_faddr_t *fp;
1128 	int gotv4 = 0;
1129 	int gotv6 = 0;
1130 
1131 	ASSERT(sctp->sctp_faddrs != NULL);
1132 	ASSERT(sctp->sctp_nsaddrs > 0);
1133 
1134 	/* Set up using the primary first */
1135 	if (IN6_IS_ADDR_V4MAPPED(&sctp->sctp_primary->faddr)) {
1136 		IN6_V4MAPPED_TO_IPADDR(&sctp->sctp_primary->faddr,
1137 		    sctp->sctp_ipha->ipha_dst);
1138 		/* saddr may be unspec; make_mp() will handle this */
1139 		IN6_V4MAPPED_TO_IPADDR(&sctp->sctp_primary->saddr,
1140 		    sctp->sctp_ipha->ipha_src);
1141 		if (!is_system_labeled() || sctp_v4_label(sctp) == 0) {
1142 			gotv4 = 1;
1143 			if (sctp->sctp_ipversion == IPV4_VERSION) {
1144 				goto copyports;
1145 			}
1146 		}
1147 	} else {
1148 		sctp->sctp_ip6h->ip6_dst = sctp->sctp_primary->faddr;
1149 		/* saddr may be unspec; make_mp() will handle this */
1150 		sctp->sctp_ip6h->ip6_src = sctp->sctp_primary->saddr;
1151 		if (!is_system_labeled() || sctp_v6_label(sctp) == 0)
1152 			gotv6 = 1;
1153 	}
1154 
1155 	for (fp = sctp->sctp_faddrs; fp; fp = fp->next) {
1156 		if (!gotv4 && IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
1157 			IN6_V4MAPPED_TO_IPADDR(&fp->faddr,
1158 			    sctp->sctp_ipha->ipha_dst);
1159 			/* copy in the faddr_t's saddr */
1160 			IN6_V4MAPPED_TO_IPADDR(&fp->saddr,
1161 			    sctp->sctp_ipha->ipha_src);
1162 			if (!is_system_labeled() || sctp_v4_label(sctp) == 0) {
1163 				gotv4 = 1;
1164 				if (sctp->sctp_ipversion == IPV4_VERSION ||
1165 				    gotv6) {
1166 					break;
1167 				}
1168 			}
1169 		} else if (!gotv6 && !IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
1170 			sctp->sctp_ip6h->ip6_dst = fp->faddr;
1171 			/* copy in the faddr_t's saddr */
1172 			sctp->sctp_ip6h->ip6_src = fp->saddr;
1173 			if (!is_system_labeled() || sctp_v6_label(sctp) == 0) {
1174 				gotv6 = 1;
1175 				if (gotv4)
1176 					break;
1177 			}
1178 		}
1179 	}
1180 
1181 copyports:
1182 	if (!gotv4 && !gotv6)
1183 		return (EACCES);
1184 
1185 	/* copy in the ports for good measure */
1186 	sctp->sctp_sctph->sh_sport = sctp->sctp_lport;
1187 	sctp->sctp_sctph->sh_dport = sctp->sctp_fport;
1188 
1189 	sctp->sctp_sctph6->sh_sport = sctp->sctp_lport;
1190 	sctp->sctp_sctph6->sh_dport = sctp->sctp_fport;
1191 	return (0);
1192 }
1193 
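/*
 * Build a PARM_UNRECOGNIZED parameter wrapping the given unrecognized
 * parameter and link it onto *errmp so that it can be reported back to
 * the peer.
 */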
1194 void
1195 sctp_add_unrec_parm(sctp_parm_hdr_t *uph, mblk_t **errmp)
1196 {
1197 	mblk_t *mp;
1198 	sctp_parm_hdr_t *ph;
1199 	size_t len;
1200 	int pad;
1201 
1202 	len = sizeof (*ph) + ntohs(uph->sph_len);
1203 	if ((pad = len % 4) != 0) {
1204 		pad = 4 - pad;
1205 		len += pad;
1206 	}
1207 	mp = allocb(len, BPRI_MED);
1208 	if (mp == NULL) {
1209 		return;
1210 	}
1211 
1212 	ph = (sctp_parm_hdr_t *)(mp->b_rptr);
1213 	ph->sph_type = htons(PARM_UNRECOGNIZED);
1214 	ph->sph_len = htons(len - pad);
1215 
1216 	/* copy in the unrecognized parameter */
1217 	bcopy(uph, ph + 1, ntohs(uph->sph_len));
1218 
1219 	mp->b_wptr = mp->b_rptr + len;
1220 	if (*errmp != NULL) {
1221 		linkb(*errmp, mp);
1222 	} else {
1223 		*errmp = mp;
1224 	}
1225 }
1226 
1227 /*
1228  * o Bounds checking
1229  * o Updates remaining
1230  * o Checks alignment
1231  */
1232 sctp_parm_hdr_t *
1233 sctp_next_parm(sctp_parm_hdr_t *current, ssize_t *remaining)
1234 {
1235 	int pad;
1236 	uint16_t len;
1237 
1238 	len = ntohs(current->sph_len);
1239 	*remaining -= len;
1240 	if (*remaining < sizeof (*current) || len < sizeof (*current)) {
1241 		return (NULL);
1242 	}
1243 	if ((pad = len & (SCTP_ALIGN - 1)) != 0) {
1244 		pad = SCTP_ALIGN - pad;
1245 		*remaining -= pad;
1246 	}
1247 	/*LINTED pointer cast may result in improper alignment*/
1248 	current = (sctp_parm_hdr_t *)((char *)current + len + pad);
1249 	return (current);
1250 }
1251 
1252 /*
1253  * Sets the address parameters given in the INIT chunk into sctp's
1254  * faddrs; if psctp is non-NULL, copies psctp's saddrs. If there are
1255  * no address parameters in the INIT chunk, a single faddr is created
1256  * from the ip hdr at the beginning of pkt.
 * If there are already existing addresses hanging off sctp, merge
 * them in; if the old info contains addresses which are not present
 * in the new info, get rid of them and clean up the pointers of any
 * messages which have them as their target address.
1261  *
1262  * We also re-adjust the source address list here since the list may
1263  * contain more than what is actually part of the association. If
1264  * we get here from sctp_send_cookie_echo(), we are on the active
1265  * side and psctp will be NULL and ich will be the INIT-ACK chunk.
1266  * If we get here from sctp_accept_comm(), ich will be the INIT chunk
 * and psctp will be the listening endpoint.
1268  *
1269  * INIT processing: When processing the INIT we inherit the src address
1270  * list from the listener. For a loopback or linklocal association, we
1271  * delete the list and just take the address from the IP header (since
1272  * that's how we created the INIT-ACK). Additionally, for loopback we
1273  * ignore the address params in the INIT. For determining which address
1274  * types were sent in the INIT-ACK we follow the same logic as in
1275  * creating the INIT-ACK. We delete addresses of the type that are not
1276  * supported by the peer.
1277  *
1278  * INIT-ACK processing: When processing the INIT-ACK since we had not
1279  * included addr params for loopback or linklocal addresses when creating
1280  * the INIT, we just use the address from the IP header. Further, for
1281  * loopback we ignore the addr param list. We mark addresses of the
1282  * type not supported by the peer as unconfirmed.
1283  *
1284  * In case of INIT processing we look for supported address types in the
1285  * supported address param, if present. In both cases the address type in
1286  * the IP header is supported as well as types for addresses in the param
1287  * list, if any.
1288  *
1289  * Once we have the supported address types sctp_check_saddr() runs through
1290  * the source address list and deletes or marks as unconfirmed address of
1291  * types not supported by the peer.
1292  *
1293  * Returns 0 on success, sys errno on failure
1294  */
1295 int
1296 sctp_get_addrparams(sctp_t *sctp, sctp_t *psctp, mblk_t *pkt,
1297     sctp_chunk_hdr_t *ich, uint_t *sctp_options)
1298 {
1299 	sctp_init_chunk_t	*init;
1300 	ipha_t			*iph;
1301 	ip6_t			*ip6h;
1302 	in6_addr_t		hdrsaddr[1];
1303 	in6_addr_t		hdrdaddr[1];
1304 	sctp_parm_hdr_t		*ph;
1305 	ssize_t			remaining;
1306 	int			isv4;
1307 	int			err;
1308 	sctp_faddr_t		*fp;
1309 	int			supp_af = 0;
1310 	boolean_t		check_saddr = B_TRUE;
1311 	in6_addr_t		curaddr;
1312 
1313 	if (sctp_options != NULL)
1314 		*sctp_options = 0;
1315 
1316 	/* extract the address from the IP header */
1317 	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
1318 	if (isv4) {
1319 		iph = (ipha_t *)pkt->b_rptr;
1320 		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdrsaddr);
1321 		IN6_IPADDR_TO_V4MAPPED(iph->ipha_dst, hdrdaddr);
1322 		supp_af |= PARM_SUPP_V4;
1323 	} else {
1324 		ip6h = (ip6_t *)pkt->b_rptr;
1325 		hdrsaddr[0] = ip6h->ip6_src;
1326 		hdrdaddr[0] = ip6h->ip6_dst;
1327 		supp_af |= PARM_SUPP_V6;
1328 	}
1329 
1330 	/*
1331 	 * Unfortunately, we can't delay this because adding an faddr
1332 	 * looks for the presence of the source address (from the ire
1333 	 * for the faddr) in the source address list. We could have
1334 	 * delayed this if, say, this was a loopback/linklocal connection.
1335 	 * Now, we just end up nuking this list and taking the addr from
1336 	 * the IP header for loopback/linklocal.
1337 	 */
1338 	if (psctp != NULL && psctp->sctp_nsaddrs > 0) {
1339 		ASSERT(sctp->sctp_nsaddrs == 0);
1340 
1341 		err = sctp_dup_saddrs(psctp, sctp, KM_NOSLEEP);
1342 		if (err != 0)
1343 			return (err);
1344 	}
1345 	/*
1346 	 * We will add the faddr before parsing the address list as this
1347 	 * might be a loopback connection and we would not have to
1348 	 * go through the list.
1349 	 *
1350 	 * Make sure the header's addr is in the list
1351 	 */
1352 	fp = sctp_lookup_faddr(sctp, hdrsaddr);
1353 	if (fp == NULL) {
1354 		/* not included; add it now */
1355 		err = sctp_add_faddr(sctp, hdrsaddr, KM_NOSLEEP, B_TRUE);
1356 		if (err != 0)
1357 			return (err);
1358 
1359 		/* sctp_faddrs will be the hdr addr */
1360 		fp = sctp->sctp_faddrs;
1361 	}
1362 	/* make the header addr the primary */
1363 
1364 	if (cl_sctp_assoc_change != NULL && psctp == NULL)
1365 		curaddr = sctp->sctp_current->faddr;
1366 
1367 	sctp->sctp_primary = fp;
1368 	sctp->sctp_current = fp;
1369 	sctp->sctp_mss = fp->sfa_pmss;
1370 
1371 	/* For loopback connections & linklocal get address from the header */
1372 	if (sctp->sctp_loopback || sctp->sctp_linklocal) {
1373 		if (sctp->sctp_nsaddrs != 0)
1374 			sctp_free_saddrs(sctp);
1375 		if ((err = sctp_saddr_add_addr(sctp, hdrdaddr, 0)) != 0)
1376 			return (err);
1377 		/* For loopback ignore address list */
1378 		if (sctp->sctp_loopback)
1379 			return (0);
1380 		check_saddr = B_FALSE;
1381 	}
1382 
1383 	/* Walk the params in the INIT [ACK], pulling out addr params */
1384 	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
1385 	    sizeof (sctp_init_chunk_t);
1386 	if (remaining < sizeof (*ph)) {
1387 		if (check_saddr) {
1388 			sctp_check_saddr(sctp, supp_af, psctp == NULL ?
1389 			    B_FALSE : B_TRUE);
1390 		}
1391 		ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
1392 		return (0);
1393 	}
1394 
1395 	init = (sctp_init_chunk_t *)(ich + 1);
1396 	ph = (sctp_parm_hdr_t *)(init + 1);
1397 
1398 	/* params will have already been byteordered when validating */
1399 	while (ph != NULL) {
1400 		if (ph->sph_type == htons(PARM_SUPP_ADDRS)) {
1401 			int		plen;
1402 			uint16_t	*p;
1403 			uint16_t	addrtype;
1404 
1405 			ASSERT(psctp != NULL);
1406 			plen = ntohs(ph->sph_len);
1407 			p = (uint16_t *)(ph + 1);
1408 			while (plen > 0) {
1409 				addrtype = ntohs(*p);
1410 				switch (addrtype) {
				case PARM_ADDR6:
					supp_af |= PARM_SUPP_V6;
					break;
				case PARM_ADDR4:
					supp_af |= PARM_SUPP_V4;
					break;
				default:
					break;
1419 				}
1420 				p++;
1421 				plen -= sizeof (*p);
1422 			}
1423 		} else if (ph->sph_type == htons(PARM_ADDR4)) {
1424 			if (remaining >= PARM_ADDR4_LEN) {
1425 				in6_addr_t addr;
1426 				ipaddr_t ta;
1427 
1428 				supp_af |= PARM_SUPP_V4;
1429 				/*
1430 				 * Screen out broad/multicasts & loopback.
1431 				 * If the endpoint only accepts v6 address,
1432 				 * go to the next one.
1433 				 */
1434 				bcopy(ph + 1, &ta, sizeof (ta));
1435 				if (ta == 0 ||
1436 				    ta == INADDR_BROADCAST ||
1437 				    ta == htonl(INADDR_LOOPBACK) ||
1438 				    IN_MULTICAST(ta) ||
1439 				    sctp->sctp_connp->conn_ipv6_v6only) {
1440 					goto next;
1441 				}
1442 				/*
1443 				 * XXX also need to check for subnet
1444 				 * broadcasts. This should probably
1445 				 * wait until we have full access
1446 				 * to the ILL tables.
1447 				 */
1448 
1449 				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
1450 				    (ph + 1), &addr);
1451 				/* Check for duplicate. */
1452 				if (sctp_lookup_faddr(sctp, &addr) != NULL)
1453 					goto next;
1454 
1455 				/* OK, add it to the faddr set */
1456 				err = sctp_add_faddr(sctp, &addr, KM_NOSLEEP,
1457 				    B_FALSE);
1458 				if (err != 0)
1459 					return (err);
1460 			}
1461 		} else if (ph->sph_type == htons(PARM_ADDR6) &&
1462 		    sctp->sctp_family == AF_INET6) {
			/* A v4 socket should not take v6 addresses. */
1464 			if (remaining >= PARM_ADDR6_LEN) {
1465 				in6_addr_t *addr6;
1466 
1467 				supp_af |= PARM_SUPP_V6;
1468 				addr6 = (in6_addr_t *)(ph + 1);
1469 				/*
1470 				 * Screen out link locals, mcast, loopback
1471 				 * and bogus v6 address.
1472 				 */
1473 				if (IN6_IS_ADDR_LINKLOCAL(addr6) ||
1474 				    IN6_IS_ADDR_MULTICAST(addr6) ||
1475 				    IN6_IS_ADDR_LOOPBACK(addr6) ||
1476 				    IN6_IS_ADDR_V4MAPPED(addr6)) {
1477 					goto next;
1478 				}
1479 				/* Check for duplicate. */
1480 				if (sctp_lookup_faddr(sctp, addr6) != NULL)
1481 					goto next;
1482 
1483 				err = sctp_add_faddr(sctp,
1484 				    (in6_addr_t *)(ph + 1), KM_NOSLEEP,
1485 				    B_FALSE);
1486 				if (err != 0)
1487 					return (err);
1488 			}
1489 		} else if (ph->sph_type == htons(PARM_FORWARD_TSN)) {
1490 			if (sctp_options != NULL)
1491 				*sctp_options |= SCTP_PRSCTP_OPTION;
1492 		} /* else; skip */
1493 
1494 next:
1495 		ph = sctp_next_parm(ph, &remaining);
1496 	}
1497 	if (check_saddr) {
1498 		sctp_check_saddr(sctp, supp_af, psctp == NULL ? B_FALSE :
1499 		    B_TRUE);
1500 	}
1501 	ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
1502 	/*
1503 	 * We have the right address list now, update clustering's
1504 	 * knowledge because when we sent the INIT we had just added
1505 	 * the address the INIT was sent to.
1506 	 */
1507 	if (psctp == NULL && cl_sctp_assoc_change != NULL) {
1508 		uchar_t	*alist;
1509 		size_t	asize;
1510 		uchar_t	*dlist;
1511 		size_t	dsize;
1512 
1513 		asize = sizeof (in6_addr_t) * sctp->sctp_nfaddrs;
1514 		alist = kmem_alloc(asize, KM_NOSLEEP);
1515 		if (alist == NULL) {
1516 			SCTP_KSTAT(sctp_cl_assoc_change);
1517 			return (ENOMEM);
1518 		}
1519 		/*
1520 		 * Just include the address the INIT was sent to in the
1521 		 * delete list and send the entire faddr list. We could
1522 		 * do it differently (i.e include all the addresses in the
1523 		 * add list even if it contains the original address OR
1524 		 * remove the original address from the add list etc.), but
1525 		 * this seems reasonable enough.
1526 		 */
1527 		dsize = sizeof (in6_addr_t);
1528 		dlist = kmem_alloc(dsize, KM_NOSLEEP);
1529 		if (dlist == NULL) {
1530 			kmem_free(alist, asize);
1531 			SCTP_KSTAT(sctp_cl_assoc_change);
1532 			return (ENOMEM);
1533 		}
1534 		bcopy(&curaddr, dlist, sizeof (curaddr));
1535 		sctp_get_faddr_list(sctp, alist, asize);
1536 		(*cl_sctp_assoc_change)(sctp->sctp_family, alist, asize,
1537 		    sctp->sctp_nfaddrs, dlist, dsize, 1, SCTP_CL_PADDR,
1538 		    (cl_sctp_handle_t)sctp);
1539 		/* alist and dlist will be freed by the clustering module */
1540 	}
1541 	return (0);
1542 }
1543 
1544 /*
1545  * Returns 0 if the check failed and the restart should be refused,
1546  * 1 if the check succeeded.
1547  */
1548 int
1549 sctp_secure_restart_check(mblk_t *pkt, sctp_chunk_hdr_t *ich, uint32_t ports,
1550     int sleep)
1551 {
1552 	sctp_faddr_t *fp, *fpa, *fphead = NULL;
1553 	sctp_parm_hdr_t *ph;
1554 	ssize_t remaining;
1555 	int isv4;
1556 	ipha_t *iph;
1557 	ip6_t *ip6h;
1558 	in6_addr_t hdraddr[1];
1559 	int retval = 0;
1560 	sctp_tf_t *tf;
1561 	sctp_t *sctp;
1562 	int compres;
1563 	sctp_init_chunk_t *init;
1564 	int nadded = 0;
1565 
1566 	/* extract the address from the IP header */
1567 	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
1568 	if (isv4) {
1569 		iph = (ipha_t *)pkt->b_rptr;
1570 		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdraddr);
1571 	} else {
1572 		ip6h = (ip6_t *)pkt->b_rptr;
1573 		hdraddr[0] = ip6h->ip6_src;
1574 	}
1575 
1576 	/* Walk the params in the INIT [ACK], pulling out addr params */
1577 	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
1578 	    sizeof (sctp_init_chunk_t);
1579 	if (remaining < sizeof (*ph)) {
1580 		/* no parameters; restart OK */
1581 		return (1);
1582 	}
1583 	init = (sctp_init_chunk_t *)(ich + 1);
1584 	ph = (sctp_parm_hdr_t *)(init + 1);
1585 
1586 	while (ph != NULL) {
1587 		/* params will have already been byteordered when validating */
1588 		if (ph->sph_type == htons(PARM_ADDR4)) {
1589 			if (remaining >= PARM_ADDR4_LEN) {
1590 				in6_addr_t addr;
1591 				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
1592 				    (ph + 1), &addr);
1593 				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
1594 				    sleep);
1595 				if (!fpa) {
1596 					goto done;
1597 				}
1598 				bzero(fpa, sizeof (*fpa));
1599 				fpa->faddr = addr;
1600 				fpa->next = NULL;
1601 			}
1602 		} else if (ph->sph_type == htons(PARM_ADDR6)) {
1603 			if (remaining >= PARM_ADDR6_LEN) {
1604 				fpa = kmem_cache_alloc(sctp_kmem_faddr_cache,
1605 				    sleep);
1606 				if (!fpa) {
1607 					goto done;
1608 				}
1609 				bzero(fpa, sizeof (*fpa));
1610 				bcopy(ph + 1, &fpa->faddr,
1611 				    sizeof (fpa->faddr));
1612 				fpa->next = NULL;
1613 			}
1614 		} else {
1615 			/* else not addr param; skip */
1616 			fpa = NULL;
1617 		}
1618 		/* link in the new addr, if it was an addr param */
1619 		if (fpa) {
1620 			if (!fphead) {
1621 				fphead = fpa;
1622 				fp = fphead;
1623 			} else {
1624 				fp->next = fpa;
1625 				fp = fpa;
1626 			}
1627 		}
1628 
1629 		ph = sctp_next_parm(ph, &remaining);
1630 	}
1631 
1632 	if (fphead == NULL) {
1633 		/* no addr parameters; restart OK */
1634 		return (1);
1635 	}
1636 
1637 	/*
1638 	 * got at least one; make sure the header's addr is
1639 	 * in the list
1640 	 */
1641 	fp = sctp_lookup_faddr_nosctp(fphead, hdraddr);
1642 	if (!fp) {
1643 		/* not included; add it now */
1644 		fp = kmem_cache_alloc(sctp_kmem_faddr_cache, sleep);
1645 		if (!fp) {
1646 			goto done;
1647 		}
1648 		bzero(fp, sizeof (*fp));
1649 		fp->faddr = *hdraddr;
1650 		fp->next = fphead;
1651 		fphead = fp;
1652 	}
1653 
1654 	/*
1655 	 * Now, we can finally do the check: For each sctp instance
1656 	 * on the hash line for ports, compare its faddr set against
1657 	 * the new one. If the new one is a strict subset of any
1658 	 * existing sctp's faddrs, the restart is OK. However, if there
1659 	 * is an overlap, this could be an attack, so return failure.
1660 	 * If all sctp's faddrs are disjoint, this is a legitimate new
1661 	 * association.
1662 	 */
1663 	tf = &(sctp_conn_fanout[SCTP_CONN_HASH(ports)]);
1664 	mutex_enter(&tf->tf_lock);
1665 
1666 	for (sctp = tf->tf_sctp; sctp; sctp = sctp->sctp_conn_hash_next) {
1667 		if (ports != sctp->sctp_ports) {
1668 			continue;
1669 		}
1670 		compres = sctp_compare_faddrsets(fphead, sctp->sctp_faddrs);
1671 		if (compres <= SCTP_ADDR_SUBSET) {
1672 			retval = 1;
1673 			mutex_exit(&tf->tf_lock);
1674 			goto done;
1675 		}
1676 		if (compres == SCTP_ADDR_OVERLAP) {
1677 			dprint(1,
1678 			    ("new assoc from %x:%x:%x:%x overlaps with %p\n",
1679 			    SCTP_PRINTADDR(*hdraddr), (void *)sctp));
1680 			/*
1681 			 * While we still hold the lock, we need to
1682 			 * figure out which addresses have been
1683 			 * added so we can include them in the abort
1684 			 * we will send back. Since these faddrs will
1685 			 * never be used, we overload the rto field
1686 			 * here, setting it to 0 if the address was
1687 			 * not added, 1 if it was added.
1688 			 */
1689 			for (fp = fphead; fp; fp = fp->next) {
1690 				if (sctp_lookup_faddr(sctp, &fp->faddr)) {
1691 					fp->rto = 0;
1692 				} else {
1693 					fp->rto = 1;
1694 					nadded++;
1695 				}
1696 			}
1697 			mutex_exit(&tf->tf_lock);
1698 			goto done;
1699 		}
1700 	}
1701 	mutex_exit(&tf->tf_lock);
1702 
1703 	/* All faddrs are disjoint; legit new association */
1704 	retval = 1;
1705 
1706 done:
	/* If there were attempted adds, send back an abort listing them */
1708 	if (nadded > 0) {
1709 		void *dtail;
1710 		size_t dlen;
1711 
1712 		dtail = kmem_alloc(PARM_ADDR6_LEN * nadded, KM_NOSLEEP);
1713 		if (dtail == NULL) {
1714 			goto cleanup;
1715 		}
1716 
1717 		ph = dtail;
1718 		dlen = 0;
1719 		for (fp = fphead; fp; fp = fp->next) {
1720 			if (fp->rto == 0) {
1721 				continue;
1722 			}
1723 			if (IN6_IS_ADDR_V4MAPPED(&fp->faddr)) {
1724 				ipaddr_t addr4;
1725 
1726 				ph->sph_type = htons(PARM_ADDR4);
1727 				ph->sph_len = htons(PARM_ADDR4_LEN);
1728 				IN6_V4MAPPED_TO_IPADDR(&fp->faddr, addr4);
1729 				ph++;
1730 				bcopy(&addr4, ph, sizeof (addr4));
1731 				ph = (sctp_parm_hdr_t *)
1732 				    ((char *)ph + sizeof (addr4));
1733 				dlen += PARM_ADDR4_LEN;
1734 			} else {
1735 				ph->sph_type = htons(PARM_ADDR6);
1736 				ph->sph_len = htons(PARM_ADDR6_LEN);
1737 				ph++;
1738 				bcopy(&fp->faddr, ph, sizeof (fp->faddr));
1739 				ph = (sctp_parm_hdr_t *)
1740 				    ((char *)ph + sizeof (fp->faddr));
1741 				dlen += PARM_ADDR6_LEN;
1742 			}
1743 		}
1744 
1745 		/* Send off the abort */
1746 		sctp_send_abort(sctp, sctp_init2vtag(ich),
1747 		    SCTP_ERR_RESTART_NEW_ADDRS, dtail, dlen, pkt, 0, B_TRUE);
1748 
1749 		kmem_free(dtail, PARM_ADDR6_LEN * nadded);
1750 	}
1751 
1752 cleanup:
1753 	/* Clean up */
1754 	if (fphead) {
1755 		sctp_faddr_t *fpn;
1756 		for (fp = fphead; fp; fp = fpn) {
1757 			fpn = fp->next;
1758 			kmem_cache_free(sctp_kmem_faddr_cache, fp);
1759 		}
1760 	}
1761 
1762 	return (retval);
1763 }
1764 
1765 /*
1766  * Reset any state related to transmitted chunks.
1767  */
1768 void
1769 sctp_congest_reset(sctp_t *sctp)
1770 {
1771 	sctp_faddr_t	*fp;
1772 	mblk_t		*mp;
1773 
1774 	for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
1775 		fp->ssthresh = sctp_initial_mtu;
1776 		fp->cwnd = fp->sfa_pmss * sctp_slow_start_initial;
1777 		fp->suna = 0;
1778 		fp->pba = 0;
1779 	}
1780 	/*
1781 	 * Clean up the transmit list as well since we have reset accounting
1782 	 * on all the fps. Send event upstream, if required.
1783 	 */
1784 	while ((mp = sctp->sctp_xmit_head) != NULL) {
1785 		sctp->sctp_xmit_head = mp->b_next;
1786 		mp->b_next = NULL;
1787 		if (sctp->sctp_xmit_head != NULL)
1788 			sctp->sctp_xmit_head->b_prev = NULL;
1789 		sctp_sendfail_event(sctp, mp, 0, B_TRUE);
1790 	}
1791 	sctp->sctp_xmit_head = NULL;
1792 	sctp->sctp_xmit_tail = NULL;
1793 	sctp->sctp_xmit_unacked = NULL;
1794 
1795 	sctp->sctp_unacked = 0;
1796 	/*
1797 	 * Any control message as well. We will clean-up this list as well.
1798 	 * This contains any pending ASCONF request that we have queued/sent.
1799 	 * If we do get an ACK we will just drop it. However, given that
1800 	 * we are restarting chances are we aren't going to get any.
1801 	 */
1802 	if (sctp->sctp_cxmit_list != NULL)
1803 		sctp_asconf_free_cxmit(sctp, NULL);
1804 	sctp->sctp_cxmit_list = NULL;
1805 	sctp->sctp_cchunk_pend = 0;
1806 
1807 	sctp->sctp_rexmitting = B_FALSE;
1808 	sctp->sctp_rxt_nxttsn = 0;
1809 	sctp->sctp_rxt_maxtsn = 0;
1810 
1811 	sctp->sctp_zero_win_probe = B_FALSE;
1812 }
1813 
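/*
 * Initialize a newly allocated peer address entry: set the initial
 * path MSS, RTO, congestion window and heartbeat state, attach the
 * retransmission timer mblk, and look up its cached IRE and source
 * address.
 */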
1814 static void
1815 sctp_init_faddr(sctp_t *sctp, sctp_faddr_t *fp, in6_addr_t *addr,
1816     mblk_t *timer_mp)
1817 {
1818 	bcopy(addr, &fp->faddr, sizeof (*addr));
1819 	if (IN6_IS_ADDR_V4MAPPED(addr)) {
1820 		fp->isv4 = 1;
1821 		/* Make sure that sfa_pmss is a multiple of SCTP_ALIGN. */
1822 		fp->sfa_pmss = (sctp_initial_mtu - sctp->sctp_hdr_len) &
		    ~(SCTP_ALIGN - 1);
1824 	} else {
1825 		fp->isv4 = 0;
1826 		fp->sfa_pmss = (sctp_initial_mtu - sctp->sctp_hdr6_len) &
		    ~(SCTP_ALIGN - 1);
1828 	}
1829 	fp->cwnd = sctp_slow_start_initial * fp->sfa_pmss;
1830 	fp->rto = MIN(sctp->sctp_rto_initial, sctp->sctp_init_rto_max);
1831 	fp->srtt = -1;
1832 	fp->rtt_updates = 0;
1833 	fp->strikes = 0;
1834 	fp->max_retr = sctp->sctp_pp_max_rxt;
1835 	/* Mark it as not confirmed. */
1836 	fp->state = SCTP_FADDRS_UNCONFIRMED;
1837 	fp->hb_interval = sctp->sctp_hb_interval;
1838 	fp->ssthresh = sctp_initial_ssthresh;
1839 	fp->suna = 0;
1840 	fp->pba = 0;
1841 	fp->acked = 0;
1842 	fp->lastactive = lbolt64;
1843 	fp->timer_mp = timer_mp;
1844 	fp->hb_pending = B_FALSE;
1845 	fp->timer_running = 0;
1846 	fp->df = 1;
1847 	fp->pmtu_discovered = 0;
1848 	fp->rc_timer_mp = NULL;
1849 	fp->rc_timer_running = 0;
1850 	fp->next = NULL;
1851 	fp->ire = NULL;
1852 	fp->T3expire = 0;
1853 	(void) random_get_pseudo_bytes((uint8_t *)&fp->hb_secret,
1854 	    sizeof (fp->hb_secret));
1855 	fp->hb_expiry = lbolt64;
1856 
1857 	sctp_get_ire(sctp, fp);
1858 }
1859 
1860 /*ARGSUSED*/
1861 static void
1862 faddr_destructor(void *buf, void *cdrarg)
1863 {
1864 	sctp_faddr_t *fp = buf;
1865 
1866 	ASSERT(fp->timer_mp == NULL);
1867 	ASSERT(fp->timer_running == 0);
1868 
1869 	ASSERT(fp->rc_timer_mp == NULL);
1870 	ASSERT(fp->rc_timer_running == 0);
1871 }
1872 
1873 void
1874 sctp_faddr_init(void)
1875 {
1876 	sctp_kmem_faddr_cache = kmem_cache_create("sctp_faddr_cache",
1877 	    sizeof (sctp_faddr_t), 0, NULL, faddr_destructor,
1878 	    NULL, NULL, NULL, 0);
1879 }
1880 
1881 void
1882 sctp_faddr_fini(void)
1883 {
1884 	kmem_cache_destroy(sctp_kmem_faddr_cache);
1885 }
1886