xref: /freebsd/sys/netinet/sctp_bsd_addr.c (revision aa0a1e58)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;
static int __sctp_thread_based_iterator_started = 0;

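/*
 * Drain the global iterator queue: run each queued iterator's completion
 * callback (if any), then unlink and free the entry.
 */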
static void
sctp_cleanup_itqueue(void)
{
	struct sctp_iterator *it, *nit;

	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_FREE(it, SCTP_M_ITER);
	}
}

void
sctp_wakeup_iterator(void)
{
	wakeup(&sctp_it_ctl.iterator_running);
}

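/*
 * Body of the iterator kernel thread: sleep until woken, run the iterator
 * worker, and tear down the locks and any queued iterators when asked to
 * exit.
 */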
static void
sctp_iterator_thread(void *v)
{
	SCTP_IPI_ITERATOR_WQ_LOCK();
	while (1) {
		msleep(&sctp_it_ctl.iterator_running,
		    &sctp_it_ctl.ipi_iterator_wq_mtx,
		    0, "waiting_for_work", 0);
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			SCTP_IPI_ITERATOR_WQ_DESTROY();
			SCTP_ITERATOR_LOCK_DESTROY();
			sctp_cleanup_itqueue();
			__sctp_thread_based_iterator_started = 0;
			kthread_exit();
		}
		sctp_iterator_worker();
	}
}

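/*
 * Create the single iterator kernel thread on first use, after setting up
 * the iterator control block, its locks and its queue.
 */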
void
sctp_startup_iterator(void)
{
	if (__sctp_thread_based_iterator_started) {
		/* You only get one */
		return;
	}
	/* init the iterator head */
	__sctp_thread_based_iterator_started = 1;
	sctp_it_ctl.iterator_running = 0;
	sctp_it_ctl.iterator_flags = 0;
	sctp_it_ctl.cur_it = NULL;
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_ITERATOR_WQ_INIT();
	TAILQ_INIT(&sctp_it_ctl.iteratorhead);

	(void)kproc_create(sctp_iterator_thread,
	    (void *)NULL,
	    &sctp_it_ctl.thread_proc,
	    RFPROC,
	    SCTP_KTHREAD_PAGES,
	    SCTP_KTRHEAD_NAME);
}

#ifdef INET6

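/*
 * Mirror the kernel's IPv6 address flags into the sctp_ifa and mark the
 * address unusable if it is deprecated (and deprecated addresses are not
 * allowed), detached, anycast, or not ready.
 */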
void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
	struct in6_ifaddr *ifa6;

	ifa6 = (struct in6_ifaddr *)ifa->ifa;
	ifa->flags = ifa6->ia6_flags;
	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
		if (ifa->flags & IN6_IFF_DEPRECATED) {
			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
		} else {
			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
		}
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
	if (ifa->flags &
	    (IN6_IFF_DETACHED |
	    IN6_IFF_ANYCAST |
	    IN6_IFF_NOTREADY)) {
		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
}

#endif				/* INET6 */

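/*
 * Return 1 if the interface type carrying this address is one SCTP is
 * willing to use, 0 otherwise.
 */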
static uint32_t
sctp_is_desired_interface_type(struct ifaddr *ifa)
{
	int result;

	/* check the interface type to see if it's one we care about */
	switch (ifa->ifa_ifp->if_type) {
	case IFT_ETHER:
	case IFT_ISO88023:
	case IFT_ISO88024:
	case IFT_ISO88025:
	case IFT_ISO88026:
	case IFT_STARLAN:
	case IFT_P10:
	case IFT_P80:
	case IFT_HY:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISDNBASIC:
	case IFT_ISDNPRIMARY:
	case IFT_PTPSERIAL:
	case IFT_OTHER:
	case IFT_PPP:
	case IFT_LOOP:
	case IFT_SLIP:
	case IFT_GIF:
	case IFT_L2VLAN:
	case IFT_IP:
	case IFT_IPOVERCDLC:
	case IFT_IPOVERCLAW:
	case IFT_VIRTUALIPADDRESS:
		result = 1;
		break;
	default:
		result = 0;
	}

	return (result);
}

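/*
 * Walk every interface and register each usable IPv4/IPv6 address with the
 * given VRF.
 */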
static void
sctp_init_ifns_for_vrf(int vrfid)
{
	/*
	 * Here we must apply ANY locks needed by the IFN we access and also
	 * make sure we lock any IFA that exists as we float through the
	 * list of IFAs.
	 */
	struct ifnet *ifn;
	struct ifaddr *ifa;
	struct in6_ifaddr *ifa6;
	struct sctp_ifa *sctp_ifa;
	uint32_t ifa_flags;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		IF_ADDR_LOCK(ifn);
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			if (ifa->ifa_addr == NULL) {
				continue;
			}
			if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
				/* non inet/inet6 skip */
				continue;
			}
			if (ifa->ifa_addr->sa_family == AF_INET6) {
				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
					/* skip unspecified addresses */
					continue;
				}
			} else {
				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
					continue;
				}
			}
			if (sctp_is_desired_interface_type(ifa) == 0) {
				/* non desired type */
				continue;
			}
			if (ifa->ifa_addr->sa_family == AF_INET6) {
				ifa6 = (struct in6_ifaddr *)ifa;
				ifa_flags = ifa6->ia6_flags;
			} else {
				ifa_flags = 0;
			}
			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
			    (void *)ifn,
			    ifn->if_index,
			    ifn->if_type,
			    ifn->if_xname,
			    (void *)ifa,
			    ifa->ifa_addr,
			    ifa_flags,
			    0);
			if (sctp_ifa) {
				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
			}
		}
		IF_ADDR_UNLOCK(ifn);
	}
	IFNET_RUNLOCK();
}

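/*
 * Allocate the VRF (if the id is valid) and populate it with the system's
 * interfaces and addresses.
 */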
void
sctp_init_vrf_list(int vrfid)
{
	if (vrfid > SCTP_MAX_VRF_ID)
		/* can't do that */
		return;

	/* Don't care about return here */
	(void)sctp_allocate_vrf(vrfid);

	/*
	 * Now we need to build all the ifn's for this vrf and their
	 * addresses.
	 */
	sctp_init_ifns_for_vrf(vrfid);
}

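/*
 * Callback for interface address changes: on RTM_ADD/RTM_DELETE, add the
 * address to or remove it from the default VRF, skipping addresses SCTP
 * cannot use.
 */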
void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
	uint32_t ifa_flags = 0;

	/*
	 * BSD only has one VRF. If this changes, we will need to hook in
	 * the right things here to get the id to pass to the address
	 * management routine.
	 */
	if (SCTP_BASE_VAR(first_time) == 0) {
		/* Special test to see if my ::1 will show up with this */
		SCTP_BASE_VAR(first_time) = 1;
		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
	}
	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
		/* don't know what to do with this */
		return;
	}
	if (ifa->ifa_addr == NULL) {
		return;
	}
	if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
		/* non inet/inet6 skip */
		return;
	}
	if (ifa->ifa_addr->sa_family == AF_INET6) {
		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
			/* skip unspecified addresses */
			return;
		}
	} else {
		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
			return;
		}
	}

	if (sctp_is_desired_interface_type(ifa) == 0) {
		/* non desired type */
		return;
	}
	if (cmd == RTM_ADD) {
		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
		    ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type,
		    ifa->ifa_ifp->if_xname,
		    (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
	} else {
		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
		    ifa->ifa_ifp->if_index,
		    ifa->ifa_ifp->if_xname);
		/*
		 * We don't bump the refcount here, so when it completes the
		 * final delete will happen.
		 */
	}
}

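/*
 * Apply an address add or delete to every address of every interface
 * matching the given predicate.
 */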
void
sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		if (!(*pred) (ifn)) {
			continue;
		}
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
		}
	}
	IFNET_RUNLOCK();
}

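/*
 * Allocate an mbuf for a message of space_needed bytes. When allonebuf is
 * set the data must fit in a single mbuf or the allocation fails. Any extra
 * mbufs chained by m_getm2() are freed before returning.
 */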
struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;

	m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
	if (m == NULL) {
		/* bad, no memory */
		return (m);
	}
	if (allonebuf) {
		int siz;

		if (SCTP_BUF_IS_EXTENDED(m)) {
			siz = SCTP_BUF_EXTEND_SIZE(m);
		} else {
			if (want_header)
				siz = MHLEN;
			else
				siz = MLEN;
		}
		if (siz < space_needed) {
			m_freem(m);
			return (NULL);
		}
	}
	if (SCTP_BUF_NEXT(m)) {
		sctp_m_freem(SCTP_BUF_NEXT(m));
		SCTP_BUF_NEXT(m) = NULL;
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		if (SCTP_BUF_IS_EXTENDED(m)) {
			sctp_log_mb(m, SCTP_MBUF_IALLOC);
		}
	}
#endif
	return (m);
}

#ifdef SCTP_PACKET_LOGGING
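/*
 * Log a packet into the circular packet-log buffer: record the entry
 * length, the previous end offset and a timestamp, then copy the packet
 * itself. The pktlog lock is only taken when enough writers are active to
 * require it.
 */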
void
sctp_packet_log(struct mbuf *m, int length)
{
	int *lenat, thisone;
	void *copyto;
	uint32_t *tick_tock;
	int total_len;
	int grabbed_lock = 0;
	int value, newval, thisend, thisbegin;

	/*
	 * Buffer layout:
	 *  - sizeof this entry	(total_len)
	 *  - previous end	(value)
	 *  - ticks of log	(ticks)
	 *  o ip packet
	 *  o as logged
	 *  - where this started	(thisbegin)
	 *  x <-- end points here
	 */
	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
	/* Log a packet to the buffer. */
	if (total_len > SCTP_PACKET_LOG_SIZE) {
		/* Can't log this packet, the buffer is not big enough. */
		return;
	}
	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
		return;
	}
	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		SCTP_IP_PKTLOG_LOCK();
		grabbed_lock = 1;
again_locked:
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto again_locked;
		}
	} else {
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto try_again;
		}
	}
	/* Sanity check */
	if (thisend >= SCTP_PACKET_LOG_SIZE) {
		printf("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
		    thisbegin,
		    thisend,
		    SCTP_BASE_VAR(packet_log_writers),
		    grabbed_lock,
		    SCTP_BASE_VAR(packet_log_end));
		SCTP_BASE_VAR(packet_log_end) = 0;
		goto no_log;
	}
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
	*lenat = total_len;
	lenat++;
	*lenat = value;
	lenat++;
	tick_tock = (uint32_t *) lenat;
	lenat++;
	*tick_tock = sctp_get_tick_count();
	copyto = (void *)lenat;
	thisone = thisend - sizeof(int);
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
	*lenat = thisbegin;
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
		grabbed_lock = 0;
	}
	m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
	}
	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}

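/*
 * Copy up to length bytes of the packet log, prefixed by the current end
 * offset, into the caller's buffer and return the number of bytes copied.
 */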
int
sctp_copy_out_packet_log(uint8_t *target, int length)
{
	/*
	 * We wind through the packet log from the start, copying up to
	 * length bytes out. We return the number of bytes copied.
	 */
	int tocopy, this_copy;
	int *lenat;
	int did_delay = 0;

	tocopy = length;
	if (length < (int)(2 * sizeof(int))) {
		/* not enough room */
		return (0);
	}
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
			/*
			 * We delay here for just a moment, hoping the
			 * writer(s) that were present when we entered will
			 * have left and we only have locking ones that will
			 * contend with us for the lock. This does not
			 * assure 100% access, but it's good enough for a
			 * logging facility like this.
			 */
			did_delay = 1;
			DELAY(10);
			goto again;
		}
	}
	SCTP_IP_PKTLOG_LOCK();
	lenat = (int *)target;
	*lenat = SCTP_BASE_VAR(packet_log_end);
	lenat++;
	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
		    SCTP_PKTLOG_WRITERS_NEED_LOCK);
	}
	SCTP_IP_PKTLOG_UNLOCK();
	return (this_copy + sizeof(int));
}

#endif