xref: /freebsd/sys/netinet/sctp_bsd_addr.c (revision e28a4053)
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $	 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_indata.h>
#include <sys/unistd.h>

/* Declare all of our malloc named types */
MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");

/* Global NON-VNET structure that controls the iterator */
struct iterator_control sctp_it_ctl;
static int __sctp_thread_based_iterator_started = 0;

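/*
 * Drain the iterator work queue, calling each pending iterator's "at end"
 * callback before freeing its entry.
 */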
static void
sctp_cleanup_itqueue(void)
{
	struct sctp_iterator *it;

	while ((it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead)) != NULL) {
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_FREE(it, SCTP_M_ITER);
	}
}

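/* Wake the iterator thread so it can service newly queued work. */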
void
sctp_wakeup_iterator(void)
{
	wakeup(&sctp_it_ctl.iterator_running);
}

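/*
 * Body of the iterator thread: sleep until woken, run any queued iterator
 * work, and tear down the iterator state and exit when asked to.
 */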
static void
sctp_iterator_thread(void *v)
{
	SCTP_IPI_ITERATOR_WQ_LOCK();
	while (1) {
		msleep(&sctp_it_ctl.iterator_running,
		    &sctp_it_ctl.ipi_iterator_wq_mtx,
		    0, "waiting_for_work", 0);
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			SCTP_IPI_ITERATOR_WQ_DESTROY();
			SCTP_ITERATOR_LOCK_DESTROY();
			sctp_cleanup_itqueue();
			__sctp_thread_based_iterator_started = 0;
			kthread_exit();
		}
		sctp_iterator_worker();
	}
}

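/*
 * Start the iterator thread (at most one), initializing the iterator
 * control structure, its locks, and the work queue first.
 */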
void
sctp_startup_iterator(void)
{
	int ret;

	if (__sctp_thread_based_iterator_started) {
		/* You only get one */
		return;
	}
	/* init the iterator head */
	__sctp_thread_based_iterator_started = 1;
	sctp_it_ctl.iterator_running = 0;
	sctp_it_ctl.iterator_flags = 0;
	sctp_it_ctl.cur_it = NULL;
	SCTP_ITERATOR_LOCK_INIT();
	SCTP_IPI_ITERATOR_WQ_INIT();
	TAILQ_INIT(&sctp_it_ctl.iteratorhead);

	ret = kproc_create(sctp_iterator_thread,
	    (void *)NULL,
	    &sctp_it_ctl.thread_proc,
	    RFPROC,
	    SCTP_KTHREAD_PAGES,
	    SCTP_KTRHEAD_NAME);
}

#ifdef INET6

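/*
 * Record the IPv6 address flags on the sctp_ifa and mark the address
 * unusable if it is deprecated (while deprecated addresses are disabled),
 * detached, anycast, or not yet ready.
 */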
void
sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
{
	struct in6_ifaddr *ifa6;

	ifa6 = (struct in6_ifaddr *)ifa->ifa;
	ifa->flags = ifa6->ia6_flags;
	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
		if (ifa->flags & IN6_IFF_DEPRECATED) {
			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
		} else {
			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
		}
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
	if (ifa->flags &
	    (IN6_IFF_DETACHED |
	    IN6_IFF_ANYCAST |
	    IN6_IFF_NOTREADY)) {
		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
	} else {
		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
	}
}

#endif				/* INET6 */

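/*
 * Return 1 if the interface type is one SCTP is willing to use addresses
 * from, 0 otherwise.
 */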
static uint32_t
sctp_is_desired_interface_type(struct ifaddr *ifa)
{
	int result;

	/* check the interface type to see if it's one we care about */
	switch (ifa->ifa_ifp->if_type) {
	case IFT_ETHER:
	case IFT_ISO88023:
	case IFT_ISO88024:
	case IFT_ISO88025:
	case IFT_ISO88026:
	case IFT_STARLAN:
	case IFT_P10:
	case IFT_P80:
	case IFT_HY:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISDNBASIC:
	case IFT_ISDNPRIMARY:
	case IFT_PTPSERIAL:
	case IFT_OTHER:
	case IFT_PPP:
	case IFT_LOOP:
	case IFT_SLIP:
	case IFT_GIF:
	case IFT_L2VLAN:
	case IFT_IP:
	case IFT_IPOVERCDLC:
	case IFT_IPOVERCLAW:
	case IFT_VIRTUALIPADDRESS:
		result = 1;
		break;
	default:
		result = 0;
		break;
	}

	return (result);
}

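/*
 * Walk every interface and interface address in the system and register
 * each usable inet/inet6 address with the given VRF.
 */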
static void
sctp_init_ifns_for_vrf(int vrfid)
{
	/*
	 * Here we must apply any locks needed by the IFN we access and also
	 * make sure we lock any IFA that exists as we float through the
	 * list of IFAs.
	 */
	struct ifnet *ifn;
	struct ifaddr *ifa;
	struct in6_ifaddr *ifa6;
	struct sctp_ifa *sctp_ifa;
	uint32_t ifa_flags;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		IF_ADDR_LOCK(ifn);
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			if (ifa->ifa_addr == NULL) {
				continue;
			}
			if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
				/* not inet/inet6, skip */
				continue;
			}
			if (ifa->ifa_addr->sa_family == AF_INET6) {
				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
					/* skip unspecified addresses */
					continue;
				}
			} else {
				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
					continue;
				}
			}
			if (sctp_is_desired_interface_type(ifa) == 0) {
				/* not a desired interface type */
				continue;
			}
			if (ifa->ifa_addr->sa_family == AF_INET6) {
				ifa6 = (struct in6_ifaddr *)ifa;
				ifa_flags = ifa6->ia6_flags;
			} else {
				ifa_flags = 0;
			}
			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
			    (void *)ifn,
			    ifn->if_index,
			    ifn->if_type,
			    ifn->if_xname,
			    (void *)ifa,
			    ifa->ifa_addr,
			    ifa_flags,
			    0);
			if (sctp_ifa) {
				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
			}
		}
		IF_ADDR_UNLOCK(ifn);
	}
	IFNET_RUNLOCK();
}

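/*
 * Validate the VRF id, allocate the VRF, and populate it with the current
 * interface addresses.
 */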
void
sctp_init_vrf_list(int vrfid)
{
	if (vrfid > SCTP_MAX_VRF_ID)
		/* can't do that */
		return;

	/* Don't care about the return value here */
	(void)sctp_allocate_vrf(vrfid);

	/*
	 * Now we need to build all the ifns for this VRF and their
	 * addresses.
	 */
	sctp_init_ifns_for_vrf(vrfid);
}

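/*
 * Handle a single interface-address change: add (RTM_ADD) or delete
 * (RTM_DELETE) the address in the default VRF, ignoring address families
 * and interface types SCTP does not use.
 */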
void
sctp_addr_change(struct ifaddr *ifa, int cmd)
{
	uint32_t ifa_flags = 0;

	/*
	 * BSD only has one VRF; if this changes we will need to hook in the
	 * right things here to get the id to pass to the address management
	 * routine.
	 */
	if (SCTP_BASE_VAR(first_time) == 0) {
		/* Special test to see if my ::1 will show up with this */
		SCTP_BASE_VAR(first_time) = 1;
		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
	}
	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
		/* don't know what to do with this */
		return;
	}
	if (ifa->ifa_addr == NULL) {
		return;
	}
	if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
		/* not inet/inet6, skip */
		return;
	}
	if (ifa->ifa_addr->sa_family == AF_INET6) {
		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
			/* skip unspecified addresses */
			return;
		}
	} else {
		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
			return;
		}
	}

	if (sctp_is_desired_interface_type(ifa) == 0) {
		/* not a desired interface type */
		return;
	}
	if (cmd == RTM_ADD) {
		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
		    ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type,
		    ifa->ifa_ifp->if_xname,
		    (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
	} else {
		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
		    ifa->ifa_ifp->if_index,
		    ifa->ifa_ifp->if_xname);
		/*
		 * We don't bump the refcount here, so when it completes the
		 * final delete will happen.
		 */
	}
}

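/*
 * Run sctp_addr_change() over every address of every interface that
 * matches the given predicate, either adding (add != 0) or deleting them.
 */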
void
sctp_add_or_del_interfaces(int (*pred) (struct ifnet *), int add)
{
	struct ifnet *ifn;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
		if (!(*pred) (ifn)) {
			continue;
		}
		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
		}
	}
	IFNET_RUNLOCK();
}

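/*
 * Allocate an mbuf with at least space_needed bytes of storage via
 * m_getm2().  Only the first mbuf of the chain is kept; with allonebuf
 * set, the allocation fails (NULL) unless space_needed fits in that
 * single buffer.
 */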
struct mbuf *
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;

	m = m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
	if (m == NULL) {
		/* bad, no memory */
		return (m);
	}
	if (allonebuf) {
		int siz;

		if (SCTP_BUF_IS_EXTENDED(m)) {
			siz = SCTP_BUF_EXTEND_SIZE(m);
		} else {
			if (want_header)
				siz = MHLEN;
			else
				siz = MLEN;
		}
		if (siz < space_needed) {
			m_freem(m);
			return (NULL);
		}
	}
	if (SCTP_BUF_NEXT(m)) {
		sctp_m_freem(SCTP_BUF_NEXT(m));
		SCTP_BUF_NEXT(m) = NULL;
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		if (SCTP_BUF_IS_EXTENDED(m)) {
			sctp_log_mb(m, SCTP_MBUF_IALLOC);
		}
	}
#endif
	return (m);
}

#ifdef SCTP_PACKET_LOGGING
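/*
 * Append a copy of the packet to the circular packet log buffer.  Each
 * entry stores its total length, the previous end offset, a tick stamp,
 * the packet bytes, and finally the offset at which the entry began.
 */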
void
sctp_packet_log(struct mbuf *m, int length)
{
	int *lenat, thisone;
	void *copyto;
	uint32_t *tick_tock;
	int total_len;
	int grabbed_lock = 0;
	int value, newval, thisend, thisbegin;

	/*
	 * Buffer layout of one log entry:
	 *  - size of this entry (total_len)
	 *  - previous end offset (value)
	 *  - ticks when logged (ticks)
	 *  - the IP packet, as logged
	 *  - where this entry started (thisbegin) <-- end points here
	 */
	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
	/* Log a packet to the buffer. */
	if (total_len > SCTP_PACKET_LOG_SIZE) {
		/* Can't log this packet; the buffer is not big enough. */
		return;
	}
	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
		return;
	}
	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
try_again:
	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		SCTP_IP_PKTLOG_LOCK();
		grabbed_lock = 1;
again_locked:
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto again_locked;
		}
	} else {
		value = SCTP_BASE_VAR(packet_log_end);
		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
		if (newval >= SCTP_PACKET_LOG_SIZE) {
			/* we wrapped */
			thisbegin = 0;
			thisend = total_len;
		} else {
			thisbegin = SCTP_BASE_VAR(packet_log_end);
			thisend = newval;
		}
		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
			goto try_again;
		}
	}
	/* Sanity check */
	if (thisend >= SCTP_PACKET_LOG_SIZE) {
		printf("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
		    thisbegin,
		    thisend,
		    SCTP_BASE_VAR(packet_log_writers),
		    grabbed_lock,
		    SCTP_BASE_VAR(packet_log_end));
		SCTP_BASE_VAR(packet_log_end) = 0;
		goto no_log;
	}
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
	*lenat = total_len;
	lenat++;
	*lenat = value;
	lenat++;
	tick_tock = (uint32_t *) lenat;
	lenat++;
	*tick_tock = sctp_get_tick_count();
	copyto = (void *)lenat;
	thisone = thisend - sizeof(int);
	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
	*lenat = thisbegin;
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
		grabbed_lock = 0;
	}
	m_copydata(m, 0, length, (caddr_t)copyto);
no_log:
	if (grabbed_lock) {
		SCTP_IP_PKTLOG_UNLOCK();
	}
	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
}

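/*
 * Copy the packet log out to a caller-supplied buffer: the first int holds
 * the current end offset of the ring, followed by up to length - sizeof(int)
 * bytes of the log itself.  Returns the number of bytes written.
 */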
int
sctp_copy_out_packet_log(uint8_t * target, int length)
{
	/*
	 * We wind through the packet log, copying up to length bytes out.
	 * We return the number of bytes copied.
	 */
	int tocopy, this_copy;
	int *lenat;
	int did_delay = 0;

	tocopy = length;
	if (length < (int)(2 * sizeof(int))) {
		/* not enough room */
		return (0);
	}
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
again:
		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
			/*
			 * we delay here for just a moment hoping the
			 * writer(s) that were present when we entered will
			 * have left and we only have locking ones that will
			 * contend with us for the lock. This does not
			 * assure 100% access, but it's good enough for a
			 * logging facility like this.
			 */
			did_delay = 1;
			DELAY(10);
			goto again;
		}
	}
	SCTP_IP_PKTLOG_LOCK();
	lenat = (int *)target;
	*lenat = SCTP_BASE_VAR(packet_log_end);
	lenat++;
	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
		    SCTP_PKTLOG_WRITERS_NEED_LOCK);
	}
	SCTP_IP_PKTLOG_UNLOCK();
	return (this_copy + sizeof(int));
}

#endif