xref: /freebsd/sys/net/if_ethersubr.c (revision e7102929)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include "opt_inet.h"
33 #include "opt_inet6.h"
34 #include "opt_netgraph.h"
35 #include "opt_mbuf_profiling.h"
36 #include "opt_rss.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/devctl.h>
41 #include <sys/eventhandler.h>
42 #include <sys/jail.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/module.h>
48 #include <sys/msan.h>
49 #include <sys/proc.h>
50 #include <sys/priv.h>
51 #include <sys/random.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/sysctl.h>
55 #include <sys/uuid.h>
56 #ifdef KDB
57 #include <sys/kdb.h>
58 #endif
59 
60 #include <net/ieee_oui.h>
61 #include <net/if.h>
62 #include <net/if_var.h>
63 #include <net/if_private.h>
64 #include <net/if_arp.h>
65 #include <net/netisr.h>
66 #include <net/route.h>
67 #include <net/if_llc.h>
68 #include <net/if_dl.h>
69 #include <net/if_types.h>
70 #include <net/bpf.h>
71 #include <net/ethernet.h>
72 #include <net/if_bridgevar.h>
73 #include <net/if_vlan_var.h>
74 #include <net/if_llatbl.h>
75 #include <net/pfil.h>
76 #include <net/rss_config.h>
77 #include <net/vnet.h>
78 
79 #include <netpfil/pf/pf_mtag.h>
80 
81 #if defined(INET) || defined(INET6)
82 #include <netinet/in.h>
83 #include <netinet/in_var.h>
84 #include <netinet/if_ether.h>
85 #include <netinet/ip_carp.h>
86 #include <netinet/ip_var.h>
87 #endif
88 #ifdef INET6
89 #include <netinet6/nd6.h>
90 #endif
91 #include <security/mac/mac_framework.h>
92 
93 #include <crypto/sha1.h>
94 
95 #ifdef CTASSERT
96 CTASSERT(sizeof (struct ether_header) == ETHER_ADDR_LEN * 2 + 2);
97 CTASSERT(sizeof (struct ether_addr) == ETHER_ADDR_LEN);
98 #endif
99 
100 VNET_DEFINE(pfil_head_t, link_pfil_head);	/* Packet filter hooks */
101 
102 /* netgraph node hooks for ng_ether(4) */
103 void	(*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
104 void	(*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
105 int	(*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
106 void	(*ng_ether_attach_p)(struct ifnet *ifp);
107 void	(*ng_ether_detach_p)(struct ifnet *ifp);
108 
109 void	(*vlan_input_p)(struct ifnet *, struct mbuf *);
110 
111 /* if_bridge(4) support */
112 void	(*bridge_dn_p)(struct mbuf *, struct ifnet *);
113 
114 /* if_lagg(4) support */
115 struct mbuf *(*lagg_input_ethernet_p)(struct ifnet *, struct mbuf *);
116 
117 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
118 			{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
119 
120 static	int ether_resolvemulti(struct ifnet *, struct sockaddr **,
121 		struct sockaddr *);
122 static	int ether_requestencap(struct ifnet *, struct if_encap_req *);
123 
124 static inline bool ether_do_pcp(struct ifnet *, struct mbuf *);
125 
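/* Set 'error' and bail out through the function-local 'bad' label. */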
126 #define senderr(e) do { error = (e); goto bad;} while (0)
127 
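/*
 * Propagate checksum state from a transmitted packet to a copy that is
 * looped back locally: checksums the stack computed (or offloaded) on
 * output are marked as already verified so the input path does not
 * re-check them.
 */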
128 static void
129 update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
130 {
131 	int csum_flags = 0;
132 
133 	if (src->m_pkthdr.csum_flags & CSUM_IP)
134 		csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
135 	if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
136 		csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
137 	if (src->m_pkthdr.csum_flags & CSUM_SCTP)
138 		csum_flags |= CSUM_SCTP_VALID;
139 	dst->m_pkthdr.csum_flags |= csum_flags;
140 	if (csum_flags & CSUM_DATA_VALID)
141 		dst->m_pkthdr.csum_data = 0xffff;
142 }
143 
144 /*
145  * Handle link-layer encapsulation requests.
146  */
147 static int
148 ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
149 {
150 	struct ether_header *eh;
151 	struct arphdr *ah;
152 	uint16_t etype;
153 	const u_char *lladdr;
154 
155 	if (req->rtype != IFENCAP_LL)
156 		return (EOPNOTSUPP);
157 
158 	if (req->bufsize < ETHER_HDR_LEN)
159 		return (ENOMEM);
160 
161 	eh = (struct ether_header *)req->buf;
162 	lladdr = req->lladdr;
163 	req->lladdr_off = 0;
164 
165 	switch (req->family) {
166 	case AF_INET:
167 		etype = htons(ETHERTYPE_IP);
168 		break;
169 	case AF_INET6:
170 		etype = htons(ETHERTYPE_IPV6);
171 		break;
172 	case AF_ARP:
173 		ah = (struct arphdr *)req->hdata;
174 		ah->ar_hrd = htons(ARPHRD_ETHER);
175 
176 		switch(ntohs(ah->ar_op)) {
177 		case ARPOP_REVREQUEST:
178 		case ARPOP_REVREPLY:
179 			etype = htons(ETHERTYPE_REVARP);
180 			break;
181 		case ARPOP_REQUEST:
182 		case ARPOP_REPLY:
183 		default:
184 			etype = htons(ETHERTYPE_ARP);
185 			break;
186 		}
187 
188 		if (req->flags & IFENCAP_FLAG_BROADCAST)
189 			lladdr = ifp->if_broadcastaddr;
190 		break;
191 	default:
192 		return (EAFNOSUPPORT);
193 	}
194 
195 	memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
196 	memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
197 	memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
198 	req->bufsize = sizeof(struct ether_header);
199 
200 	return (0);
201 }
202 
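/*
 * Resolve the link-layer destination for 'dst' and build the Ethernet
 * header in 'phdr'.  Unicast IPv4/IPv6 destinations are resolved via
 * ARP/ND; broadcast and multicast destinations are mapped directly.
 * On success, *pflags carries RT_* hints (e.g. RT_L2_ME for frames
 * addressed to ourselves) and *plle, if requested, returns the llentry.
 */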
203 static int
204 ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
205 	const struct sockaddr *dst, struct route *ro, u_char *phdr,
206 	uint32_t *pflags, struct llentry **plle)
207 {
208 	uint32_t lleflags = 0;
209 	int error = 0;
210 #if defined(INET) || defined(INET6)
211 	struct ether_header *eh = (struct ether_header *)phdr;
212 	uint16_t etype;
213 #endif
214 
215 	if (plle)
216 		*plle = NULL;
217 
218 	switch (dst->sa_family) {
219 #ifdef INET
220 	case AF_INET:
221 		if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
222 			error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
223 			    plle);
224 		else {
225 			if (m->m_flags & M_BCAST)
226 				memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
227 				    ETHER_ADDR_LEN);
228 			else {
229 				const struct in_addr *a;
230 				a = &(((const struct sockaddr_in *)dst)->sin_addr);
231 				ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
232 			}
233 			etype = htons(ETHERTYPE_IP);
234 			memcpy(&eh->ether_type, &etype, sizeof(etype));
235 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
236 		}
237 		break;
238 #endif
239 #ifdef INET6
240 	case AF_INET6:
241 		if ((m->m_flags & M_MCAST) == 0) {
242 			int af = RO_GET_FAMILY(ro, dst);
243 			error = nd6_resolve(ifp, LLE_SF(af, 0), m, dst, phdr,
244 			    &lleflags, plle);
245 		} else {
246 			const struct in6_addr *a6;
247 			a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
248 			ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
249 			etype = htons(ETHERTYPE_IPV6);
250 			memcpy(&eh->ether_type, &etype, sizeof(etype));
251 			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
252 		}
253 		break;
254 #endif
255 	default:
256 		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
257 		if (m != NULL)
258 			m_freem(m);
259 		return (EAFNOSUPPORT);
260 	}
261 
262 	if (error == EHOSTDOWN) {
263 		if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
264 			error = EHOSTUNREACH;
265 	}
266 
267 	if (error != 0)
268 		return (error);
269 
270 	*pflags = RT_MAY_LOOP;
271 	if (lleflags & LLE_IFADDR)
272 		*pflags |= RT_L2_ME;
273 
274 	return (0);
275 }
276 
277 /*
278  * Ethernet output routine.
279  * Encapsulate a packet of type family for the local net.
280  * Use trailer local net encapsulation if enough data in first
281  * packet leaves a multiple of 512 bytes of data in remainder.
282  */
283 int
284 ether_output(struct ifnet *ifp, struct mbuf *m,
285 	const struct sockaddr *dst, struct route *ro)
286 {
287 	int error = 0;
288 	char linkhdr[ETHER_HDR_LEN], *phdr;
289 	struct ether_header *eh;
290 	struct pf_mtag *t;
291 	bool loop_copy;
292 	int hlen;	/* link layer header length */
293 	uint32_t pflags;
294 	struct llentry *lle = NULL;
295 	int addref = 0;
296 
297 	phdr = NULL;
298 	pflags = 0;
299 	if (ro != NULL) {
300 		/* XXX BPF uses ro_prepend */
301 		if (ro->ro_prepend != NULL) {
302 			phdr = ro->ro_prepend;
303 			hlen = ro->ro_plen;
304 		} else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
305 			if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
306 				lle = ro->ro_lle;
307 				if (lle != NULL &&
308 				    (lle->la_flags & LLE_VALID) == 0) {
309 					LLE_FREE(lle);
310 					lle = NULL;	/* redundant */
311 					ro->ro_lle = NULL;
312 				}
313 				if (lle == NULL) {
314 					/* if we lookup, keep cache */
315 					addref = 1;
316 				} else
317 					/*
318 					 * Notify LLE code that
319 					 * the entry was used
320 					 * by datapath.
321 					 */
322 					llentry_provide_feedback(lle);
323 			}
324 			if (lle != NULL) {
325 				phdr = lle->r_linkdata;
326 				hlen = lle->r_hdrlen;
327 				pflags = lle->r_flags;
328 			}
329 		}
330 	}
331 
332 #ifdef MAC
333 	error = mac_ifnet_check_transmit(ifp, m);
334 	if (error)
335 		senderr(error);
336 #endif
337 
338 	M_PROFILE(m);
339 	if (ifp->if_flags & IFF_MONITOR)
340 		senderr(ENETDOWN);
341 	if (!((ifp->if_flags & IFF_UP) &&
342 	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
343 		senderr(ENETDOWN);
344 
345 	if (phdr == NULL) {
346 		/* No prepend data supplied.  Try to calculate it ourselves. */
347 		phdr = linkhdr;
348 		hlen = ETHER_HDR_LEN;
349 		error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
350 		    addref ? &lle : NULL);
351 		if (addref && lle != NULL)
352 			ro->ro_lle = lle;
353 		if (error != 0)
354 			return (error == EWOULDBLOCK ? 0 : error);
355 	}
356 
357 	if ((pflags & RT_L2_ME) != 0) {
358 		update_mbuf_csumflags(m, m);
359 		return (if_simloop(ifp, m, RO_GET_FAMILY(ro, dst), 0));
360 	}
361 	loop_copy = (pflags & RT_MAY_LOOP) != 0;
362 
363 	/*
364 	 * Add local net header.  If no space in first mbuf,
365 	 * allocate another.
366 	 *
367 	 * Note that we do prepend regardless of RT_HAS_HEADER flag.
368 	 * This is done because BPF code shifts m_data pointer
369 	 * to the end of ethernet header prior to calling if_output().
370 	 */
371 	M_PREPEND(m, hlen, M_NOWAIT);
372 	if (m == NULL)
373 		senderr(ENOBUFS);
374 	if ((pflags & RT_HAS_HEADER) == 0) {
375 		eh = mtod(m, struct ether_header *);
376 		memcpy(eh, phdr, hlen);
377 	}
378 
379 	/*
380 	 * If a simplex interface, and the packet is being sent to our
381 	 * Ethernet address or a broadcast address, loopback a copy.
382 	 * XXX To make a simplex device behave exactly like a duplex
383 	 * device, we should copy in the case of sending to our own
384 	 * ethernet address (thus letting the original actually appear
385 	 * on the wire). However, we don't do that here for security
386 	 * reasons and compatibility with the original behavior.
387 	 */
388 	if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
389 	    ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
390 		struct mbuf *n;
391 
392 		/*
393 		 * Because if_simloop() modifies the packet, we need a
394 		 * writable copy through m_dup() instead of a readonly
395 		 * one as m_copy[m] would give us. The alternative would
396 		 * be to modify if_simloop() to handle the readonly mbuf,
397 		 * but performancewise it is mostly equivalent (trading
398 		 * extra data copying vs. extra locking).
399 		 *
400 		 * XXX This is a local workaround.  A number of less
401 		 * often used kernel parts suffer from the same bug.
402 		 * See PR kern/105943 for a proposed general solution.
403 		 */
404 		if ((n = m_dup(m, M_NOWAIT)) != NULL) {
405 			update_mbuf_csumflags(m, n);
406 			(void)if_simloop(ifp, n, RO_GET_FAMILY(ro, dst), hlen);
407 		} else
408 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
409 	}
410 
411 	/*
412 	 * Bridges require special output handling.
413 	 */
414 	if (ifp->if_bridge) {
415 		BRIDGE_OUTPUT(ifp, m, error);
416 		return (error);
417 	}
418 
419 #if defined(INET) || defined(INET6)
420 	if (ifp->if_carp &&
421 	    (error = (*carp_output_p)(ifp, m, dst)))
422 		goto bad;
423 #endif
424 
425 	/* Handle ng_ether(4) processing, if any */
426 	if (ifp->if_l2com != NULL) {
427 		KASSERT(ng_ether_output_p != NULL,
428 		    ("ng_ether_output_p is NULL"));
429 		if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
430 bad:			if (m != NULL)
431 				m_freem(m);
432 			return (error);
433 		}
434 		if (m == NULL)
435 			return (0);
436 	}
437 
438 	/* Continue with link-layer output */
439 	return ether_output_frame(ifp, m);
440 }
441 
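/*
 * Apply the interface's default 802.1p priority (PCP) to an outbound
 * frame.  Frames that already carry an in-line 802.1Q/QinQ header are
 * left alone (any stale M_VLANTAG flag is cleared); otherwise a
 * priority-only tag (VID 0) is inserted.  Returns false, and counts an
 * output error, if tagging fails.
 */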
442 static bool
443 ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
444 {
445 	struct ether_8021q_tag qtag;
446 	struct ether_header *eh;
447 
448 	eh = mtod(*mp, struct ether_header *);
449 	if (eh->ether_type == htons(ETHERTYPE_VLAN) ||
450 	    eh->ether_type == htons(ETHERTYPE_QINQ)) {
451 		(*mp)->m_flags &= ~M_VLANTAG;
452 		return (true);
453 	}
454 
455 	qtag.vid = 0;
456 	qtag.pcp = pcp;
457 	qtag.proto = ETHERTYPE_VLAN;
458 	if (ether_8021q_frame(mp, ifp, ifp, &qtag))
459 		return (true);
460 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
461 	return (false);
462 }
463 
464 /*
465  * Ethernet link layer output routine to send a raw frame to the device.
466  *
467  * This assumes that the 14 byte Ethernet header is present and contiguous
468  * in the first mbuf (if BRIDGE'ing).
469  */
470 int
471 ether_output_frame(struct ifnet *ifp, struct mbuf *m)
472 {
473 	if (ether_do_pcp(ifp, m) && !ether_set_pcp(&m, ifp, ifp->if_pcp))
474 		return (0);
475 
476 	if (PFIL_HOOKED_OUT(V_link_pfil_head))
477 		switch (pfil_mbuf_out(V_link_pfil_head, &m, ifp, NULL)) {
478 		case PFIL_DROPPED:
479 			return (EACCES);
480 		case PFIL_CONSUMED:
481 			return (0);
482 		}
483 
484 #ifdef EXPERIMENTAL
485 #if defined(INET6) && defined(INET)
486 	/* draft-ietf-6man-ipv6only-flag */
487 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
488 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
489 		struct ether_header *eh;
490 
491 		eh = mtod(m, struct ether_header *);
492 		switch (ntohs(eh->ether_type)) {
493 		case ETHERTYPE_IP:
494 		case ETHERTYPE_ARP:
495 		case ETHERTYPE_REVARP:
496 			m_freem(m);
497 			return (EAFNOSUPPORT);
498 			/* NOTREACHED */
499 			break;
500 		};
501 	}
502 #endif
503 #endif
504 
505 	/*
506 	 * Queue message on interface, update output statistics if successful,
507 	 * and start output if interface not yet active.
508 	 *
509 	 * If KMSAN is enabled, use it to verify that the data does not contain
510 	 * any uninitialized bytes.
511 	 */
512 	kmsan_check_mbuf(m, "ether_output");
513 	return ((ifp->if_transmit)(ifp, m));
514 }
515 
516 /*
517  * Process a received Ethernet packet; the packet is in the
518  * mbuf chain m with the ethernet header at the front.
519  */
520 static void
521 ether_input_internal(struct ifnet *ifp, struct mbuf *m)
522 {
523 	struct ether_header *eh;
524 	u_short etype;
525 
526 	if ((ifp->if_flags & IFF_UP) == 0) {
527 		m_freem(m);
528 		return;
529 	}
530 #ifdef DIAGNOSTIC
531 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
532 		if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
533 		m_freem(m);
534 		return;
535 	}
536 #endif
537 	if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
538 		/* Drivers should pullup and ensure the mbuf is valid */
539 		if_printf(ifp, "discard frame w/o leading ethernet "
540 				"header (len %d pkt len %d)\n",
541 				m->m_len, m->m_pkthdr.len);
542 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
543 		m_freem(m);
544 		return;
545 	}
546 	eh = mtod(m, struct ether_header *);
547 	etype = ntohs(eh->ether_type);
548 	random_harvest_queue_ether(m, sizeof(*m));
549 
550 #ifdef EXPERIMENTAL
551 #if defined(INET6) && defined(INET)
552 	/* draft-ietf-6man-ipv6only-flag */
553 	/* Catch ETHERTYPE_IP, and ETHERTYPE_[REV]ARP if we are v6-only. */
554 	if ((ND_IFINFO(ifp)->flags & ND6_IFF_IPV6_ONLY_MASK) != 0) {
555 		switch (etype) {
556 		case ETHERTYPE_IP:
557 		case ETHERTYPE_ARP:
558 		case ETHERTYPE_REVARP:
559 			m_freem(m);
560 			return;
561 			/* NOTREACHED */
562 			break;
563 		};
564 	}
565 #endif
566 #endif
567 
568 	CURVNET_SET_QUIET(ifp->if_vnet);
569 
570 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
571 		if (ETHER_IS_BROADCAST(eh->ether_dhost))
572 			m->m_flags |= M_BCAST;
573 		else
574 			m->m_flags |= M_MCAST;
575 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
576 	}
577 
578 #ifdef MAC
579 	/*
580 	 * Tag the mbuf with an appropriate MAC label before any other
581 	 * consumers can get to it.
582 	 */
583 	mac_ifnet_create_mbuf(ifp, m);
584 #endif
585 
586 	/*
587 	 * Give bpf a chance at the packet.
588 	 */
589 	ETHER_BPF_MTAP(ifp, m);
590 
591 	/*
592 	 * If the CRC is still on the packet, trim it off. We do this once
593 	 * and once only in case we are re-entered. Nothing else on the
594 	 * Ethernet receive path expects to see the FCS.
595 	 */
596 	if (m->m_flags & M_HASFCS) {
597 		m_adj(m, -ETHER_CRC_LEN);
598 		m->m_flags &= ~M_HASFCS;
599 	}
600 
601 	if (!(ifp->if_capenable & IFCAP_HWSTATS))
602 		if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
603 
604 	/* Allow monitor mode to claim this frame, after stats are updated. */
605 	if (ifp->if_flags & IFF_MONITOR) {
606 		m_freem(m);
607 		CURVNET_RESTORE();
608 		return;
609 	}
610 
611 	/* Handle input from a lagg(4) port */
612 	if (ifp->if_type == IFT_IEEE8023ADLAG) {
613 		KASSERT(lagg_input_ethernet_p != NULL,
614 		    ("%s: if_lagg not loaded!", __func__));
615 		m = (*lagg_input_ethernet_p)(ifp, m);
616 		if (m != NULL)
617 			ifp = m->m_pkthdr.rcvif;
618 		else {
619 			CURVNET_RESTORE();
620 			return;
621 		}
622 	}
623 
624 	/*
625 	 * If the hardware did not process an 802.1Q tag, do this now,
626 	 * to allow 802.1P priority frames to be passed to the main input
627 	 * path correctly.
628 	 */
629 	if ((m->m_flags & M_VLANTAG) == 0 &&
630 	    ((etype == ETHERTYPE_VLAN) || (etype == ETHERTYPE_QINQ))) {
631 		struct ether_vlan_header *evl;
632 
633 		if (m->m_len < sizeof(*evl) &&
634 		    (m = m_pullup(m, sizeof(*evl))) == NULL) {
635 #ifdef DIAGNOSTIC
636 			if_printf(ifp, "cannot pullup VLAN header\n");
637 #endif
638 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
639 			CURVNET_RESTORE();
640 			return;
641 		}
642 
643 		evl = mtod(m, struct ether_vlan_header *);
644 		m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
645 		m->m_flags |= M_VLANTAG;
646 
647 		bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
648 		    ETHER_HDR_LEN - ETHER_TYPE_LEN);
649 		m_adj(m, ETHER_VLAN_ENCAP_LEN);
650 		eh = mtod(m, struct ether_header *);
651 	}
652 
653 	M_SETFIB(m, ifp->if_fib);
654 
655 	/* Allow ng_ether(4) to claim this frame. */
656 	if (ifp->if_l2com != NULL) {
657 		KASSERT(ng_ether_input_p != NULL,
658 		    ("%s: ng_ether_input_p is NULL", __func__));
659 		m->m_flags &= ~M_PROMISC;
660 		(*ng_ether_input_p)(ifp, &m);
661 		if (m == NULL) {
662 			CURVNET_RESTORE();
663 			return;
664 		}
665 		eh = mtod(m, struct ether_header *);
666 	}
667 
668 	/*
669 	 * Allow if_bridge(4) to claim this frame.
670 	 *
671 	 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
672 	 * and the frame should be delivered locally.
673 	 *
674 	 * If M_BRIDGE_INJECT is set, the packet was received directly by the
675 	 * bridge via netmap, so "ifp" is the bridge itself and the packet
676 	 * should be re-examined.
677 	 */
678 	if (ifp->if_bridge != NULL || (m->m_flags & M_BRIDGE_INJECT) != 0) {
679 		m->m_flags &= ~M_PROMISC;
680 		BRIDGE_INPUT(ifp, m);
681 		if (m == NULL) {
682 			CURVNET_RESTORE();
683 			return;
684 		}
685 		eh = mtod(m, struct ether_header *);
686 	}
687 
688 #if defined(INET) || defined(INET6)
689 	/*
690 	 * Clear M_PROMISC on frame so that carp(4) will see it when the
691 	 * mbuf flows up to Layer 3.
692 	 * FreeBSD's implementation of carp(4) uses the inprotosw
693 	 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
694 	 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
695 	 * is outside the scope of the M_PROMISC test below.
696 	 * TODO: Maintain a hash table of ethernet addresses other than
697 	 * ether_dhost which may be active on this ifp.
698 	 */
699 	if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
700 		m->m_flags &= ~M_PROMISC;
701 	} else
702 #endif
703 	{
704 		/*
705 		 * If the frame received was not for our MAC address, set the
706 		 * M_PROMISC flag on the mbuf chain. The frame may need to
707 		 * be seen by the rest of the Ethernet input path in case of
708 		 * re-entry (e.g. bridge, vlan, netgraph) but should not be
709 		 * seen by upper protocol layers.
710 		 */
711 		if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
712 		    bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
713 			m->m_flags |= M_PROMISC;
714 	}
715 
716 	ether_demux(ifp, m);
717 	CURVNET_RESTORE();
718 }
719 
720 /*
721  * Ethernet input dispatch; by default, direct dispatch here regardless of
722  * global configuration.  However, if RSS is enabled, hook up RSS affinity
723  * so that when deferred or hybrid dispatch is enabled, we can redistribute
724  * load based on RSS.
725  *
726  * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
727  * not it had already done work distribution via multi-queue.  Then we could
728  * direct dispatch in the event load balancing was already complete and
729  * handle the case of interfaces with different capabilities better.
730  *
731  * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
732  * at multiple layers?
733  *
734  * XXXRW: For now, enable all this only if RSS is compiled in, although it
735  * works fine without RSS.  Need to characterise the performance overhead
736  * of the detour through the netisr code in the event the result is always
737  * direct dispatch.
738  */
739 static void
740 ether_nh_input(struct mbuf *m)
741 {
742 
743 	M_ASSERTPKTHDR(m);
744 	KASSERT(m->m_pkthdr.rcvif != NULL,
745 	    ("%s: NULL interface pointer", __func__));
746 	ether_input_internal(m->m_pkthdr.rcvif, m);
747 }
748 
749 static struct netisr_handler	ether_nh = {
750 	.nh_name = "ether",
751 	.nh_handler = ether_nh_input,
752 	.nh_proto = NETISR_ETHER,
753 #ifdef RSS
754 	.nh_policy = NETISR_POLICY_CPU,
755 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
756 	.nh_m2cpuid = rss_m2cpuid,
757 #else
758 	.nh_policy = NETISR_POLICY_SOURCE,
759 	.nh_dispatch = NETISR_DISPATCH_DIRECT,
760 #endif
761 };
762 
763 static void
764 ether_init(__unused void *arg)
765 {
766 
767 	netisr_register(&ether_nh);
768 }
769 SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);
770 
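/*
 * Per-VNET initialization: register the Ethernet pfil(9) hook point
 * and, under VIMAGE, the per-vnet netisr handler.
 */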
771 static void
772 vnet_ether_init(__unused void *arg)
773 {
774 	struct pfil_head_args args;
775 
776 	args.pa_version = PFIL_VERSION;
777 	args.pa_flags = PFIL_IN | PFIL_OUT;
778 	args.pa_type = PFIL_TYPE_ETHERNET;
779 	args.pa_headname = PFIL_ETHER_NAME;
780 	V_link_pfil_head = pfil_head_register(&args);
781 
782 #ifdef VIMAGE
783 	netisr_register_vnet(&ether_nh);
784 #endif
785 }
786 VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
787     vnet_ether_init, NULL);
788 
789 #ifdef VIMAGE
790 static void
791 vnet_ether_pfil_destroy(__unused void *arg)
792 {
793 
794 	pfil_head_unregister(V_link_pfil_head);
795 }
796 VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
797     vnet_ether_pfil_destroy, NULL);
798 
799 static void
800 vnet_ether_destroy(__unused void *arg)
801 {
802 
803 	netisr_unregister_vnet(&ether_nh);
804 }
805 VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
806     vnet_ether_destroy, NULL);
807 #endif
808 
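/*
 * ifp->if_input entry point for Ethernet drivers.  Accepts a single
 * packet or an m_nextpkt-linked chain, sets up the vnet context and,
 * for drivers not yet marked epoch-ready, the network epoch, then
 * dispatches each packet through NETISR_ETHER.
 */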
809 static void
810 ether_input(struct ifnet *ifp, struct mbuf *m)
811 {
812 	struct epoch_tracker et;
813 	struct mbuf *mn;
814 	bool needs_epoch;
815 
816 	needs_epoch = (ifp->if_flags & IFF_NEEDSEPOCH);
817 #ifdef INVARIANTS
818 	/*
819 	 * This temporary code is here to prevent epoch unaware and unmarked
820 	 * drivers to panic the system.  Once all drivers are taken care of,
821 	 * the whole INVARIANTS block should go away.
822 	 */
823 	if (!needs_epoch && !in_epoch(net_epoch_preempt)) {
824 		static bool printedonce;
825 
826 		needs_epoch = true;
827 		if (!printedonce) {
828 			printedonce = true;
829 			if_printf(ifp, "called %s w/o net epoch! "
830 			    "PLEASE file a bug report.", __func__);
831 #ifdef KDB
832 			kdb_backtrace();
833 #endif
834 		}
835 	}
836 #endif
837 
838 	/*
839 	 * The drivers are allowed to pass in a chain of packets linked with
840 	 * m_nextpkt. We split them up into separate packets here and pass
841 	 * them up. This allows the drivers to amortize the receive lock.
842 	 */
843 	CURVNET_SET_QUIET(ifp->if_vnet);
844 	if (__predict_false(needs_epoch))
845 		NET_EPOCH_ENTER(et);
846 	while (m) {
847 		mn = m->m_nextpkt;
848 		m->m_nextpkt = NULL;
849 
850 		/*
851 		 * We will rely on rcvif being set properly in the deferred
852 		 * context, so assert it is correct here.
853 		 */
854 		MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
855 		KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
856 		    "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
857 		netisr_dispatch(NETISR_ETHER, m);
858 		m = mn;
859 	}
860 	if (__predict_false(needs_epoch))
861 		NET_EPOCH_EXIT(et);
862 	CURVNET_RESTORE();
863 }
864 
865 /*
866  * Upper layer processing for a received Ethernet packet.
867  */
868 void
869 ether_demux(struct ifnet *ifp, struct mbuf *m)
870 {
871 	struct ether_header *eh;
872 	int i, isr;
873 	u_short ether_type;
874 
875 	NET_EPOCH_ASSERT();
876 	KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));
877 
878 	/* Do not grab PROMISC frames in case we are re-entered. */
879 	if (PFIL_HOOKED_IN(V_link_pfil_head) && !(m->m_flags & M_PROMISC)) {
880 		i = pfil_mbuf_in(V_link_pfil_head, &m, ifp, NULL);
881 		if (i != PFIL_PASS)
882 			return;
883 	}
884 
885 	eh = mtod(m, struct ether_header *);
886 	ether_type = ntohs(eh->ether_type);
887 
888 	/*
889 	 * If this frame has a VLAN tag other than 0, call vlan_input()
890 	 * if its module is loaded. Otherwise, drop.
891 	 */
892 	if ((m->m_flags & M_VLANTAG) &&
893 	    EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
894 		if (ifp->if_vlantrunk == NULL) {
895 			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
896 			m_freem(m);
897 			return;
898 		}
899 		KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
900 		    __func__));
901 		/* Clear before possibly re-entering ether_input(). */
902 		m->m_flags &= ~M_PROMISC;
903 		(*vlan_input_p)(ifp, m);
904 		return;
905 	}
906 
907 	/*
908 	 * Pass promiscuously received frames to the upper layer if the user
909 	 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
910 	 */
911 	if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
912 		m_freem(m);
913 		return;
914 	}
915 
916 	/*
917 	 * Reset layer specific mbuf flags to avoid confusing upper layers.
918 	 */
919 	m->m_flags &= ~M_VLANTAG;
920 	m_clrprotoflags(m);
921 
922 	/*
923 	 * Dispatch frame to upper layer.
924 	 */
925 	switch (ether_type) {
926 #ifdef INET
927 	case ETHERTYPE_IP:
928 		isr = NETISR_IP;
929 		break;
930 
931 	case ETHERTYPE_ARP:
932 		if (ifp->if_flags & IFF_NOARP) {
933 			/* Discard packet if ARP is disabled on interface */
934 			m_freem(m);
935 			return;
936 		}
937 		isr = NETISR_ARP;
938 		break;
939 #endif
940 #ifdef INET6
941 	case ETHERTYPE_IPV6:
942 		isr = NETISR_IPV6;
943 		break;
944 #endif
945 	default:
946 		goto discard;
947 	}
948 
949 	/* Strip off Ethernet header. */
950 	m_adj(m, ETHER_HDR_LEN);
951 
952 	netisr_dispatch(isr, m);
953 	return;
954 
955 discard:
956 	/*
957 	 * Packet is to be discarded.  If netgraph is present,
958 	 * hand the packet to it for last chance processing;
959 	 * otherwise dispose of it.
960 	 */
961 	if (ifp->if_l2com != NULL) {
962 		KASSERT(ng_ether_input_orphan_p != NULL,
963 		    ("ng_ether_input_orphan_p is NULL"));
964 		(*ng_ether_input_orphan_p)(ifp, m);
965 		return;
966 	}
967 	m_freem(m);
968 }
969 
970 /*
971  * Convert Ethernet address to printable (loggable) representation.
972  * This routine is for compatibility; it's better to just use
973  *
974  *	printf("%6D", <pointer to address>, ":");
975  *
976  * since there's no static buffer involved.
977  */
978 char *
979 ether_sprintf(const u_char *ap)
980 {
981 	static char etherbuf[18];
982 	snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
983 	return (etherbuf);
984 }
985 
986 /*
987  * Perform common duties while attaching to interface list
988  */
989 void
990 ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
991 {
992 	int i;
993 	struct ifaddr *ifa;
994 	struct sockaddr_dl *sdl;
995 
996 	ifp->if_addrlen = ETHER_ADDR_LEN;
997 	ifp->if_hdrlen = ETHER_HDR_LEN;
998 	ifp->if_mtu = ETHERMTU;
999 	if_attach(ifp);
1000 	ifp->if_output = ether_output;
1001 	ifp->if_input = ether_input;
1002 	ifp->if_resolvemulti = ether_resolvemulti;
1003 	ifp->if_requestencap = ether_requestencap;
1004 #ifdef VIMAGE
1005 	ifp->if_reassign = ether_reassign;
1006 #endif
1007 	if (ifp->if_baudrate == 0)
1008 		ifp->if_baudrate = IF_Mbps(10);		/* just a default */
1009 	ifp->if_broadcastaddr = etherbroadcastaddr;
1010 
1011 	ifa = ifp->if_addr;
1012 	KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
1013 	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1014 	sdl->sdl_type = IFT_ETHER;
1015 	sdl->sdl_alen = ifp->if_addrlen;
1016 	bcopy(lla, LLADDR(sdl), ifp->if_addrlen);
1017 
1018 	if (ifp->if_hw_addr != NULL)
1019 		bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);
1020 
1021 	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
1022 	if (ng_ether_attach_p != NULL)
1023 		(*ng_ether_attach_p)(ifp);
1024 
1025 	/* Announce Ethernet MAC address if non-zero. */
1026 	for (i = 0; i < ifp->if_addrlen; i++)
1027 		if (lla[i] != 0)
1028 			break;
1029 	if (i != ifp->if_addrlen)
1030 		if_printf(ifp, "Ethernet address: %6D\n", lla, ":");
1031 
1032 	uuid_ether_add(LLADDR(sdl));
1033 
1034 	/* All necessary bits are set up; announce it now. */
1035 	EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
1036 	if (IS_DEFAULT_VNET(curvnet))
1037 		devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
1038 }
1039 
1040 /*
1041  * Perform common duties while detaching an Ethernet interface
1042  */
1043 void
1044 ether_ifdetach(struct ifnet *ifp)
1045 {
1046 	struct sockaddr_dl *sdl;
1047 
1048 	sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
1049 	uuid_ether_del(LLADDR(sdl));
1050 
1051 	if (ifp->if_l2com != NULL) {
1052 		KASSERT(ng_ether_detach_p != NULL,
1053 		    ("ng_ether_detach_p is NULL"));
1054 		(*ng_ether_detach_p)(ifp);
1055 	}
1056 
1057 	bpfdetach(ifp);
1058 	if_detach(ifp);
1059 }
1060 
1061 #ifdef VIMAGE
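/*
 * Called when the interface is moved to another vnet: re-create the
 * optional ng_ether(4) node in the context of the new vnet.
 */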
1062 void
1063 ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
1064 {
1065 
1066 	if (ifp->if_l2com != NULL) {
1067 		KASSERT(ng_ether_detach_p != NULL,
1068 		    ("ng_ether_detach_p is NULL"));
1069 		(*ng_ether_detach_p)(ifp);
1070 	}
1071 
1072 	if (ng_ether_attach_p != NULL) {
1073 		CURVNET_SET_QUIET(new_vnet);
1074 		(*ng_ether_attach_p)(ifp);
1075 		CURVNET_RESTORE();
1076 	}
1077 }
1078 #endif
1079 
1080 SYSCTL_DECL(_net_link);
1081 SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1082     "Ethernet");
1083 
1084 #if 0
1085 /*
1086  * This is for reference.  We have a table-driven version
1087  * of the little-endian crc32 generator, which is faster
1088  * than the double-loop.
1089  */
1090 uint32_t
1091 ether_crc32_le(const uint8_t *buf, size_t len)
1092 {
1093 	size_t i;
1094 	uint32_t crc, carry;
1095 	int bit;
1096 	uint8_t data;
1097 
1098 	crc = 0xffffffff;	/* initial value */
1099 
1100 	for (i = 0; i < len; i++) {
1101 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1102 			carry = (crc ^ data) & 1;
1103 			crc >>= 1;
1104 			if (carry)
1105 				crc = (crc ^ ETHER_CRC_POLY_LE);
1106 		}
1107 	}
1108 
1109 	return (crc);
1110 }
1111 #else
1112 uint32_t
1113 ether_crc32_le(const uint8_t *buf, size_t len)
1114 {
1115 	static const uint32_t crctab[] = {
1116 		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1117 		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1118 		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1119 		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1120 	};
1121 	size_t i;
1122 	uint32_t crc;
1123 
1124 	crc = 0xffffffff;	/* initial value */
1125 
1126 	for (i = 0; i < len; i++) {
1127 		crc ^= buf[i];
1128 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1129 		crc = (crc >> 4) ^ crctab[crc & 0xf];
1130 	}
1131 
1132 	return (crc);
1133 }
1134 #endif
1135 
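/*
 * Bit-at-a-time big-endian Ethernet CRC-32, typically used by NIC
 * drivers to compute multicast filter hash indexes.
 */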
1136 uint32_t
1137 ether_crc32_be(const uint8_t *buf, size_t len)
1138 {
1139 	size_t i;
1140 	uint32_t crc, carry;
1141 	int bit;
1142 	uint8_t data;
1143 
1144 	crc = 0xffffffff;	/* initial value */
1145 
1146 	for (i = 0; i < len; i++) {
1147 		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
1148 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
1149 			crc <<= 1;
1150 			if (carry)
1151 				crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1152 		}
1153 	}
1154 
1155 	return (crc);
1156 }
1157 
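/*
 * Common ioctl handling for Ethernet interfaces: address assignment,
 * MTU changes (up to ETHERMTU) and getting/setting the default 802.1p
 * priority (PCP).
 */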
1158 int
1159 ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1160 {
1161 	struct ifaddr *ifa = (struct ifaddr *) data;
1162 	struct ifreq *ifr = (struct ifreq *) data;
1163 	int error = 0;
1164 
1165 	switch (command) {
1166 	case SIOCSIFADDR:
1167 		ifp->if_flags |= IFF_UP;
1168 
1169 		switch (ifa->ifa_addr->sa_family) {
1170 #ifdef INET
1171 		case AF_INET:
1172 			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
1173 			arp_ifinit(ifp, ifa);
1174 			break;
1175 #endif
1176 		default:
1177 			ifp->if_init(ifp->if_softc);
1178 			break;
1179 		}
1180 		break;
1181 
1182 	case SIOCGIFADDR:
1183 		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
1184 		    ETHER_ADDR_LEN);
1185 		break;
1186 
1187 	case SIOCSIFMTU:
1188 		/*
1189 		 * Set the interface MTU.
1190 		 */
1191 		if (ifr->ifr_mtu > ETHERMTU) {
1192 			error = EINVAL;
1193 		} else {
1194 			ifp->if_mtu = ifr->ifr_mtu;
1195 		}
1196 		break;
1197 
1198 	case SIOCSLANPCP:
1199 		error = priv_check(curthread, PRIV_NET_SETLANPCP);
1200 		if (error != 0)
1201 			break;
1202 		if (ifr->ifr_lan_pcp > 7 &&
1203 		    ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
1204 			error = EINVAL;
1205 		} else {
1206 			ifp->if_pcp = ifr->ifr_lan_pcp;
1207 			/* broadcast event about PCP change */
1208 			EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
1209 		}
1210 		break;
1211 
1212 	case SIOCGLANPCP:
1213 		ifr->ifr_lan_pcp = ifp->if_pcp;
1214 		break;
1215 
1216 	default:
1217 		error = EINVAL;			/* XXX netbsd has ENOTTY??? */
1218 		break;
1219 	}
1220 	return (error);
1221 }
1222 
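/*
 * Map a network-layer multicast address to the corresponding Ethernet
 * multicast address; used when joining multicast groups.
 */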
1223 static int
1224 ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
1225 	struct sockaddr *sa)
1226 {
1227 	struct sockaddr_dl *sdl;
1228 #ifdef INET
1229 	struct sockaddr_in *sin;
1230 #endif
1231 #ifdef INET6
1232 	struct sockaddr_in6 *sin6;
1233 #endif
1234 	u_char *e_addr;
1235 
1236 	switch(sa->sa_family) {
1237 	case AF_LINK:
1238 		/*
1239 		 * No mapping needed. Just check that it's a valid MC address.
1240 		 */
1241 		sdl = (struct sockaddr_dl *)sa;
1242 		e_addr = LLADDR(sdl);
1243 		if (!ETHER_IS_MULTICAST(e_addr))
1244 			return EADDRNOTAVAIL;
1245 		*llsa = NULL;
1246 		return 0;
1247 
1248 #ifdef INET
1249 	case AF_INET:
1250 		sin = (struct sockaddr_in *)sa;
1251 		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
1252 			return EADDRNOTAVAIL;
1253 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1254 		sdl->sdl_alen = ETHER_ADDR_LEN;
1255 		e_addr = LLADDR(sdl);
1256 		ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
1257 		*llsa = (struct sockaddr *)sdl;
1258 		return 0;
1259 #endif
1260 #ifdef INET6
1261 	case AF_INET6:
1262 		sin6 = (struct sockaddr_in6 *)sa;
1263 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1264 			/*
1265 			 * An IP6 address of 0 means listen to all
1266 			 * of the Ethernet multicast address used for IP6.
1267 			 * (This is used for multicast routers.)
1268 			 */
1269 			ifp->if_flags |= IFF_ALLMULTI;
1270 			*llsa = NULL;
1271 			return 0;
1272 		}
1273 		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
1274 			return EADDRNOTAVAIL;
1275 		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
1276 		sdl->sdl_alen = ETHER_ADDR_LEN;
1277 		e_addr = LLADDR(sdl);
1278 		ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
1279 		*llsa = (struct sockaddr *)sdl;
1280 		return 0;
1281 #endif
1282 
1283 	default:
1284 		/*
1285 		 * Well, the text isn't quite right, but it's the name
1286 		 * that counts...
1287 		 */
1288 		return EAFNOSUPPORT;
1289 	}
1290 }
1291 
1292 static moduledata_t ether_mod = {
1293 	.name = "ether",
1294 };
1295 
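/*
 * Tap a frame whose VLAN tag is stored out-of-band in
 * m_pkthdr.ether_vtag (M_VLANTAG) to BPF, reconstructing the in-line
 * 802.1Q header on the stack so listeners see the frame as it would
 * appear on the wire.
 */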
1296 void
1297 ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
1298 {
1299 	struct ether_vlan_header vlan;
1300 	struct mbuf mv, mb;
1301 
1302 	KASSERT((m->m_flags & M_VLANTAG) != 0,
1303 	    ("%s: vlan information not present", __func__));
1304 	KASSERT(m->m_len >= sizeof(struct ether_header),
1305 	    ("%s: mbuf not large enough for header", __func__));
1306 	bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
1307 	vlan.evl_proto = vlan.evl_encap_proto;
1308 	vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
1309 	vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
1310 	m->m_len -= sizeof(struct ether_header);
1311 	m->m_data += sizeof(struct ether_header);
1312 	/*
1313 	 * If a data link has been supplied by the caller, then we will need to
1314 	 * re-create a stack allocated mbuf chain with the following structure:
1315 	 *
1316 	 * (1) mbuf #1 will contain the supplied data link
1317 	 * (2) mbuf #2 will contain the vlan header
1318 	 * (3) mbuf #3 will contain the original mbuf's packet data
1319 	 *
1320 	 * Otherwise, submit the packet and vlan header via bpf_mtap2().
1321 	 */
1322 	if (data != NULL) {
1323 		mv.m_next = m;
1324 		mv.m_data = (caddr_t)&vlan;
1325 		mv.m_len = sizeof(vlan);
1326 		mb.m_next = &mv;
1327 		mb.m_data = data;
1328 		mb.m_len = dlen;
1329 		bpf_mtap(bp, &mb);
1330 	} else
1331 		bpf_mtap2(bp, &vlan, sizeof(vlan), m);
1332 	m->m_len += sizeof(struct ether_header);
1333 	m->m_data -= sizeof(struct ether_header);
1334 }
1335 
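/*
 * Prepend an 802.1Q encapsulation header carrying 'tag', with outer
 * EtherType 'proto', to an Ethernet frame.  Returns the new mbuf
 * chain, or NULL (with the chain freed) on allocation failure.
 */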
1336 struct mbuf *
1337 ether_vlanencap_proto(struct mbuf *m, uint16_t tag, uint16_t proto)
1338 {
1339 	struct ether_vlan_header *evl;
1340 
1341 	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1342 	if (m == NULL)
1343 		return (NULL);
1344 	/* M_PREPEND takes care of m_len, m_pkthdr.len for us */
1345 
1346 	if (m->m_len < sizeof(*evl)) {
1347 		m = m_pullup(m, sizeof(*evl));
1348 		if (m == NULL)
1349 			return (NULL);
1350 	}
1351 
1352 	/*
1353 	 * Transform the Ethernet header into an Ethernet header
1354 	 * with 802.1Q encapsulation.
1355 	 */
1356 	evl = mtod(m, struct ether_vlan_header *);
1357 	bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
1358 	    (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1359 	evl->evl_encap_proto = htons(proto);
1360 	evl->evl_tag = htons(tag);
1361 	return (m);
1362 }
1363 
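/*
 * Tap a frame to the interface's BPF listeners, if any, restoring the
 * 802.1Q header for frames tagged out-of-band.
 */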
1364 void
1365 ether_bpf_mtap_if(struct ifnet *ifp, struct mbuf *m)
1366 {
1367 	if (bpf_peers_present(ifp->if_bpf)) {
1368 		M_ASSERTVALID(m);
1369 		if ((m->m_flags & M_VLANTAG) != 0)
1370 			ether_vlan_mtap(ifp->if_bpf, m, NULL, 0);
1371 		else
1372 			bpf_mtap(ifp->if_bpf, m);
1373 	}
1374 }
1375 
1376 static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1377     "IEEE 802.1Q VLAN");
1378 static SYSCTL_NODE(_net_link_vlan, PF_LINK, link,
1379     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1380     "for consistency");
1381 
1382 VNET_DEFINE_STATIC(int, soft_pad);
1383 #define	V_soft_pad	VNET(soft_pad)
1384 SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
1385     &VNET_NAME(soft_pad), 0,
1386     "pad short frames before tagging");
1387 
1388 /*
1389  * For now, make preserving PCP via an mbuf tag optional, as it increases
1390  * per-packet memory allocations and frees.  In the future, it would be
1391  * preferable to reuse ether_vtag for this, or similar.
1392  */
1393 VNET_DEFINE(int, vlan_mtag_pcp) = 0;
1394 #define	V_vlan_mtag_pcp	VNET(vlan_mtag_pcp)
1395 SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW | CTLFLAG_VNET,
1396     &VNET_NAME(vlan_mtag_pcp), 0,
1397     "Retain VLAN PCP information as packets are passed up the stack");
1398 
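/*
 * Decide whether PCP processing is needed for an outbound frame: true
 * when the interface has a default PCP configured, the mbuf is already
 * VLAN-tagged, or (when vlan_mtag_pcp is enabled) a PCP m_tag is
 * attached.  vlan(4) interfaces handle PCP themselves and are skipped.
 */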
1399 static inline bool
1400 ether_do_pcp(struct ifnet *ifp, struct mbuf *m)
1401 {
1402 	if (ifp->if_type == IFT_L2VLAN)
1403 		return (false);
1404 	if (ifp->if_pcp != IFNET_PCP_NONE || (m->m_flags & M_VLANTAG) != 0)
1405 		return (true);
1406 	if (V_vlan_mtag_pcp &&
1407 	    m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_OUT, NULL) != NULL)
1408 		return (true);
1409 	return (false);
1410 }
1411 
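/*
 * Prepare a frame for transmission on a VLAN: optionally pad short
 * frames (net.link.vlan.soft_pad), select the PCP from the mbuf or the
 * caller-supplied qtag, then either request hardware tag insertion or
 * encapsulate the tag in software.  Returns false if the frame was
 * dropped.
 */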
1412 bool
1413 ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
1414     const struct ether_8021q_tag *qtag)
1415 {
1416 	struct m_tag *mtag;
1417 	int n;
1418 	uint16_t tag;
1419 	uint8_t pcp = qtag->pcp;
1420 	static const char pad[8];	/* just zeros */
1421 
1422 	/*
1423 	 * Pad the frame to the minimum size allowed if told to.
1424 	 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
1425 	 * paragraph C.4.4.3.b.  It can help to work around buggy
1426 	 * bridges that violate paragraph C.4.4.3.a from the same
1427 	 * document, i.e., fail to pad short frames after untagging.
1428 	 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
1429 	 * untagging it will produce a 62-byte frame, which is a runt
1430 	 * and requires padding.  There are VLAN-enabled network
1431 	 * devices that just discard such runts instead or mishandle
1432 	 * them somehow.
1433 	 */
1434 	if (V_soft_pad && p->if_type == IFT_ETHER) {
1435 		for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
1436 		     n > 0; n -= sizeof(pad)) {
1437 			if (!m_append(*mp, min(n, sizeof(pad)), pad))
1438 				break;
1439 		}
1440 		if (n > 0) {
1441 			m_freem(*mp);
1442 			*mp = NULL;
1443 			if_printf(ife, "cannot pad short frame");
1444 			return (false);
1445 		}
1446 	}
1447 
1448 	/*
1449 	 * If PCP is set in mbuf, use it
1450 	 */
1451 	if ((*mp)->m_flags & M_VLANTAG) {
1452 		pcp = EVL_PRIOFTAG((*mp)->m_pkthdr.ether_vtag);
1453 	}
1454 
1455 	/*
1456 	 * If the underlying interface can insert the VLAN tag itself,
1457 	 * just pass the packet along with the tag stored out-of-band
1458 	 * in m_pkthdr.ether_vtag and M_VLANTAG set, so that the driver
1459 	 * knows which VLAN tag to insert.  Otherwise, encapsulate the
1460 	 * tag in the frame in software.
1461 	 */
1462 	if (V_vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
1463 	    MTAG_8021Q_PCP_OUT, NULL)) != NULL)
1464 		tag = EVL_MAKETAG(qtag->vid, *(uint8_t *)(mtag + 1), 0);
1465 	else
1466 		tag = EVL_MAKETAG(qtag->vid, pcp, 0);
1467 	if ((p->if_capenable & IFCAP_VLAN_HWTAGGING) &&
1468 	    (qtag->proto == ETHERTYPE_VLAN)) {
1469 		(*mp)->m_pkthdr.ether_vtag = tag;
1470 		(*mp)->m_flags |= M_VLANTAG;
1471 	} else {
1472 		*mp = ether_vlanencap_proto(*mp, tag, qtag->proto);
1473 		if (*mp == NULL) {
1474 			if_printf(ife, "unable to prepend 802.1Q header");
1475 			return (false);
1476 		}
1477 		(*mp)->m_flags &= ~M_VLANTAG;
1478 	}
1479 	return (true);
1480 }
1481 
1482 /*
1483  * Allocate an address from the FreeBSD Foundation OUI.  This uses a
1484  * cryptographic hash function on the containing jail's name, UUID and the
1485  * interface name to attempt to provide a unique but stable address.
1486  * Pseudo-interfaces which require a MAC address should use this function to
1487  * allocate non-locally-administered addresses.
1488  */
1489 void
1490 ether_gen_addr_byname(const char *nameunit, struct ether_addr *hwaddr)
1491 {
1492 	SHA1_CTX ctx;
1493 	char *buf;
1494 	char uuid[HOSTUUIDLEN + 1];
1495 	uint64_t addr;
1496 	int i, sz;
1497 	char digest[SHA1_RESULTLEN];
1498 	char jailname[MAXHOSTNAMELEN];
1499 
1500 	getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
1501 	if (strncmp(uuid, DEFAULT_HOSTUUID, sizeof(uuid)) == 0) {
1502 		/* Fall back to a random mac address. */
1503 		goto rando;
1504 	}
1505 
1506 	/* If each (vnet) jail also had a unique hostuuid, this would not
1507 	 * be necessary. */
1508 	getjailname(curthread->td_ucred, jailname, sizeof(jailname));
1509 	sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, nameunit,
1510 	    jailname);
1511 	if (sz < 0) {
1512 		/* Fall back to a random mac address. */
1513 		goto rando;
1514 	}
1515 
1516 	SHA1Init(&ctx);
1517 	SHA1Update(&ctx, buf, sz);
1518 	SHA1Final(digest, &ctx);
1519 	free(buf, M_TEMP);
1520 
1521 	addr = ((digest[0] << 16) | (digest[1] << 8) | digest[2]) &
1522 	    OUI_FREEBSD_GENERATED_MASK;
1523 	addr = OUI_FREEBSD(addr);
1524 	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
1525 		hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
1526 		    0xFF;
1527 	}
1528 
1529 	return;
1530 rando:
1531 	arc4rand(hwaddr, sizeof(*hwaddr), 0);
1532 	/* Unicast */
1533 	hwaddr->octet[0] &= 0xFE;
1534 	/* Locally administered. */
1535 	hwaddr->octet[0] |= 0x02;
1536 }
1537 
1538 void
1539 ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
1540 {
1541 	ether_gen_addr_byname(if_name(ifp), hwaddr);
1542 }
1543 
1544 DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
1545 MODULE_VERSION(ether, 1);
1546