1 /*- 2 * Copyright 1998 Massachusetts Institute of Technology 3 * Copyright 2012 ADARA Networks, Inc. 4 * Copyright 2017 Dell EMC Isilon 5 * 6 * Portions of this software were developed by Robert N. M. Watson under 7 * contract to ADARA Networks, Inc. 8 * 9 * Permission to use, copy, modify, and distribute this software and 10 * its documentation for any purpose and without fee is hereby 11 * granted, provided that both the above copyright notice and this 12 * permission notice appear in all copies, that both the above 13 * copyright notice and this permission notice appear in all 14 * supporting documentation, and that the name of M.I.T. not be used 15 * in advertising or publicity pertaining to distribution of the 16 * software without specific, written prior permission. M.I.T. makes 17 * no representations about the suitability of this software for any 18 * purpose. It is provided "as is" without express or implied 19 * warranty. 20 * 21 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS 22 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, 23 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT 25 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 29 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 */ 34 35 /* 36 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. 37 * This is sort of sneaky in the implementation, since 38 * we need to pretend to be enough of an Ethernet implementation 39 * to make arp work. 
The way we do this is by telling everyone
 * that we are an Ethernet, and then catch the packets that
 * ether_output() sends to us via if_transmit(), rewrite them for
 * use by the real outgoing interface, and ask it to send them.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_vlan.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/priv.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/vnet.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

/* Initial log2 of the number of hash buckets per trunk. */
#define	VLAN_DEF_HWIDTH	4
#define	VLAN_IFFLAGS	(IFF_BROADCAST | IFF_MULTICAST)

/* True when the interface is administratively up and the driver is running. */
#define	UP_AND_RUNNING(ifp) \
    ((ifp)->if_flags & IFF_UP && (ifp)->if_drv_flags & IFF_DRV_RUNNING)

/* Head type for the per-bucket lists of vlans hanging off a trunk. */
CK_SLIST_HEAD(ifvlanhead, ifvlan);

/*
 * Per-parent-interface state: one trunk exists for each physical interface
 * that carries at least one vlan.  The set of vlans is kept either in a
 * static array indexed by VID (VLAN_ARRAY) or in a resizable hash table.
 */
struct ifvlantrunk {
	struct	ifnet *parent;		/* parent interface of this trunk */
	struct	mtx lock;		/* serializes trunk state changes */
#ifdef VLAN_ARRAY
#define	VLAN_ARRAY_SIZE	(EVL_VLID_MASK + 1)
	struct	ifvlan *vlans[VLAN_ARRAY_SIZE]; /* static table */
#else
	struct	ifvlanhead *hash;	/* dynamic hash-list table */
	uint16_t	hmask;		/* bucket index mask (buckets - 1) */
	uint16_t	hwidth;		/* log2 of the bucket count */
#endif
	int		refcnt;		/* number of vlans on this trunk */
};

/*
 * This macro provides a facility to iterate over every vlan on a trunk with
 * the assumption that none will be added/removed during iteration.
 */
#ifdef VLAN_ARRAY
#define VLAN_FOREACH(_ifv, _trunk) \
	size_t _i; \
	for (_i = 0; _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]) != NULL)
#else /* VLAN_ARRAY */
#define VLAN_FOREACH(_ifv, _trunk) \
	struct ifvlan *_next; \
	size_t _i; \
	for (_i = 0; _i < (1 << (_trunk)->hwidth); _i++) \
		CK_SLIST_FOREACH_SAFE((_ifv), &(_trunk)->hash[_i], ifv_list, _next)
#endif /* VLAN_ARRAY */

/*
 * This macro provides a facility to iterate over every vlan on a trunk while
 * also modifying the number of vlans on the trunk. The iteration continues
 * until some condition is met or there are no more vlans on the trunk.
 */
#ifdef VLAN_ARRAY
/* The VLAN_ARRAY case is simple -- just a for loop using the condition. */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	for (_i = 0; !(_cond) && _i < VLAN_ARRAY_SIZE; _i++) \
		if (((_ifv) = (_trunk)->vlans[_i]))
#else /* VLAN_ARRAY */
/*
 * The hash table case is more complicated. We allow for the hash table to be
 * modified (i.e. vlans removed) while we are iterating over it. To allow for
 * this we must restart the iteration every time we "touch" something during
 * the iteration, since removal will resize the hash table and invalidate our
 * current position. If acting on the touched element causes the trunk to be
 * emptied, then iteration also stops.
 */
#define VLAN_FOREACH_UNTIL_SAFE(_ifv, _trunk, _cond) \
	size_t _i; \
	bool _touch = false; \
	for (_i = 0; \
	    !(_cond) && _i < (1 << (_trunk)->hwidth); \
	    _i = (_touch && ((_trunk) != NULL) ? 0 : _i + 1), _touch = false) \
		if (((_ifv) = CK_SLIST_FIRST(&(_trunk)->hash[_i])) != NULL && \
		    (_touch = true))
#endif /* VLAN_ARRAY */

/* One multicast address mirrored from a vlan onto its parent interface. */
struct vlan_mc_entry {
	struct sockaddr_dl		mc_addr;
	CK_SLIST_ENTRY(vlan_mc_entry)	mc_entries;
};

/* Softc for a single vlan pseudo-interface. */
struct ifvlan {
	struct	ifvlantrunk *ifv_trunk;	/* parent trunk; NULL when detached */
	struct	ifnet *ifv_ifp;		/* our own ifnet */
#define	TRUNK(ifv)	((ifv)->ifv_trunk)
#define	PARENT(ifv)	((ifv)->ifv_trunk->parent)
	void	*ifv_cookie;		/* opaque cookie for drivers */
	int	ifv_pflags;	/* special flags we have set on parent */
	int	ifv_capenable;
	struct	ifv_linkmib {
		int	ifvm_encaplen;	/* encapsulation length */
		int	ifvm_mtufudge;	/* MTU fudged by this much */
		int	ifvm_mintu;	/* min transmission unit */
		uint16_t ifvm_proto;	/* encapsulation ethertype */
		uint16_t ifvm_tag;	/* tag to apply on packets leaving if */
		uint16_t ifvm_vid;	/* VLAN ID */
		uint8_t	ifvm_pcp;	/* Priority Code Point (PCP). */
	}	ifv_mib;
	struct	task lladdr_task;	/* deferred if_setlladdr work */
	CK_SLIST_HEAD(, vlan_mc_entry) vlan_mc_listhead;
#ifndef VLAN_ARRAY
	CK_SLIST_ENTRY(ifvlan) ifv_list; /* hash bucket linkage */
#endif
};
/* Convenience accessors for the embedded link MIB. */
#define	ifv_proto	ifv_mib.ifvm_proto
#define	ifv_tag		ifv_mib.ifvm_tag
#define	ifv_vid		ifv_mib.ifvm_vid
#define	ifv_pcp		ifv_mib.ifvm_pcp
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu

/* Special flags we should propagate to parent.
 */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} vlan_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

extern int vlan_mtag_pcp;

static const char vlanname[] = "vlan";
static MALLOC_DEFINE(M_VLAN, vlanname, "802.1Q Virtual LAN Interface");

static eventhandler_tag ifdetach_tag;
static eventhandler_tag iflladdr_tag;

/*
 * if_vlan uses two module-level synchronization primitives to allow concurrent
 * modification of vlan interfaces and (mostly) allow for vlans to be destroyed
 * while they are being used for tx/rx. To accomplish this in a way that has
 * acceptable performance and cooperation with other parts of the network stack
 * there is a non-sleepable epoch(9) and an sx(9).
 *
 * The performance-sensitive paths that warrant using the epoch(9) are
 * vlan_transmit and vlan_input. Both have to check for the vlan interface's
 * existence using if_vlantrunk, and being in the network tx/rx paths the use
 * of an epoch(9) gives a measurable improvement in performance.
 *
 * The reason for having an sx(9) is mostly because there are still areas that
 * must be sleepable and also have safe concurrent access to a vlan interface.
 * Since the sx(9) exists, it is used by default in most paths unless sleeping
 * is not permitted, or if it is not clear whether sleeping is permitted.
 *
 */
#define	_VLAN_SX_ID	ifv_sx

static struct sx _VLAN_SX_ID;

#define VLAN_LOCKING_INIT() \
	sx_init(&_VLAN_SX_ID, "vlan_sx")

#define VLAN_LOCKING_DESTROY() \
	sx_destroy(&_VLAN_SX_ID)

/* Read-side protection for tx/rx fast paths: network epoch, not the sx. */
#define	VLAN_RLOCK()			NET_EPOCH_ENTER();
#define	VLAN_RUNLOCK()			NET_EPOCH_EXIT();
#define	VLAN_RLOCK_ASSERT()		MPASS(in_epoch(net_epoch_preempt))

/* Sleepable global lock used by configuration paths. */
#define	VLAN_SLOCK()			sx_slock(&_VLAN_SX_ID)
#define	VLAN_SUNLOCK()			sx_sunlock(&_VLAN_SX_ID)
#define	VLAN_XLOCK()			sx_xlock(&_VLAN_SX_ID)
#define	VLAN_XUNLOCK()			sx_xunlock(&_VLAN_SX_ID)
#define	VLAN_SLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_SLOCKED)
#define	VLAN_XLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_XLOCKED)
#define	VLAN_SXLOCK_ASSERT()		sx_assert(&_VLAN_SX_ID, SA_LOCKED)


/*
 * We also have a per-trunk mutex that should be acquired when changing
 * its state.
 */
#define	TRUNK_LOCK_INIT(trunk)		mtx_init(&(trunk)->lock, vlanname, NULL, MTX_DEF)
#define	TRUNK_LOCK_DESTROY(trunk)	mtx_destroy(&(trunk)->lock)
#define	TRUNK_RLOCK(trunk)		NET_EPOCH_ENTER()
#define	TRUNK_WLOCK(trunk)		mtx_lock(&(trunk)->lock)
#define	TRUNK_RUNLOCK(trunk)		NET_EPOCH_EXIT();
#define	TRUNK_WUNLOCK(trunk)		mtx_unlock(&(trunk)->lock)
#define	TRUNK_RLOCK_ASSERT(trunk)	MPASS(in_epoch(net_epoch_preempt))
#define	TRUNK_LOCK_ASSERT(trunk)	MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(trunk)->lock))
#define	TRUNK_WLOCK_ASSERT(trunk)	mtx_assert(&(trunk)->lock, MA_OWNED);

/*
 * The VLAN_ARRAY substitutes the dynamic hash with a static array
 * with 4096 entries. In theory this can give a boost in processing,
 * however in practice it does not. Probably this is because the array
 * is too big to fit into CPU cache.
 */
#ifndef VLAN_ARRAY
static	void vlan_inithash(struct ifvlantrunk *trunk);
static	void vlan_freehash(struct ifvlantrunk *trunk);
static	int vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	int vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv);
static	void vlan_growhash(struct ifvlantrunk *trunk, int howmuch);
static __inline struct ifvlan * vlan_gethash(struct ifvlantrunk *trunk,
	uint16_t vid);
#endif
static	void trunk_destroy(struct ifvlantrunk *trunk);

static	void vlan_init(void *foo);
static	void vlan_input(struct ifnet *ifp, struct mbuf *m);
static	int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
#ifdef RATELIMIT
static	int vlan_snd_tag_alloc(struct ifnet *,
	union if_snd_tag_alloc_params *, struct m_snd_tag **);
#endif
static	void vlan_qflush(struct ifnet *ifp);
static	int vlan_setflag(struct ifnet *ifp, int flag, int status,
    int (*func)(struct ifnet *, int));
static	int vlan_setflags(struct ifnet *ifp, int status);
static	int vlan_setmulti(struct ifnet *ifp);
static	int vlan_transmit(struct ifnet *ifp, struct mbuf *m);
static	void vlan_unconfig(struct ifnet *ifp);
static	void vlan_unconfig_locked(struct ifnet *ifp, int departing);
static	int vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag);
static	void vlan_link_state(struct ifnet *ifp);
static	void vlan_capabilities(struct ifvlan *ifv);
static	void vlan_trunk_capabilities(struct ifnet *ifp);

static	struct ifnet *vlan_clone_match_ethervid(const char *, int *);
static	int vlan_clone_match(struct if_clone *, const char *);
static	int vlan_clone_create(struct if_clone *, char *, size_t, caddr_t);
static	int vlan_clone_destroy(struct if_clone *, struct ifnet *);

static	void vlan_ifdetach(void *arg, struct ifnet *ifp);
static	void vlan_iflladdr(void *arg, struct ifnet *ifp);

static void
vlan_lladdr_fn(void *arg, int pending); 308 309 static struct if_clone *vlan_cloner; 310 311 #ifdef VIMAGE 312 VNET_DEFINE_STATIC(struct if_clone *, vlan_cloner); 313 #define V_vlan_cloner VNET(vlan_cloner) 314 #endif 315 316 #ifndef VLAN_ARRAY 317 #define HASH(n, m) ((((n) >> 8) ^ ((n) >> 4) ^ (n)) & (m)) 318 319 static void 320 vlan_inithash(struct ifvlantrunk *trunk) 321 { 322 int i, n; 323 324 /* 325 * The trunk must not be locked here since we call malloc(M_WAITOK). 326 * It is OK in case this function is called before the trunk struct 327 * gets hooked up and becomes visible from other threads. 328 */ 329 330 KASSERT(trunk->hwidth == 0 && trunk->hash == NULL, 331 ("%s: hash already initialized", __func__)); 332 333 trunk->hwidth = VLAN_DEF_HWIDTH; 334 n = 1 << trunk->hwidth; 335 trunk->hmask = n - 1; 336 trunk->hash = malloc(sizeof(struct ifvlanhead) * n, M_VLAN, M_WAITOK); 337 for (i = 0; i < n; i++) 338 CK_SLIST_INIT(&trunk->hash[i]); 339 } 340 341 static void 342 vlan_freehash(struct ifvlantrunk *trunk) 343 { 344 #ifdef INVARIANTS 345 int i; 346 347 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 348 for (i = 0; i < (1 << trunk->hwidth); i++) 349 KASSERT(CK_SLIST_EMPTY(&trunk->hash[i]), 350 ("%s: hash table not empty", __func__)); 351 #endif 352 free(trunk->hash, M_VLAN); 353 trunk->hash = NULL; 354 trunk->hwidth = trunk->hmask = 0; 355 } 356 357 static int 358 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 359 { 360 int i, b; 361 struct ifvlan *ifv2; 362 363 VLAN_XLOCK_ASSERT(); 364 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 365 366 b = 1 << trunk->hwidth; 367 i = HASH(ifv->ifv_vid, trunk->hmask); 368 CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 369 if (ifv->ifv_vid == ifv2->ifv_vid) 370 return (EEXIST); 371 372 /* 373 * Grow the hash when the number of vlans exceeds half of the number of 374 * hash buckets squared. This will make the average linked-list length 375 * buckets/2. 
376 */ 377 if (trunk->refcnt > (b * b) / 2) { 378 vlan_growhash(trunk, 1); 379 i = HASH(ifv->ifv_vid, trunk->hmask); 380 } 381 CK_SLIST_INSERT_HEAD(&trunk->hash[i], ifv, ifv_list); 382 trunk->refcnt++; 383 384 return (0); 385 } 386 387 static int 388 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 389 { 390 int i, b; 391 struct ifvlan *ifv2; 392 393 VLAN_XLOCK_ASSERT(); 394 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 395 396 b = 1 << trunk->hwidth; 397 i = HASH(ifv->ifv_vid, trunk->hmask); 398 CK_SLIST_FOREACH(ifv2, &trunk->hash[i], ifv_list) 399 if (ifv2 == ifv) { 400 trunk->refcnt--; 401 CK_SLIST_REMOVE(&trunk->hash[i], ifv2, ifvlan, ifv_list); 402 if (trunk->refcnt < (b * b) / 2) 403 vlan_growhash(trunk, -1); 404 return (0); 405 } 406 407 panic("%s: vlan not found\n", __func__); 408 return (ENOENT); /*NOTREACHED*/ 409 } 410 411 /* 412 * Grow the hash larger or smaller if memory permits. 413 */ 414 static void 415 vlan_growhash(struct ifvlantrunk *trunk, int howmuch) 416 { 417 struct ifvlan *ifv; 418 struct ifvlanhead *hash2; 419 int hwidth2, i, j, n, n2; 420 421 VLAN_XLOCK_ASSERT(); 422 KASSERT(trunk->hwidth > 0, ("%s: hwidth not positive", __func__)); 423 424 if (howmuch == 0) { 425 /* Harmless yet obvious coding error */ 426 printf("%s: howmuch is 0\n", __func__); 427 return; 428 } 429 430 hwidth2 = trunk->hwidth + howmuch; 431 n = 1 << trunk->hwidth; 432 n2 = 1 << hwidth2; 433 /* Do not shrink the table below the default */ 434 if (hwidth2 < VLAN_DEF_HWIDTH) 435 return; 436 437 hash2 = malloc(sizeof(struct ifvlanhead) * n2, M_VLAN, M_WAITOK); 438 if (hash2 == NULL) { 439 printf("%s: out of memory -- hash size not changed\n", 440 __func__); 441 return; /* We can live with the old hash table */ 442 } 443 for (j = 0; j < n2; j++) 444 CK_SLIST_INIT(&hash2[j]); 445 for (i = 0; i < n; i++) 446 while ((ifv = CK_SLIST_FIRST(&trunk->hash[i])) != NULL) { 447 CK_SLIST_REMOVE(&trunk->hash[i], ifv, ifvlan, ifv_list); 448 j = 
HASH(ifv->ifv_vid, n2 - 1); 449 CK_SLIST_INSERT_HEAD(&hash2[j], ifv, ifv_list); 450 } 451 NET_EPOCH_WAIT(); 452 free(trunk->hash, M_VLAN); 453 trunk->hash = hash2; 454 trunk->hwidth = hwidth2; 455 trunk->hmask = n2 - 1; 456 457 if (bootverbose) 458 if_printf(trunk->parent, 459 "VLAN hash table resized from %d to %d buckets\n", n, n2); 460 } 461 462 static __inline struct ifvlan * 463 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 464 { 465 struct ifvlan *ifv; 466 467 TRUNK_RLOCK_ASSERT(trunk); 468 469 CK_SLIST_FOREACH(ifv, &trunk->hash[HASH(vid, trunk->hmask)], ifv_list) 470 if (ifv->ifv_vid == vid) 471 return (ifv); 472 return (NULL); 473 } 474 475 #if 0 476 /* Debugging code to view the hashtables. */ 477 static void 478 vlan_dumphash(struct ifvlantrunk *trunk) 479 { 480 int i; 481 struct ifvlan *ifv; 482 483 for (i = 0; i < (1 << trunk->hwidth); i++) { 484 printf("%d: ", i); 485 CK_SLIST_FOREACH(ifv, &trunk->hash[i], ifv_list) 486 printf("%s ", ifv->ifv_ifp->if_xname); 487 printf("\n"); 488 } 489 } 490 #endif /* 0 */ 491 #else 492 493 static __inline struct ifvlan * 494 vlan_gethash(struct ifvlantrunk *trunk, uint16_t vid) 495 { 496 497 return trunk->vlans[vid]; 498 } 499 500 static __inline int 501 vlan_inshash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 502 { 503 504 if (trunk->vlans[ifv->ifv_vid] != NULL) 505 return EEXIST; 506 trunk->vlans[ifv->ifv_vid] = ifv; 507 trunk->refcnt++; 508 509 return (0); 510 } 511 512 static __inline int 513 vlan_remhash(struct ifvlantrunk *trunk, struct ifvlan *ifv) 514 { 515 516 trunk->vlans[ifv->ifv_vid] = NULL; 517 trunk->refcnt--; 518 519 return (0); 520 } 521 522 static __inline void 523 vlan_freehash(struct ifvlantrunk *trunk) 524 { 525 } 526 527 static __inline void 528 vlan_inithash(struct ifvlantrunk *trunk) 529 { 530 } 531 532 #endif /* !VLAN_ARRAY */ 533 534 static void 535 trunk_destroy(struct ifvlantrunk *trunk) 536 { 537 VLAN_XLOCK_ASSERT(); 538 539 vlan_freehash(trunk); 540 trunk->parent->if_vlantrunk = 
NULL; 541 TRUNK_LOCK_DESTROY(trunk); 542 if_rele(trunk->parent); 543 free(trunk, M_VLAN); 544 } 545 546 /* 547 * Program our multicast filter. What we're actually doing is 548 * programming the multicast filter of the parent. This has the 549 * side effect of causing the parent interface to receive multicast 550 * traffic that it doesn't really want, which ends up being discarded 551 * later by the upper protocol layers. Unfortunately, there's no way 552 * to avoid this: there really is only one physical interface. 553 */ 554 static int 555 vlan_setmulti(struct ifnet *ifp) 556 { 557 struct ifnet *ifp_p; 558 struct ifmultiaddr *ifma; 559 struct ifvlan *sc; 560 struct vlan_mc_entry *mc; 561 int error; 562 563 VLAN_XLOCK_ASSERT(); 564 565 /* Find the parent. */ 566 sc = ifp->if_softc; 567 ifp_p = PARENT(sc); 568 569 CURVNET_SET_QUIET(ifp_p->if_vnet); 570 571 /* First, remove any existing filter entries. */ 572 while ((mc = CK_SLIST_FIRST(&sc->vlan_mc_listhead)) != NULL) { 573 CK_SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries); 574 (void)if_delmulti(ifp_p, (struct sockaddr *)&mc->mc_addr); 575 NET_EPOCH_WAIT(); 576 free(mc, M_VLAN); 577 } 578 579 /* Now program new ones. */ 580 IF_ADDR_WLOCK(ifp); 581 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 582 if (ifma->ifma_addr->sa_family != AF_LINK) 583 continue; 584 mc = malloc(sizeof(struct vlan_mc_entry), M_VLAN, M_NOWAIT); 585 if (mc == NULL) { 586 IF_ADDR_WUNLOCK(ifp); 587 return (ENOMEM); 588 } 589 bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len); 590 mc->mc_addr.sdl_index = ifp_p->if_index; 591 CK_SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries); 592 } 593 IF_ADDR_WUNLOCK(ifp); 594 CK_SLIST_FOREACH (mc, &sc->vlan_mc_listhead, mc_entries) { 595 error = if_addmulti(ifp_p, (struct sockaddr *)&mc->mc_addr, 596 NULL); 597 if (error) 598 return (error); 599 } 600 601 CURVNET_RESTORE(); 602 return (0); 603 } 604 605 /* 606 * A handler for parent interface link layer address changes. 
 * If the parent interface link layer address is changed we
 * should also change it on all children vlans.
 */
static void
vlan_iflladdr(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifnet *ifv_ifp;
	struct ifvlantrunk *trunk;
	struct sockaddr_dl *sdl;

	/* Need the epoch read section since this is run on taskqueue_swi. */
	VLAN_RLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		/* Not a trunk: no vlans to update. */
		VLAN_RUNLOCK();
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and change all vlan's lladdrs on it.
	 * We need an exclusive lock here to prevent concurrent SIOCSIFLLADDR
	 * ioctl calls on the parent garbling the lladdr of the child vlan.
	 */
	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		/*
		 * Copy the new lladdr into the ifv_ifp, enqueue a task
		 * to actually call if_setlladdr. if_setlladdr needs to
		 * be deferred to a taskqueue because it will call into
		 * the if_vlan ioctl path and try to acquire the global
		 * lock.
		 */
		ifv_ifp = ifv->ifv_ifp;
		bcopy(IF_LLADDR(ifp), IF_LLADDR(ifv_ifp),
		    ifp->if_addrlen);
		sdl = (struct sockaddr_dl *)ifv_ifp->if_addr->ifa_addr;
		sdl->sdl_alen = ifp->if_addrlen;
		taskqueue_enqueue(taskqueue_thread, &ifv->lladdr_task);
	}
	TRUNK_WUNLOCK(trunk);
	VLAN_RUNLOCK();
}

/*
 * A handler for network interface departure events.
 * Track departure of trunks here so that we don't access invalid
 * pointers or whatever if a trunk is ripped from under us, e.g.,
 * by ejecting its hot-plug card.  However, if an ifnet is simply
 * being renamed, then there's no need to tear down the state.
 */
static void
vlan_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;

	/* If the ifnet is just being renamed, don't do anything. */
	if (ifp->if_flags & IFF_RENAMING)
		return;
	VLAN_XLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		/* The departing interface carried no vlans. */
		VLAN_XUNLOCK();
		return;
	}

	/*
	 * OK, it's a trunk.  Loop over and detach all vlan's on it.
	 * Check trunk pointer after each vlan_unconfig() as it will
	 * free it and set to NULL after the last vlan was detached.
	 */
	VLAN_FOREACH_UNTIL_SAFE(ifv, ifp->if_vlantrunk,
	    ifp->if_vlantrunk == NULL)
		vlan_unconfig_locked(ifv->ifv_ifp, 1);

	/* Trunk should have been destroyed in vlan_unconfig(). */
	KASSERT(ifp->if_vlantrunk == NULL, ("%s: purge failed", __func__));
	VLAN_XUNLOCK();
}

/*
 * Return the trunk device for a virtual interface.
 */
static struct ifnet  *
vlan_trunkdev(struct ifnet *ifp)
{
	struct ifvlan *ifv;

	/* Only meaningful for vlan pseudo-interfaces. */
	if (ifp->if_type != IFT_L2VLAN)
		return (NULL);

	VLAN_RLOCK();
	ifv = ifp->if_softc;
	ifp = NULL;
	if (ifv->ifv_trunk)
		ifp = PARENT(ifv);
	VLAN_RUNLOCK();
	return (ifp);
}

/*
 * Return the 12-bit VLAN VID for this interface, for use by external
 * components such as Infiniband.
 *
 * XXXRW: Note that the function name here is historical; it should be named
 * vlan_vid().
 */
static int
vlan_tag(struct ifnet *ifp, uint16_t *vidp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	*vidp = ifv->ifv_vid;
	return (0);
}

/* Return the 3-bit Priority Code Point configured on this vlan. */
static int
vlan_pcp(struct ifnet *ifp, uint16_t *pcpp)
{
	struct ifvlan *ifv;

	if (ifp->if_type != IFT_L2VLAN)
		return (EINVAL);
	ifv = ifp->if_softc;
	*pcpp = ifv->ifv_pcp;
	return (0);
}

/*
 * Return a driver specific cookie for this interface.  Synchronization
 * with setcookie must be provided by the driver.
742 */ 743 static void * 744 vlan_cookie(struct ifnet *ifp) 745 { 746 struct ifvlan *ifv; 747 748 if (ifp->if_type != IFT_L2VLAN) 749 return (NULL); 750 ifv = ifp->if_softc; 751 return (ifv->ifv_cookie); 752 } 753 754 /* 755 * Store a cookie in our softc that drivers can use to store driver 756 * private per-instance data in. 757 */ 758 static int 759 vlan_setcookie(struct ifnet *ifp, void *cookie) 760 { 761 struct ifvlan *ifv; 762 763 if (ifp->if_type != IFT_L2VLAN) 764 return (EINVAL); 765 ifv = ifp->if_softc; 766 ifv->ifv_cookie = cookie; 767 return (0); 768 } 769 770 /* 771 * Return the vlan device present at the specific VID. 772 */ 773 static struct ifnet * 774 vlan_devat(struct ifnet *ifp, uint16_t vid) 775 { 776 struct ifvlantrunk *trunk; 777 struct ifvlan *ifv; 778 779 VLAN_RLOCK(); 780 trunk = ifp->if_vlantrunk; 781 if (trunk == NULL) { 782 VLAN_RUNLOCK(); 783 return (NULL); 784 } 785 ifp = NULL; 786 ifv = vlan_gethash(trunk, vid); 787 if (ifv) 788 ifp = ifv->ifv_ifp; 789 VLAN_RUNLOCK(); 790 return (ifp); 791 } 792 793 /* 794 * Recalculate the cached VLAN tag exposed via the MIB. 795 */ 796 static void 797 vlan_tag_recalculate(struct ifvlan *ifv) 798 { 799 800 ifv->ifv_tag = EVL_MAKETAG(ifv->ifv_vid, ifv->ifv_pcp, 0); 801 } 802 803 /* 804 * VLAN support can be loaded as a module. The only place in the 805 * system that's intimately aware of this is ether_input. We hook 806 * into this code through vlan_input_p which is defined there and 807 * set here. No one else in the system should be aware of this so 808 * we use an explicit reference here. 809 */ 810 extern void (*vlan_input_p)(struct ifnet *, struct mbuf *); 811 812 /* For if_link_state_change() eyes only... 
*/ 813 extern void (*vlan_link_state_p)(struct ifnet *); 814 815 static int 816 vlan_modevent(module_t mod, int type, void *data) 817 { 818 819 switch (type) { 820 case MOD_LOAD: 821 ifdetach_tag = EVENTHANDLER_REGISTER(ifnet_departure_event, 822 vlan_ifdetach, NULL, EVENTHANDLER_PRI_ANY); 823 if (ifdetach_tag == NULL) 824 return (ENOMEM); 825 iflladdr_tag = EVENTHANDLER_REGISTER(iflladdr_event, 826 vlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY); 827 if (iflladdr_tag == NULL) 828 return (ENOMEM); 829 VLAN_LOCKING_INIT(); 830 vlan_input_p = vlan_input; 831 vlan_link_state_p = vlan_link_state; 832 vlan_trunk_cap_p = vlan_trunk_capabilities; 833 vlan_trunkdev_p = vlan_trunkdev; 834 vlan_cookie_p = vlan_cookie; 835 vlan_setcookie_p = vlan_setcookie; 836 vlan_tag_p = vlan_tag; 837 vlan_pcp_p = vlan_pcp; 838 vlan_devat_p = vlan_devat; 839 #ifndef VIMAGE 840 vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match, 841 vlan_clone_create, vlan_clone_destroy); 842 #endif 843 if (bootverbose) 844 printf("vlan: initialized, using " 845 #ifdef VLAN_ARRAY 846 "full-size arrays" 847 #else 848 "hash tables with chaining" 849 #endif 850 851 "\n"); 852 break; 853 case MOD_UNLOAD: 854 #ifndef VIMAGE 855 if_clone_detach(vlan_cloner); 856 #endif 857 EVENTHANDLER_DEREGISTER(ifnet_departure_event, ifdetach_tag); 858 EVENTHANDLER_DEREGISTER(iflladdr_event, iflladdr_tag); 859 vlan_input_p = NULL; 860 vlan_link_state_p = NULL; 861 vlan_trunk_cap_p = NULL; 862 vlan_trunkdev_p = NULL; 863 vlan_tag_p = NULL; 864 vlan_cookie_p = NULL; 865 vlan_setcookie_p = NULL; 866 vlan_devat_p = NULL; 867 VLAN_LOCKING_DESTROY(); 868 if (bootverbose) 869 printf("vlan: unloaded\n"); 870 break; 871 default: 872 return (EOPNOTSUPP); 873 } 874 return (0); 875 } 876 877 static moduledata_t vlan_mod = { 878 "if_vlan", 879 vlan_modevent, 880 0 881 }; 882 883 DECLARE_MODULE(if_vlan, vlan_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 884 MODULE_VERSION(if_vlan, 3); 885 886 #ifdef VIMAGE 887 static void 888 vnet_vlan_init(const 
void *unused __unused) 889 { 890 891 vlan_cloner = if_clone_advanced(vlanname, 0, vlan_clone_match, 892 vlan_clone_create, vlan_clone_destroy); 893 V_vlan_cloner = vlan_cloner; 894 } 895 VNET_SYSINIT(vnet_vlan_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 896 vnet_vlan_init, NULL); 897 898 static void 899 vnet_vlan_uninit(const void *unused __unused) 900 { 901 902 if_clone_detach(V_vlan_cloner); 903 } 904 VNET_SYSUNINIT(vnet_vlan_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST, 905 vnet_vlan_uninit, NULL); 906 #endif 907 908 /* 909 * Check for <etherif>.<vlan> style interface names. 910 */ 911 static struct ifnet * 912 vlan_clone_match_ethervid(const char *name, int *vidp) 913 { 914 char ifname[IFNAMSIZ]; 915 char *cp; 916 struct ifnet *ifp; 917 int vid; 918 919 strlcpy(ifname, name, IFNAMSIZ); 920 if ((cp = strchr(ifname, '.')) == NULL) 921 return (NULL); 922 *cp = '\0'; 923 if ((ifp = ifunit_ref(ifname)) == NULL) 924 return (NULL); 925 /* Parse VID. */ 926 if (*++cp == '\0') { 927 if_rele(ifp); 928 return (NULL); 929 } 930 vid = 0; 931 for(; *cp >= '0' && *cp <= '9'; cp++) 932 vid = (vid * 10) + (*cp - '0'); 933 if (*cp != '\0') { 934 if_rele(ifp); 935 return (NULL); 936 } 937 if (vidp != NULL) 938 *vidp = vid; 939 940 return (ifp); 941 } 942 943 static int 944 vlan_clone_match(struct if_clone *ifc, const char *name) 945 { 946 const char *cp; 947 948 if (vlan_clone_match_ethervid(name, NULL) != NULL) 949 return (1); 950 951 if (strncmp(vlanname, name, strlen(vlanname)) != 0) 952 return (0); 953 for (cp = name + 4; *cp != '\0'; cp++) { 954 if (*cp < '0' || *cp > '9') 955 return (0); 956 } 957 958 return (1); 959 } 960 961 static int 962 vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params) 963 { 964 char *dp; 965 int wildcard; 966 int unit; 967 int error; 968 int vid; 969 struct ifvlan *ifv; 970 struct ifnet *ifp; 971 struct ifnet *p; 972 struct ifaddr *ifa; 973 struct sockaddr_dl *sdl; 974 struct vlanreq vlr; 975 static const u_char 
eaddr[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */ 976 977 /* 978 * There are 3 (ugh) ways to specify the cloned device: 979 * o pass a parameter block with the clone request. 980 * o specify parameters in the text of the clone device name 981 * o specify no parameters and get an unattached device that 982 * must be configured separately. 983 * The first technique is preferred; the latter two are 984 * supported for backwards compatibility. 985 * 986 * XXXRW: Note historic use of the word "tag" here. New ioctls may be 987 * called for. 988 */ 989 if (params) { 990 error = copyin(params, &vlr, sizeof(vlr)); 991 if (error) 992 return error; 993 p = ifunit_ref(vlr.vlr_parent); 994 if (p == NULL) 995 return (ENXIO); 996 error = ifc_name2unit(name, &unit); 997 if (error != 0) { 998 if_rele(p); 999 return (error); 1000 } 1001 vid = vlr.vlr_tag; 1002 wildcard = (unit < 0); 1003 } else if ((p = vlan_clone_match_ethervid(name, &vid)) != NULL) { 1004 unit = -1; 1005 wildcard = 0; 1006 } else { 1007 p = NULL; 1008 error = ifc_name2unit(name, &unit); 1009 if (error != 0) 1010 return (error); 1011 1012 wildcard = (unit < 0); 1013 } 1014 1015 error = ifc_alloc_unit(ifc, &unit); 1016 if (error != 0) { 1017 if (p != NULL) 1018 if_rele(p); 1019 return (error); 1020 } 1021 1022 /* In the wildcard case, we need to update the name. 
*/ 1023 if (wildcard) { 1024 for (dp = name; *dp != '\0'; dp++); 1025 if (snprintf(dp, len - (dp-name), "%d", unit) > 1026 len - (dp-name) - 1) { 1027 panic("%s: interface name too long", __func__); 1028 } 1029 } 1030 1031 ifv = malloc(sizeof(struct ifvlan), M_VLAN, M_WAITOK | M_ZERO); 1032 ifp = ifv->ifv_ifp = if_alloc(IFT_ETHER); 1033 if (ifp == NULL) { 1034 ifc_free_unit(ifc, unit); 1035 free(ifv, M_VLAN); 1036 if (p != NULL) 1037 if_rele(p); 1038 return (ENOSPC); 1039 } 1040 CK_SLIST_INIT(&ifv->vlan_mc_listhead); 1041 ifp->if_softc = ifv; 1042 /* 1043 * Set the name manually rather than using if_initname because 1044 * we don't conform to the default naming convention for interfaces. 1045 */ 1046 strlcpy(ifp->if_xname, name, IFNAMSIZ); 1047 ifp->if_dname = vlanname; 1048 ifp->if_dunit = unit; 1049 /* NB: flags are not set here */ 1050 ifp->if_linkmib = &ifv->ifv_mib; 1051 ifp->if_linkmiblen = sizeof(ifv->ifv_mib); 1052 /* NB: mtu is not set here */ 1053 1054 ifp->if_init = vlan_init; 1055 ifp->if_transmit = vlan_transmit; 1056 ifp->if_qflush = vlan_qflush; 1057 ifp->if_ioctl = vlan_ioctl; 1058 #ifdef RATELIMIT 1059 ifp->if_snd_tag_alloc = vlan_snd_tag_alloc; 1060 #endif 1061 ifp->if_flags = VLAN_IFFLAGS; 1062 ether_ifattach(ifp, eaddr); 1063 /* Now undo some of the damage... */ 1064 ifp->if_baudrate = 0; 1065 ifp->if_type = IFT_L2VLAN; 1066 ifp->if_hdrlen = ETHER_VLAN_ENCAP_LEN; 1067 ifa = ifp->if_addr; 1068 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 1069 sdl->sdl_type = IFT_L2VLAN; 1070 1071 if (p != NULL) { 1072 error = vlan_config(ifv, p, vid); 1073 if_rele(p); 1074 if (error != 0) { 1075 /* 1076 * Since we've partially failed, we need to back 1077 * out all the way, otherwise userland could get 1078 * confused. Thus, we destroy the interface. 
1079 */ 1080 ether_ifdetach(ifp); 1081 vlan_unconfig(ifp); 1082 if_free(ifp); 1083 ifc_free_unit(ifc, unit); 1084 free(ifv, M_VLAN); 1085 1086 return (error); 1087 } 1088 } 1089 1090 return (0); 1091 } 1092 1093 static int 1094 vlan_clone_destroy(struct if_clone *ifc, struct ifnet *ifp) 1095 { 1096 struct ifvlan *ifv = ifp->if_softc; 1097 int unit = ifp->if_dunit; 1098 1099 ether_ifdetach(ifp); /* first, remove it from system-wide lists */ 1100 vlan_unconfig(ifp); /* now it can be unconfigured and freed */ 1101 /* 1102 * We should have the only reference to the ifv now, so we can now 1103 * drain any remaining lladdr task before freeing the ifnet and the 1104 * ifvlan. 1105 */ 1106 taskqueue_drain(taskqueue_thread, &ifv->lladdr_task); 1107 NET_EPOCH_WAIT(); 1108 if_free(ifp); 1109 free(ifv, M_VLAN); 1110 ifc_free_unit(ifc, unit); 1111 1112 return (0); 1113 } 1114 1115 /* 1116 * The ifp->if_init entry point for vlan(4) is a no-op. 1117 */ 1118 static void 1119 vlan_init(void *foo __unused) 1120 { 1121 } 1122 1123 /* 1124 * The if_transmit method for vlan(4) interface. 1125 */ 1126 static int 1127 vlan_transmit(struct ifnet *ifp, struct mbuf *m) 1128 { 1129 struct ifvlan *ifv; 1130 struct ifnet *p; 1131 int error, len, mcast; 1132 1133 VLAN_RLOCK(); 1134 ifv = ifp->if_softc; 1135 if (TRUNK(ifv) == NULL) { 1136 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1137 VLAN_RUNLOCK(); 1138 m_freem(m); 1139 return (ENETDOWN); 1140 } 1141 p = PARENT(ifv); 1142 len = m->m_pkthdr.len; 1143 mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0; 1144 1145 BPF_MTAP(ifp, m); 1146 1147 /* 1148 * Do not run parent's if_transmit() if the parent is not up, 1149 * or parent's driver will cause a system crash. 
1150 */ 1151 if (!UP_AND_RUNNING(p)) { 1152 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1153 VLAN_RUNLOCK(); 1154 m_freem(m); 1155 return (ENETDOWN); 1156 } 1157 1158 if (!ether_8021q_frame(&m, ifp, p, ifv->ifv_vid, ifv->ifv_pcp)) { 1159 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1160 VLAN_RUNLOCK(); 1161 return (0); 1162 } 1163 1164 /* 1165 * Send it, precisely as ether_output() would have. 1166 */ 1167 error = (p->if_transmit)(p, m); 1168 if (error == 0) { 1169 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1170 if_inc_counter(ifp, IFCOUNTER_OBYTES, len); 1171 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast); 1172 } else 1173 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1174 VLAN_RUNLOCK(); 1175 return (error); 1176 } 1177 1178 /* 1179 * The ifp->if_qflush entry point for vlan(4) is a no-op. 1180 */ 1181 static void 1182 vlan_qflush(struct ifnet *ifp __unused) 1183 { 1184 } 1185 1186 static void 1187 vlan_input(struct ifnet *ifp, struct mbuf *m) 1188 { 1189 struct ifvlantrunk *trunk; 1190 struct ifvlan *ifv; 1191 struct m_tag *mtag; 1192 uint16_t vid, tag; 1193 1194 VLAN_RLOCK(); 1195 trunk = ifp->if_vlantrunk; 1196 if (trunk == NULL) { 1197 VLAN_RUNLOCK(); 1198 m_freem(m); 1199 return; 1200 } 1201 1202 if (m->m_flags & M_VLANTAG) { 1203 /* 1204 * Packet is tagged, but m contains a normal 1205 * Ethernet frame; the tag is stored out-of-band. 1206 */ 1207 tag = m->m_pkthdr.ether_vtag; 1208 m->m_flags &= ~M_VLANTAG; 1209 } else { 1210 struct ether_vlan_header *evl; 1211 1212 /* 1213 * Packet is tagged in-band as specified by 802.1q. 
1214 */ 1215 switch (ifp->if_type) { 1216 case IFT_ETHER: 1217 if (m->m_len < sizeof(*evl) && 1218 (m = m_pullup(m, sizeof(*evl))) == NULL) { 1219 if_printf(ifp, "cannot pullup VLAN header\n"); 1220 VLAN_RUNLOCK(); 1221 return; 1222 } 1223 evl = mtod(m, struct ether_vlan_header *); 1224 tag = ntohs(evl->evl_tag); 1225 1226 /* 1227 * Remove the 802.1q header by copying the Ethernet 1228 * addresses over it and adjusting the beginning of 1229 * the data in the mbuf. The encapsulated Ethernet 1230 * type field is already in place. 1231 */ 1232 bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN, 1233 ETHER_HDR_LEN - ETHER_TYPE_LEN); 1234 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1235 break; 1236 1237 default: 1238 #ifdef INVARIANTS 1239 panic("%s: %s has unsupported if_type %u", 1240 __func__, ifp->if_xname, ifp->if_type); 1241 #endif 1242 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); 1243 VLAN_RUNLOCK(); 1244 m_freem(m); 1245 return; 1246 } 1247 } 1248 1249 vid = EVL_VLANOFTAG(tag); 1250 1251 ifv = vlan_gethash(trunk, vid); 1252 if (ifv == NULL || !UP_AND_RUNNING(ifv->ifv_ifp)) { 1253 VLAN_RUNLOCK(); 1254 if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1); 1255 m_freem(m); 1256 return; 1257 } 1258 1259 if (vlan_mtag_pcp) { 1260 /* 1261 * While uncommon, it is possible that we will find a 802.1q 1262 * packet encapsulated inside another packet that also had an 1263 * 802.1q header. For example, ethernet tunneled over IPSEC 1264 * arriving over ethernet. In that case, we replace the 1265 * existing 802.1q PCP m_tag value. 
1266 */ 1267 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL); 1268 if (mtag == NULL) { 1269 mtag = m_tag_alloc(MTAG_8021Q, MTAG_8021Q_PCP_IN, 1270 sizeof(uint8_t), M_NOWAIT); 1271 if (mtag == NULL) { 1272 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1273 VLAN_RUNLOCK(); 1274 m_freem(m); 1275 return; 1276 } 1277 m_tag_prepend(m, mtag); 1278 } 1279 *(uint8_t *)(mtag + 1) = EVL_PRIOFTAG(tag); 1280 } 1281 1282 m->m_pkthdr.rcvif = ifv->ifv_ifp; 1283 if_inc_counter(ifv->ifv_ifp, IFCOUNTER_IPACKETS, 1); 1284 VLAN_RUNLOCK(); 1285 1286 /* Pass it back through the parent's input routine. */ 1287 (*ifv->ifv_ifp->if_input)(ifv->ifv_ifp, m); 1288 } 1289 1290 static void 1291 vlan_lladdr_fn(void *arg, int pending __unused) 1292 { 1293 struct ifvlan *ifv; 1294 struct ifnet *ifp; 1295 1296 ifv = (struct ifvlan *)arg; 1297 ifp = ifv->ifv_ifp; 1298 /* The ifv_ifp already has the lladdr copied in. */ 1299 if_setlladdr(ifp, IF_LLADDR(ifp), ifp->if_addrlen); 1300 } 1301 1302 static int 1303 vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t vid) 1304 { 1305 struct ifvlantrunk *trunk; 1306 struct ifnet *ifp; 1307 int error = 0; 1308 1309 /* 1310 * We can handle non-ethernet hardware types as long as 1311 * they handle the tagging and headers themselves. 1312 */ 1313 if (p->if_type != IFT_ETHER && 1314 (p->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 1315 return (EPROTONOSUPPORT); 1316 if ((p->if_flags & VLAN_IFFLAGS) != VLAN_IFFLAGS) 1317 return (EPROTONOSUPPORT); 1318 /* 1319 * Don't let the caller set up a VLAN VID with 1320 * anything except VLID bits. 1321 * VID numbers 0x0 and 0xFFF are reserved. 
1322 */ 1323 if (vid == 0 || vid == 0xFFF || (vid & ~EVL_VLID_MASK)) 1324 return (EINVAL); 1325 if (ifv->ifv_trunk) 1326 return (EBUSY); 1327 1328 VLAN_XLOCK(); 1329 if (p->if_vlantrunk == NULL) { 1330 trunk = malloc(sizeof(struct ifvlantrunk), 1331 M_VLAN, M_WAITOK | M_ZERO); 1332 vlan_inithash(trunk); 1333 TRUNK_LOCK_INIT(trunk); 1334 TRUNK_WLOCK(trunk); 1335 p->if_vlantrunk = trunk; 1336 trunk->parent = p; 1337 if_ref(trunk->parent); 1338 TRUNK_WUNLOCK(trunk); 1339 } else { 1340 trunk = p->if_vlantrunk; 1341 } 1342 1343 ifv->ifv_vid = vid; /* must set this before vlan_inshash() */ 1344 ifv->ifv_pcp = 0; /* Default: best effort delivery. */ 1345 vlan_tag_recalculate(ifv); 1346 error = vlan_inshash(trunk, ifv); 1347 if (error) 1348 goto done; 1349 ifv->ifv_proto = ETHERTYPE_VLAN; 1350 ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN; 1351 ifv->ifv_mintu = ETHERMIN; 1352 ifv->ifv_pflags = 0; 1353 ifv->ifv_capenable = -1; 1354 1355 /* 1356 * If the parent supports the VLAN_MTU capability, 1357 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames, 1358 * use it. 1359 */ 1360 if (p->if_capenable & IFCAP_VLAN_MTU) { 1361 /* 1362 * No need to fudge the MTU since the parent can 1363 * handle extended frames. 1364 */ 1365 ifv->ifv_mtufudge = 0; 1366 } else { 1367 /* 1368 * Fudge the MTU by the encapsulation size. This 1369 * makes us incompatible with strictly compliant 1370 * 802.1Q implementations, but allows us to use 1371 * the feature with other NetBSD implementations, 1372 * which might still be useful. 1373 */ 1374 ifv->ifv_mtufudge = ifv->ifv_encaplen; 1375 } 1376 1377 ifv->ifv_trunk = trunk; 1378 ifp = ifv->ifv_ifp; 1379 /* 1380 * Initialize fields from our parent. This duplicates some 1381 * work with ether_ifattach() but allows for non-ethernet 1382 * interfaces to also work. 
1383 */ 1384 ifp->if_mtu = p->if_mtu - ifv->ifv_mtufudge; 1385 ifp->if_baudrate = p->if_baudrate; 1386 ifp->if_output = p->if_output; 1387 ifp->if_input = p->if_input; 1388 ifp->if_resolvemulti = p->if_resolvemulti; 1389 ifp->if_addrlen = p->if_addrlen; 1390 ifp->if_broadcastaddr = p->if_broadcastaddr; 1391 ifp->if_pcp = ifv->ifv_pcp; 1392 1393 /* 1394 * Copy only a selected subset of flags from the parent. 1395 * Other flags are none of our business. 1396 */ 1397 #define VLAN_COPY_FLAGS (IFF_SIMPLEX) 1398 ifp->if_flags &= ~VLAN_COPY_FLAGS; 1399 ifp->if_flags |= p->if_flags & VLAN_COPY_FLAGS; 1400 #undef VLAN_COPY_FLAGS 1401 1402 ifp->if_link_state = p->if_link_state; 1403 1404 TRUNK_RLOCK(TRUNK(ifv)); 1405 vlan_capabilities(ifv); 1406 TRUNK_RUNLOCK(TRUNK(ifv)); 1407 1408 /* 1409 * Set up our interface address to reflect the underlying 1410 * physical interface's. 1411 */ 1412 bcopy(IF_LLADDR(p), IF_LLADDR(ifp), p->if_addrlen); 1413 ((struct sockaddr_dl *)ifp->if_addr->ifa_addr)->sdl_alen = 1414 p->if_addrlen; 1415 1416 TASK_INIT(&ifv->lladdr_task, 0, vlan_lladdr_fn, ifv); 1417 1418 /* We are ready for operation now. */ 1419 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1420 1421 /* Update flags on the parent, if necessary. */ 1422 vlan_setflags(ifp, 1); 1423 1424 /* 1425 * Configure multicast addresses that may already be 1426 * joined on the vlan device. 
1427 */ 1428 (void)vlan_setmulti(ifp); 1429 1430 done: 1431 if (error == 0) 1432 EVENTHANDLER_INVOKE(vlan_config, p, ifv->ifv_vid); 1433 VLAN_XUNLOCK(); 1434 1435 return (error); 1436 } 1437 1438 static void 1439 vlan_unconfig(struct ifnet *ifp) 1440 { 1441 1442 VLAN_XLOCK(); 1443 vlan_unconfig_locked(ifp, 0); 1444 VLAN_XUNLOCK(); 1445 } 1446 1447 static void 1448 vlan_unconfig_locked(struct ifnet *ifp, int departing) 1449 { 1450 struct ifvlantrunk *trunk; 1451 struct vlan_mc_entry *mc; 1452 struct ifvlan *ifv; 1453 struct ifnet *parent; 1454 int error; 1455 1456 VLAN_XLOCK_ASSERT(); 1457 1458 ifv = ifp->if_softc; 1459 trunk = ifv->ifv_trunk; 1460 parent = NULL; 1461 1462 if (trunk != NULL) { 1463 parent = trunk->parent; 1464 1465 /* 1466 * Since the interface is being unconfigured, we need to 1467 * empty the list of multicast groups that we may have joined 1468 * while we were alive from the parent's list. 1469 */ 1470 while ((mc = CK_SLIST_FIRST(&ifv->vlan_mc_listhead)) != NULL) { 1471 /* 1472 * If the parent interface is being detached, 1473 * all its multicast addresses have already 1474 * been removed. Warn about errors if 1475 * if_delmulti() does fail, but don't abort as 1476 * all callers expect vlan destruction to 1477 * succeed. 1478 */ 1479 if (!departing) { 1480 error = if_delmulti(parent, 1481 (struct sockaddr *)&mc->mc_addr); 1482 if (error) 1483 if_printf(ifp, 1484 "Failed to delete multicast address from parent: %d\n", 1485 error); 1486 } 1487 CK_SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries); 1488 NET_EPOCH_WAIT(); 1489 free(mc, M_VLAN); 1490 } 1491 1492 vlan_setflags(ifp, 0); /* clear special flags on parent */ 1493 1494 vlan_remhash(trunk, ifv); 1495 ifv->ifv_trunk = NULL; 1496 1497 /* 1498 * Check if we were the last. 1499 */ 1500 if (trunk->refcnt == 0) { 1501 parent->if_vlantrunk = NULL; 1502 NET_EPOCH_WAIT(); 1503 trunk_destroy(trunk); 1504 } 1505 } 1506 1507 /* Disconnect from parent. 
*/ 1508 if (ifv->ifv_pflags) 1509 if_printf(ifp, "%s: ifv_pflags unclean\n", __func__); 1510 ifp->if_mtu = ETHERMTU; 1511 ifp->if_link_state = LINK_STATE_UNKNOWN; 1512 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1513 1514 /* 1515 * Only dispatch an event if vlan was 1516 * attached, otherwise there is nothing 1517 * to cleanup anyway. 1518 */ 1519 if (parent != NULL) 1520 EVENTHANDLER_INVOKE(vlan_unconfig, parent, ifv->ifv_vid); 1521 } 1522 1523 /* Handle a reference counted flag that should be set on the parent as well */ 1524 static int 1525 vlan_setflag(struct ifnet *ifp, int flag, int status, 1526 int (*func)(struct ifnet *, int)) 1527 { 1528 struct ifvlan *ifv; 1529 int error; 1530 1531 VLAN_SXLOCK_ASSERT(); 1532 1533 ifv = ifp->if_softc; 1534 status = status ? (ifp->if_flags & flag) : 0; 1535 /* Now "status" contains the flag value or 0 */ 1536 1537 /* 1538 * See if recorded parent's status is different from what 1539 * we want it to be. If it is, flip it. We record parent's 1540 * status in ifv_pflags so that we won't clear parent's flag 1541 * we haven't set. In fact, we don't clear or set parent's 1542 * flags directly, but get or release references to them. 1543 * That's why we can be sure that recorded flags still are 1544 * in accord with actual parent's flags. 1545 */ 1546 if (status != (ifv->ifv_pflags & flag)) { 1547 error = (*func)(PARENT(ifv), status); 1548 if (error) 1549 return (error); 1550 ifv->ifv_pflags &= ~flag; 1551 ifv->ifv_pflags |= status; 1552 } 1553 return (0); 1554 } 1555 1556 /* 1557 * Handle IFF_* flags that require certain changes on the parent: 1558 * if "status" is true, update parent's flags respective to our if_flags; 1559 * if "status" is false, forcedly clear the flags set on parent. 
 */
static int
vlan_setflags(struct ifnet *ifp, int status)
{
	int error, i;

	/* Walk the table of parent-propagated flags (e.g. IFF_PROMISC). */
	for (i = 0; vlan_pflags[i].flag; i++) {
		error = vlan_setflag(ifp, vlan_pflags[i].flag,
		    status, vlan_pflags[i].func);
		if (error)
			return (error);
	}
	return (0);
}

/* Inform all vlans that their parent has changed link state */
static void
vlan_link_state(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	/* Called from a taskqueue_swi task, so we cannot sleep. */
	VLAN_RLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		/* Parent has no vlans attached; nothing to propagate. */
		VLAN_RUNLOCK();
		return;
	}

	TRUNK_WLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		/* Mirror the parent's baudrate and link state on each vlan. */
		ifv->ifv_ifp->if_baudrate = trunk->parent->if_baudrate;
		if_link_state_change(ifv->ifv_ifp,
		    trunk->parent->if_link_state);
	}
	TRUNK_WUNLOCK(trunk);
	VLAN_RUNLOCK();
}

/*
 * Recompute this vlan interface's capabilities, enabled capabilities
 * and hardware-assist flags from its parent's.  Only capabilities the
 * parent declares usable over VLANs (IFCAP_VLAN_*) are propagated, and
 * user-disabled bits (ifv_capenable) are masked out of the enabled set.
 */
static void
vlan_capabilities(struct ifvlan *ifv)
{
	struct ifnet *p;
	struct ifnet *ifp;
	struct ifnet_hw_tsomax hw_tsomax;
	int cap = 0, ena = 0, mena;
	u_long hwa = 0;

	VLAN_SXLOCK_ASSERT();
	TRUNK_RLOCK_ASSERT(TRUNK(ifv));
	p = PARENT(ifv);
	ifp = ifv->ifv_ifp;

	/* Mask parent interface enabled capabilities disabled by user. */
	mena = p->if_capenable & ifv->ifv_capenable;

	/*
	 * If the parent interface can do checksum offloading
	 * on VLANs, then propagate its hardware-assisted
	 * checksumming flags. Also assert that checksum
	 * offloading requires hardware VLAN tagging.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
	if (p->if_capenable & IFCAP_VLAN_HWCSUM &&
	    p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		ena |= mena & (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6);
		if (ena & IFCAP_TXCSUM)
			hwa |= p->if_hwassist & (CSUM_IP | CSUM_TCP |
			    CSUM_UDP | CSUM_SCTP);
		if (ena & IFCAP_TXCSUM_IPV6)
			hwa |= p->if_hwassist & (CSUM_TCP_IPV6 |
			    CSUM_UDP_IPV6 | CSUM_SCTP_IPV6);
	}

	/*
	 * If the parent interface can do TSO on VLANs then
	 * propagate the hardware-assisted flag. TSO on VLANs
	 * does not necessarily require hardware VLAN tagging.
	 */
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	if_hw_tsomax_common(p, &hw_tsomax);
	if_hw_tsomax_update(ifp, &hw_tsomax);
	if (p->if_capabilities & IFCAP_VLAN_HWTSO)
		cap |= p->if_capabilities & IFCAP_TSO;
	if (p->if_capenable & IFCAP_VLAN_HWTSO) {
		ena |= mena & IFCAP_TSO;
		if (ena & IFCAP_TSO)
			hwa |= p->if_hwassist & CSUM_TSO;
	}

	/*
	 * If the parent interface can do LRO and checksum offloading on
	 * VLANs, then guess it may do LRO on VLANs. False positive here
	 * cost nothing, while false negative may lead to some confusions.
	 */
	if (p->if_capabilities & IFCAP_VLAN_HWCSUM)
		cap |= p->if_capabilities & IFCAP_LRO;
	if (p->if_capenable & IFCAP_VLAN_HWCSUM)
		ena |= p->if_capenable & IFCAP_LRO;

	/*
	 * If the parent interface can offload TCP connections over VLANs then
	 * propagate its TOE capability to the VLAN interface.
	 *
	 * All TOE drivers in the tree today can deal with VLANs.  If this
	 * changes then IFCAP_VLAN_TOE should be promoted to a full capability
	 * with its own bit.
	 */
#define	IFCAP_VLAN_TOE IFCAP_TOE
	if (p->if_capabilities & IFCAP_VLAN_TOE)
		cap |= p->if_capabilities & IFCAP_TOE;
	if (p->if_capenable & IFCAP_VLAN_TOE) {
		TOEDEV(ifp) = TOEDEV(p);
		ena |= mena & IFCAP_TOE;
	}

	/*
	 * If the parent interface supports dynamic link state, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_LINKSTATE);
	ena |= (mena & IFCAP_LINKSTATE);

#ifdef RATELIMIT
	/*
	 * If the parent interface supports ratelimiting, so does the
	 * VLAN interface.
	 */
	cap |= (p->if_capabilities & IFCAP_TXRTLMT);
	ena |= (mena & IFCAP_TXRTLMT);
#endif

	ifp->if_capabilities = cap;
	ifp->if_capenable = ena;
	ifp->if_hwassist = hwa;
}

/*
 * Recompute capabilities for every vlan attached to the given parent,
 * typically after the parent's own capabilities changed.
 */
static void
vlan_trunk_capabilities(struct ifnet *ifp)
{
	struct ifvlantrunk *trunk;
	struct ifvlan *ifv;

	VLAN_SLOCK();
	trunk = ifp->if_vlantrunk;
	if (trunk == NULL) {
		VLAN_SUNLOCK();
		return;
	}
	TRUNK_RLOCK(trunk);
	VLAN_FOREACH(ifv, trunk) {
		vlan_capabilities(ifv);
	}
	TRUNK_RUNLOCK(trunk);
	VLAN_SUNLOCK();
}

/*
 * The ifp->if_ioctl entry point for vlan(4).  Handles vlan-specific
 * requests (SIOC[SG]ETVLAN, SIOC[SG]VLANPCP) as well as the generic
 * interface requests that need trunk/parent awareness.
 */
static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifnet *p;
	struct ifreq *ifr;
	struct ifaddr *ifa;
	struct ifvlan *ifv;
	struct ifvlantrunk *trunk;
	struct vlanreq vlr;
	int error = 0;

	ifr = (struct ifreq *)data;
	ifa = (struct ifaddr *) data;
	ifv = ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;
	case SIOCGIFADDR:
		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
		    ifp->if_addrlen);
		break;
	case SIOCGIFMEDIA:
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			/*
			 * Forward the media query to the parent; hold a
			 * reference so it cannot go away while we call it.
			 */
			p = PARENT(ifv);
			if_ref(p);
			error = (*p->if_ioctl)(p, SIOCGIFMEDIA, data);
			if_rele(p);
			/* Limit the result to the parent's current config. */
			if (error == 0) {
				struct ifmediareq *ifmr;

				ifmr = (struct ifmediareq *)data;
				if (ifmr->ifm_count >= 1 && ifmr->ifm_ulist) {
					ifmr->ifm_count = 1;
					error = copyout(&ifmr->ifm_current,
					    ifmr->ifm_ulist,
					    sizeof(int));
				}
			}
		} else {
			error = EINVAL;
		}
		VLAN_SUNLOCK();
		break;

	case SIOCSIFMEDIA:
		/* Media is inherited from the parent; not settable here. */
		error = EINVAL;
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 * MTU must fit within the parent's MTU minus the
		 * encapsulation fudge, and be at least the minimum.
		 */
		VLAN_SLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			TRUNK_WLOCK(trunk);
			if (ifr->ifr_mtu >
			    (PARENT(ifv)->if_mtu - ifv->ifv_mtufudge) ||
			    ifr->ifr_mtu <
			    (ifv->ifv_mintu - ifv->ifv_mtufudge))
				error = EINVAL;
			else
				ifp->if_mtu = ifr->ifr_mtu;
			TRUNK_WUNLOCK(trunk);
		} else
			error = EINVAL;
		VLAN_SUNLOCK();
		break;

	case SIOCSETVLAN:
#ifdef VIMAGE
		/*
		 * XXXRW/XXXBZ: The goal in these checks is to allow a VLAN
		 * interface to be delegated to a jail without allowing the
		 * jail to change what underlying interface/VID it is
		 * associated with.  We are not entirely convinced that this
		 * is the right way to accomplish that policy goal.
		 */
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = copyin(ifr_data_get_ptr(ifr), &vlr, sizeof(vlr));
		if (error)
			break;
		if (vlr.vlr_parent[0] == '\0') {
			/* Empty parent name means detach from the trunk. */
			vlan_unconfig(ifp);
			break;
		}
		p = ifunit_ref(vlr.vlr_parent);
		if (p == NULL) {
			error = ENOENT;
			break;
		}
		error = vlan_config(ifv, p, vlr.vlr_tag);
		if_rele(p);
		break;

	case SIOCGETVLAN:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		bzero(&vlr, sizeof(vlr));
		VLAN_SLOCK();
		if (TRUNK(ifv) != NULL) {
			strlcpy(vlr.vlr_parent, PARENT(ifv)->if_xname,
			    sizeof(vlr.vlr_parent));
			vlr.vlr_tag = ifv->ifv_vid;
		}
		VLAN_SUNLOCK();
		error = copyout(&vlr, ifr_data_get_ptr(ifr), sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		/*
		 * We should propagate selected flags to the parent,
		 * e.g., promiscuous mode.
		 */
		VLAN_XLOCK();
		if (TRUNK(ifv) != NULL)
			error = vlan_setflags(ifp, 1);
		VLAN_XUNLOCK();
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * If we don't have a parent, just remember the membership for
		 * when we do.
		 *
		 * XXX We need the rmlock here to avoid sleeping while
		 * holding in6_multi_mtx.
		 */
		VLAN_XLOCK();
		trunk = TRUNK(ifv);
		if (trunk != NULL)
			error = vlan_setmulti(ifp);
		VLAN_XUNLOCK();

		break;
	case SIOCGVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		ifr->ifr_vlan_pcp = ifv->ifv_pcp;
		break;

	case SIOCSVLANPCP:
#ifdef VIMAGE
		if (ifp->if_vnet != ifp->if_home_vnet) {
			error = EPERM;
			break;
		}
#endif
		error = priv_check(curthread, PRIV_NET_SETVLANPCP);
		if (error)
			break;
		/* 802.1p priority code points are 3 bits: 0..7. */
		if (ifr->ifr_vlan_pcp > 7) {
			error = EINVAL;
			break;
		}
		ifv->ifv_pcp = ifr->ifr_vlan_pcp;
		ifp->if_pcp = ifv->ifv_pcp;
		vlan_tag_recalculate(ifv);
		/* broadcast event about PCP change */
		EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
		break;

	case SIOCSIFCAP:
		VLAN_SLOCK();
		ifv->ifv_capenable = ifr->ifr_reqcap;
		trunk = TRUNK(ifv);
		if (trunk != NULL) {
			/* Re-derive effective capabilities from the parent. */
			TRUNK_RLOCK(trunk);
			vlan_capabilities(ifv);
			TRUNK_RUNLOCK(trunk);
		}
		VLAN_SUNLOCK();
		break;

	default:
		error = EINVAL;
		break;
	}

	return (error);
}

#ifdef RATELIMIT
/*
 * Forward a transmit rate-limit tag allocation to the trunk device,
 * which owns the actual hardware queues.
 */
static int
vlan_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{

	/* get trunk device */
	ifp = vlan_trunkdev(ifp);
	if (ifp == NULL || (ifp->if_capenable & IFCAP_TXRTLMT) == 0)
		return (EOPNOTSUPP);
	/* forward allocation request */
	return (ifp->if_snd_tag_alloc(ifp, params, ppmt));
}
#endif