1 /* 2 * Copyright (c) 1980, 1986, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)if.c 8.3 (Berkeley) 1/4/94 30 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $ 31 */ 32 33 #include "opt_compat.h" 34 #include "opt_inet6.h" 35 #include "opt_inet.h" 36 #include "opt_ifpoll.h" 37 38 #include <sys/param.h> 39 #include <sys/malloc.h> 40 #include <sys/mbuf.h> 41 #include <sys/systm.h> 42 #include <sys/proc.h> 43 #include <sys/priv.h> 44 #include <sys/protosw.h> 45 #include <sys/socket.h> 46 #include <sys/socketvar.h> 47 #include <sys/socketops.h> 48 #include <sys/kernel.h> 49 #include <sys/ktr.h> 50 #include <sys/mutex.h> 51 #include <sys/sockio.h> 52 #include <sys/syslog.h> 53 #include <sys/sysctl.h> 54 #include <sys/domain.h> 55 #include <sys/thread.h> 56 #include <sys/serialize.h> 57 #include <sys/bus.h> 58 59 #include <sys/thread2.h> 60 #include <sys/msgport2.h> 61 #include <sys/mutex2.h> 62 63 #include <net/if.h> 64 #include <net/if_arp.h> 65 #include <net/if_dl.h> 66 #include <net/if_types.h> 67 #include <net/if_var.h> 68 #include <net/ifq_var.h> 69 #include <net/radix.h> 70 #include <net/route.h> 71 #include <net/if_clone.h> 72 #include <net/netisr2.h> 73 #include <net/netmsg2.h> 74 75 #include <machine/atomic.h> 76 #include <machine/stdarg.h> 77 #include <machine/smp.h> 78 79 #if defined(INET) || defined(INET6) 80 /*XXX*/ 81 #include <netinet/in.h> 82 #include <netinet/in_var.h> 83 #include <netinet/if_ether.h> 84 #ifdef INET6 85 #include <netinet6/in6_var.h> 86 #include <netinet6/in6_ifattach.h> 87 #endif 88 #endif 89 90 #if defined(COMPAT_43) 91 #include <emulation/43bsd/43bsd_socket.h> 92 #endif /* COMPAT_43 */ 93 94 struct netmsg_ifaddr { 95 struct netmsg_base base; 96 struct ifaddr *ifa; 97 struct ifnet *ifp; 98 int tail; 99 }; 100 101 struct ifsubq_stage_head { 102 TAILQ_HEAD(, ifsubq_stage) stg_head; 103 } __cachealign; 104 105 /* 106 * System initialization 107 */ 108 static void if_attachdomain(void *); 109 static void if_attachdomain1(struct ifnet *); 110 static int ifconf(u_long, caddr_t, struct ucred *); 111 static void ifinit(void *); 112 static void ifnetinit(void *); 113 static void if_slowtimo(void *); 114 static void link_rtrequest(int, struct rtentry *); 115 static int if_rtdel(struct radix_node *, void *); 116 static void if_slowtimo_dispatch(netmsg_t); 117 118 /* Helper functions */ 119 static void ifsq_watchdog_reset(struct ifsubq_watchdog *); 120 static int if_delmulti_serialized(struct ifnet *, struct sockaddr *); 121 static struct ifnet_array *ifnet_array_alloc(int); 122 static void ifnet_array_free(struct ifnet_array *); 123 static struct ifnet_array *ifnet_array_add(struct ifnet *, 124 const struct ifnet_array *); 125 static struct ifnet_array *ifnet_array_del(struct ifnet *, 126 const struct ifnet_array *); 127 128 #ifdef INET6 129 /* 130 * XXX: declare here to avoid to include many inet6 related files.. 131 * should be more generalized? 132 */ 133 extern void nd6_setmtu(struct ifnet *); 134 #endif 135 136 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); 137 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); 138 139 static int ifsq_stage_cntmax = 4; 140 TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax); 141 SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW, 142 &ifsq_stage_cntmax, 0, "ifq staging packet count max"); 143 144 static int if_stats_compat = 0; 145 SYSCTL_INT(_net_link, OID_AUTO, stats_compat, CTLFLAG_RW, 146 &if_stats_compat, 0, "Compat the old ifnet stats"); 147 148 SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL); 149 /* Must be after netisr_init */ 150 SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL); 151 152 static if_com_alloc_t *if_com_alloc[256]; 153 static if_com_free_t *if_com_free[256]; 154 155 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); 156 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); 157 MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure"); 158 159 int ifqmaxlen = IFQ_MAXLEN; 160 struct ifnethead ifnet = TAILQ_HEAD_INITIALIZER(ifnet); 161 162 static struct ifnet_array ifnet_array0; 163 static struct ifnet_array *ifnet_array = &ifnet_array0; 164 165 static struct callout if_slowtimo_timer; 166 static struct netmsg_base if_slowtimo_netmsg; 167 168 int if_index = 0; 169 struct ifnet **ifindex2ifnet = NULL; 170 static struct thread ifnet_threads[MAXCPU]; 171 static struct mtx ifnet_mtx = MTX_INITIALIZER("ifnet"); 172 173 static struct ifsubq_stage_head ifsubq_stage_heads[MAXCPU]; 174 175 #ifdef notyet 176 #define IFQ_KTR_STRING "ifq=%p" 177 #define IFQ_KTR_ARGS struct ifaltq *ifq 178 #ifndef KTR_IFQ 179 #define KTR_IFQ KTR_ALL 180 #endif 181 KTR_INFO_MASTER(ifq); 182 KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS); 183 KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS); 184 #define logifq(name, arg) KTR_LOG(ifq_ ## name, arg) 185 186 #define IF_START_KTR_STRING "ifp=%p" 187 #define IF_START_KTR_ARGS struct ifnet *ifp 188 #ifndef KTR_IF_START 189 #define KTR_IF_START KTR_ALL 190 #endif 191 KTR_INFO_MASTER(if_start); 192 KTR_INFO(KTR_IF_START, if_start, run, 0, 193 IF_START_KTR_STRING, IF_START_KTR_ARGS); 194 KTR_INFO(KTR_IF_START, if_start, sched, 1, 195 IF_START_KTR_STRING, IF_START_KTR_ARGS); 196 KTR_INFO(KTR_IF_START, if_start, avoid, 2, 197 IF_START_KTR_STRING, IF_START_KTR_ARGS); 198 KTR_INFO(KTR_IF_START, if_start, contend_sched, 3, 199 IF_START_KTR_STRING, IF_START_KTR_ARGS); 200 KTR_INFO(KTR_IF_START, if_start, chase_sched, 4, 201 IF_START_KTR_STRING, IF_START_KTR_ARGS); 202 #define logifstart(name, arg) KTR_LOG(if_start_ ## name, arg) 203 #endif 204 205 TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head); 206 207 /* 208 * Network interface utility routines. 209 * 210 * Routines with ifa_ifwith* names take sockaddr *'s as 211 * parameters. 212 */ 213 /* ARGSUSED*/ 214 static void 215 ifinit(void *dummy) 216 { 217 struct ifnet *ifp; 218 219 callout_init_mp(&if_slowtimo_timer); 220 netmsg_init(&if_slowtimo_netmsg, NULL, &netisr_adone_rport, 221 MSGF_PRIORITY, if_slowtimo_dispatch); 222 223 /* XXX is this necessary? */ 224 ifnet_lock(); 225 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 226 if (ifp->if_snd.altq_maxlen == 0) { 227 if_printf(ifp, "XXX: driver didn't set altq_maxlen\n"); 228 ifq_set_maxlen(&ifp->if_snd, ifqmaxlen); 229 } 230 } 231 ifnet_unlock(); 232 233 /* Start if_slowtimo */ 234 lwkt_sendmsg(netisr_cpuport(0), &if_slowtimo_netmsg.lmsg); 235 } 236 237 static void 238 ifsq_ifstart_ipifunc(void *arg) 239 { 240 struct ifaltq_subque *ifsq = arg; 241 struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid); 242 243 crit_enter(); 244 if (lmsg->ms_flags & MSGF_DONE) 245 lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), lmsg); 246 crit_exit(); 247 } 248 249 static __inline void 250 ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 251 { 252 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 253 TAILQ_REMOVE(&head->stg_head, stage, stg_link); 254 stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED); 255 stage->stg_cnt = 0; 256 stage->stg_len = 0; 257 } 258 259 static __inline void 260 ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 261 { 262 KKASSERT((stage->stg_flags & 263 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 264 stage->stg_flags |= IFSQ_STAGE_FLAG_QUED; 265 TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link); 266 } 267 268 /* 269 * Schedule ifnet.if_start on the subqueue owner CPU 270 */ 271 static void 272 ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force) 273 { 274 int cpu; 275 276 if (!force && curthread->td_type == TD_TYPE_NETISR && 277 ifsq_stage_cntmax > 0) { 278 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 279 280 stage->stg_cnt = 0; 281 stage->stg_len = 0; 282 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 283 ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage); 284 stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED; 285 return; 286 } 287 288 cpu = ifsq_get_cpuid(ifsq); 289 if (cpu != mycpuid) 290 lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq); 291 else 292 ifsq_ifstart_ipifunc(ifsq); 293 } 294 295 /* 296 * NOTE: 297 * This function will release ifnet.if_start subqueue interlock, 298 * if ifnet.if_start for the subqueue does not need to be scheduled 299 */ 300 static __inline int 301 ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running) 302 { 303 if (!running || ifsq_is_empty(ifsq) 304 #ifdef ALTQ 305 || ifsq->ifsq_altq->altq_tbr != NULL 306 #endif 307 ) { 308 ALTQ_SQ_LOCK(ifsq); 309 /* 310 * ifnet.if_start subqueue interlock is released, if: 311 * 1) Hardware can not take any packets, due to 312 * o interface is marked down 313 * o hardware queue is full (ifsq_is_oactive) 314 * Under the second situation, hardware interrupt 315 * or polling(4) will call/schedule ifnet.if_start 316 * on the subqueue when hardware queue is ready 317 * 2) There is no packet in the subqueue. 318 * Further ifq_dispatch or ifq_handoff will call/ 319 * schedule ifnet.if_start on the subqueue. 320 * 3) TBR is used and it does not allow further 321 * dequeueing. 322 * TBR callout will call ifnet.if_start on the 323 * subqueue. 324 */ 325 if (!running || !ifsq_data_ready(ifsq)) { 326 ifsq_clr_started(ifsq); 327 ALTQ_SQ_UNLOCK(ifsq); 328 return 0; 329 } 330 ALTQ_SQ_UNLOCK(ifsq); 331 } 332 return 1; 333 } 334 335 static void 336 ifsq_ifstart_dispatch(netmsg_t msg) 337 { 338 struct lwkt_msg *lmsg = &msg->base.lmsg; 339 struct ifaltq_subque *ifsq = lmsg->u.ms_resultp; 340 struct ifnet *ifp = ifsq_get_ifp(ifsq); 341 struct globaldata *gd = mycpu; 342 int running = 0, need_sched; 343 344 crit_enter_gd(gd); 345 346 lwkt_replymsg(lmsg, 0); /* reply ASAP */ 347 348 if (gd->gd_cpuid != ifsq_get_cpuid(ifsq)) { 349 /* 350 * We need to chase the subqueue owner CPU change. 351 */ 352 ifsq_ifstart_schedule(ifsq, 1); 353 crit_exit_gd(gd); 354 return; 355 } 356 357 ifsq_serialize_hw(ifsq); 358 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 359 ifp->if_start(ifp, ifsq); 360 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 361 running = 1; 362 } 363 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 364 ifsq_deserialize_hw(ifsq); 365 366 if (need_sched) { 367 /* 368 * More data need to be transmitted, ifnet.if_start is 369 * scheduled on the subqueue owner CPU, and we keep going. 370 * NOTE: ifnet.if_start subqueue interlock is not released. 371 */ 372 ifsq_ifstart_schedule(ifsq, 0); 373 } 374 375 crit_exit_gd(gd); 376 } 377 378 /* Device driver ifnet.if_start helper function */ 379 void 380 ifsq_devstart(struct ifaltq_subque *ifsq) 381 { 382 struct ifnet *ifp = ifsq_get_ifp(ifsq); 383 int running = 0; 384 385 ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq); 386 387 ALTQ_SQ_LOCK(ifsq); 388 if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) { 389 ALTQ_SQ_UNLOCK(ifsq); 390 return; 391 } 392 ifsq_set_started(ifsq); 393 ALTQ_SQ_UNLOCK(ifsq); 394 395 ifp->if_start(ifp, ifsq); 396 397 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 398 running = 1; 399 400 if (ifsq_ifstart_need_schedule(ifsq, running)) { 401 /* 402 * More data need to be transmitted, ifnet.if_start is 403 * scheduled on ifnet's CPU, and we keep going. 404 * NOTE: ifnet.if_start interlock is not released. 405 */ 406 ifsq_ifstart_schedule(ifsq, 0); 407 } 408 } 409 410 void 411 if_devstart(struct ifnet *ifp) 412 { 413 ifsq_devstart(ifq_get_subq_default(&ifp->if_snd)); 414 } 415 416 /* Device driver ifnet.if_start schedule helper function */ 417 void 418 ifsq_devstart_sched(struct ifaltq_subque *ifsq) 419 { 420 ifsq_ifstart_schedule(ifsq, 1); 421 } 422 423 void 424 if_devstart_sched(struct ifnet *ifp) 425 { 426 ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd)); 427 } 428 429 static void 430 if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 431 { 432 lwkt_serialize_enter(ifp->if_serializer); 433 } 434 435 static void 436 if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 437 { 438 lwkt_serialize_exit(ifp->if_serializer); 439 } 440 441 static int 442 if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 443 { 444 return lwkt_serialize_try(ifp->if_serializer); 445 } 446 447 #ifdef INVARIANTS 448 static void 449 if_default_serialize_assert(struct ifnet *ifp, 450 enum ifnet_serialize slz __unused, 451 boolean_t serialized) 452 { 453 if (serialized) 454 ASSERT_SERIALIZED(ifp->if_serializer); 455 else 456 ASSERT_NOT_SERIALIZED(ifp->if_serializer); 457 } 458 #endif 459 460 /* 461 * Attach an interface to the list of "active" interfaces. 462 * 463 * The serializer is optional. 464 */ 465 void 466 if_attach(struct ifnet *ifp, lwkt_serialize_t serializer) 467 { 468 unsigned socksize; 469 int namelen, masklen; 470 struct sockaddr_dl *sdl, *sdl_addr; 471 struct ifaddr *ifa; 472 struct ifaltq *ifq; 473 struct ifnet **old_ifindex2ifnet = NULL; 474 struct ifnet_array *old_ifnet_array; 475 int i, q; 476 477 static int if_indexlim = 8; 478 479 if (ifp->if_serialize != NULL) { 480 KASSERT(ifp->if_deserialize != NULL && 481 ifp->if_tryserialize != NULL && 482 ifp->if_serialize_assert != NULL, 483 ("serialize functions are partially setup")); 484 485 /* 486 * If the device supplies serialize functions, 487 * then clear if_serializer to catch any invalid 488 * usage of this field. 489 */ 490 KASSERT(serializer == NULL, 491 ("both serialize functions and default serializer " 492 "are supplied")); 493 ifp->if_serializer = NULL; 494 } else { 495 KASSERT(ifp->if_deserialize == NULL && 496 ifp->if_tryserialize == NULL && 497 ifp->if_serialize_assert == NULL, 498 ("serialize functions are partially setup")); 499 ifp->if_serialize = if_default_serialize; 500 ifp->if_deserialize = if_default_deserialize; 501 ifp->if_tryserialize = if_default_tryserialize; 502 #ifdef INVARIANTS 503 ifp->if_serialize_assert = if_default_serialize_assert; 504 #endif 505 506 /* 507 * The serializer can be passed in from the device, 508 * allowing the same serializer to be used for both 509 * the interrupt interlock and the device queue. 510 * If not specified, the netif structure will use an 511 * embedded serializer. 512 */ 513 if (serializer == NULL) { 514 serializer = &ifp->if_default_serializer; 515 lwkt_serialize_init(serializer); 516 } 517 ifp->if_serializer = serializer; 518 } 519 520 /* 521 * XXX - 522 * The old code would work if the interface passed a pre-existing 523 * chain of ifaddrs to this code. We don't trust our callers to 524 * properly initialize the tailq, however, so we no longer allow 525 * this unlikely case. 526 */ 527 ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead), 528 M_IFADDR, M_WAITOK | M_ZERO); 529 for (i = 0; i < ncpus; ++i) 530 TAILQ_INIT(&ifp->if_addrheads[i]); 531 532 TAILQ_INIT(&ifp->if_multiaddrs); 533 TAILQ_INIT(&ifp->if_groups); 534 getmicrotime(&ifp->if_lastchange); 535 536 /* 537 * create a Link Level name for this device 538 */ 539 namelen = strlen(ifp->if_xname); 540 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen; 541 socksize = masklen + ifp->if_addrlen; 542 if (socksize < sizeof(*sdl)) 543 socksize = sizeof(*sdl); 544 socksize = RT_ROUNDUP(socksize); 545 ifa = ifa_create(sizeof(struct ifaddr) + 2 * socksize); 546 sdl = sdl_addr = (struct sockaddr_dl *)(ifa + 1); 547 sdl->sdl_len = socksize; 548 sdl->sdl_family = AF_LINK; 549 bcopy(ifp->if_xname, sdl->sdl_data, namelen); 550 sdl->sdl_nlen = namelen; 551 sdl->sdl_type = ifp->if_type; 552 ifp->if_lladdr = ifa; 553 ifa->ifa_ifp = ifp; 554 ifa->ifa_rtrequest = link_rtrequest; 555 ifa->ifa_addr = (struct sockaddr *)sdl; 556 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); 557 ifa->ifa_netmask = (struct sockaddr *)sdl; 558 sdl->sdl_len = masklen; 559 while (namelen != 0) 560 sdl->sdl_data[--namelen] = 0xff; 561 ifa_iflink(ifa, ifp, 0 /* Insert head */); 562 563 ifp->if_data_pcpu = kmalloc_cachealign( 564 ncpus * sizeof(struct ifdata_pcpu), M_DEVBUF, M_WAITOK | M_ZERO); 565 566 if (ifp->if_mapsubq == NULL) 567 ifp->if_mapsubq = ifq_mapsubq_default; 568 569 ifq = &ifp->if_snd; 570 ifq->altq_type = 0; 571 ifq->altq_disc = NULL; 572 ifq->altq_flags &= ALTQF_CANTCHANGE; 573 ifq->altq_tbr = NULL; 574 ifq->altq_ifp = ifp; 575 576 if (ifq->altq_subq_cnt <= 0) 577 ifq->altq_subq_cnt = 1; 578 ifq->altq_subq = kmalloc_cachealign( 579 ifq->altq_subq_cnt * sizeof(struct ifaltq_subque), 580 M_DEVBUF, M_WAITOK | M_ZERO); 581 582 if (ifq->altq_maxlen == 0) { 583 if_printf(ifp, "driver didn't set altq_maxlen\n"); 584 ifq_set_maxlen(ifq, ifqmaxlen); 585 } 586 587 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 588 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 589 590 ALTQ_SQ_LOCK_INIT(ifsq); 591 ifsq->ifsq_index = q; 592 593 ifsq->ifsq_altq = ifq; 594 ifsq->ifsq_ifp = ifp; 595 596 ifsq->ifsq_maxlen = ifq->altq_maxlen; 597 ifsq->ifsq_maxbcnt = ifsq->ifsq_maxlen * MCLBYTES; 598 ifsq->ifsq_prepended = NULL; 599 ifsq->ifsq_started = 0; 600 ifsq->ifsq_hw_oactive = 0; 601 ifsq_set_cpuid(ifsq, 0); 602 if (ifp->if_serializer != NULL) 603 ifsq_set_hw_serialize(ifsq, ifp->if_serializer); 604 605 ifsq->ifsq_stage = 606 kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage), 607 M_DEVBUF, M_WAITOK | M_ZERO); 608 for (i = 0; i < ncpus; ++i) 609 ifsq->ifsq_stage[i].stg_subq = ifsq; 610 611 ifsq->ifsq_ifstart_nmsg = 612 kmalloc(ncpus * sizeof(struct netmsg_base), 613 M_LWKTMSG, M_WAITOK); 614 for (i = 0; i < ncpus; ++i) { 615 netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL, 616 &netisr_adone_rport, 0, ifsq_ifstart_dispatch); 617 ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq; 618 } 619 } 620 ifq_set_classic(ifq); 621 622 /* 623 * Increase mbuf cluster/jcluster limits for the mbufs that 624 * could sit on the device queues for quite some time. 625 */ 626 if (ifp->if_nmbclusters > 0) 627 mcl_inclimit(ifp->if_nmbclusters); 628 if (ifp->if_nmbjclusters > 0) 629 mjcl_inclimit(ifp->if_nmbjclusters); 630 631 /* 632 * Install this ifp into ifindex2inet, ifnet queue and ifnet 633 * array after it is setup. 634 * 635 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 636 * by ifnet lock, so that non-netisr threads could get a 637 * consistent view. 638 */ 639 ifnet_lock(); 640 641 /* Don't update if_index until ifindex2ifnet is setup */ 642 ifp->if_index = if_index + 1; 643 sdl_addr->sdl_index = ifp->if_index; 644 645 /* 646 * Install this ifp into ifindex2ifnet 647 */ 648 if (ifindex2ifnet == NULL || ifp->if_index >= if_indexlim) { 649 unsigned int n; 650 struct ifnet **q; 651 652 /* 653 * Grow ifindex2ifnet 654 */ 655 if_indexlim <<= 1; 656 n = if_indexlim * sizeof(*q); 657 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO); 658 if (ifindex2ifnet != NULL) { 659 bcopy(ifindex2ifnet, q, n/2); 660 /* Free old ifindex2ifnet after sync all netisrs */ 661 old_ifindex2ifnet = ifindex2ifnet; 662 } 663 ifindex2ifnet = q; 664 } 665 ifindex2ifnet[ifp->if_index] = ifp; 666 /* 667 * Update if_index after this ifp is installed into ifindex2ifnet, 668 * so that netisrs could get a consistent view of ifindex2ifnet. 669 */ 670 cpu_sfence(); 671 if_index = ifp->if_index; 672 673 /* 674 * Install this ifp into ifnet array. 675 */ 676 /* Free old ifnet array after sync all netisrs */ 677 old_ifnet_array = ifnet_array; 678 ifnet_array = ifnet_array_add(ifp, old_ifnet_array); 679 680 /* 681 * Install this ifp into ifnet queue. 682 */ 683 TAILQ_INSERT_TAIL(&ifnetlist, ifp, if_link); 684 685 ifnet_unlock(); 686 687 /* 688 * Sync all netisrs so that the old ifindex2ifnet and ifnet array 689 * are no longer accessed and we can free them safely later on. 690 */ 691 netmsg_service_sync(); 692 if (old_ifindex2ifnet != NULL) 693 kfree(old_ifindex2ifnet, M_IFADDR); 694 ifnet_array_free(old_ifnet_array); 695 696 if (!SLIST_EMPTY(&domains)) 697 if_attachdomain1(ifp); 698 699 /* Announce the interface. */ 700 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 701 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); 702 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 703 } 704 705 static void 706 if_attachdomain(void *dummy) 707 { 708 struct ifnet *ifp; 709 710 ifnet_lock(); 711 TAILQ_FOREACH(ifp, &ifnetlist, if_list) 712 if_attachdomain1(ifp); 713 ifnet_unlock(); 714 } 715 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST, 716 if_attachdomain, NULL); 717 718 static void 719 if_attachdomain1(struct ifnet *ifp) 720 { 721 struct domain *dp; 722 723 crit_enter(); 724 725 /* address family dependent data region */ 726 bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); 727 SLIST_FOREACH(dp, &domains, dom_next) 728 if (dp->dom_ifattach) 729 ifp->if_afdata[dp->dom_family] = 730 (*dp->dom_ifattach)(ifp); 731 crit_exit(); 732 } 733 734 /* 735 * Purge all addresses whose type is _not_ AF_LINK 736 */ 737 static void 738 if_purgeaddrs_nolink_dispatch(netmsg_t nmsg) 739 { 740 struct lwkt_msg *lmsg = &nmsg->lmsg; 741 struct ifnet *ifp = lmsg->u.ms_resultp; 742 struct ifaddr_container *ifac, *next; 743 744 ASSERT_IN_NETISR(0); 745 746 /* 747 * The ifaddr processing in the following loop will block, 748 * however, this function is called in netisr0, in which 749 * ifaddr list changes happen, so we don't care about the 750 * blockness of the ifaddr processing here. 751 */ 752 TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid], 753 ifa_link, next) { 754 struct ifaddr *ifa = ifac->ifa; 755 756 /* Ignore marker */ 757 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 758 continue; 759 760 /* Leave link ifaddr as it is */ 761 if (ifa->ifa_addr->sa_family == AF_LINK) 762 continue; 763 #ifdef INET 764 /* XXX: Ugly!! ad hoc just for INET */ 765 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) { 766 struct ifaliasreq ifr; 767 #ifdef IFADDR_DEBUG_VERBOSE 768 int i; 769 770 kprintf("purge in4 addr %p: ", ifa); 771 for (i = 0; i < ncpus; ++i) 772 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt); 773 kprintf("\n"); 774 #endif 775 776 bzero(&ifr, sizeof ifr); 777 ifr.ifra_addr = *ifa->ifa_addr; 778 if (ifa->ifa_dstaddr) 779 ifr.ifra_broadaddr = *ifa->ifa_dstaddr; 780 if (in_control(SIOCDIFADDR, (caddr_t)&ifr, ifp, 781 NULL) == 0) 782 continue; 783 } 784 #endif /* INET */ 785 #ifdef INET6 786 if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) { 787 #ifdef IFADDR_DEBUG_VERBOSE 788 int i; 789 790 kprintf("purge in6 addr %p: ", ifa); 791 for (i = 0; i < ncpus; ++i) 792 kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt); 793 kprintf("\n"); 794 #endif 795 796 in6_purgeaddr(ifa); 797 /* ifp_addrhead is already updated */ 798 continue; 799 } 800 #endif /* INET6 */ 801 ifa_ifunlink(ifa, ifp); 802 ifa_destroy(ifa); 803 } 804 805 lwkt_replymsg(lmsg, 0); 806 } 807 808 void 809 if_purgeaddrs_nolink(struct ifnet *ifp) 810 { 811 struct netmsg_base nmsg; 812 struct lwkt_msg *lmsg = &nmsg.lmsg; 813 814 ASSERT_CANDOMSG_NETISR0(curthread); 815 816 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, 817 if_purgeaddrs_nolink_dispatch); 818 lmsg->u.ms_resultp = ifp; 819 lwkt_domsg(netisr_cpuport(0), lmsg, 0); 820 } 821 822 static void 823 ifq_stage_detach_handler(netmsg_t nmsg) 824 { 825 struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp; 826 int q; 827 828 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 829 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 830 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 831 832 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) 833 ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage); 834 } 835 lwkt_replymsg(&nmsg->lmsg, 0); 836 } 837 838 static void 839 ifq_stage_detach(struct ifaltq *ifq) 840 { 841 struct netmsg_base base; 842 int cpu; 843 844 netmsg_init(&base, NULL, &curthread->td_msgport, 0, 845 ifq_stage_detach_handler); 846 base.lmsg.u.ms_resultp = ifq; 847 848 for (cpu = 0; cpu < ncpus; ++cpu) 849 lwkt_domsg(netisr_cpuport(cpu), &base.lmsg, 0); 850 } 851 852 struct netmsg_if_rtdel { 853 struct netmsg_base base; 854 struct ifnet *ifp; 855 }; 856 857 static void 858 if_rtdel_dispatch(netmsg_t msg) 859 { 860 struct netmsg_if_rtdel *rmsg = (void *)msg; 861 int i, nextcpu, cpu; 862 863 cpu = mycpuid; 864 for (i = 1; i <= AF_MAX; i++) { 865 struct radix_node_head *rnh; 866 867 if ((rnh = rt_tables[cpu][i]) == NULL) 868 continue; 869 rnh->rnh_walktree(rnh, if_rtdel, rmsg->ifp); 870 } 871 872 nextcpu = cpu + 1; 873 if (nextcpu < ncpus) 874 lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg); 875 else 876 lwkt_replymsg(&rmsg->base.lmsg, 0); 877 } 878 879 /* 880 * Detach an interface, removing it from the 881 * list of "active" interfaces. 882 */ 883 void 884 if_detach(struct ifnet *ifp) 885 { 886 struct ifnet_array *old_ifnet_array; 887 struct netmsg_if_rtdel msg; 888 struct domain *dp; 889 int q; 890 891 /* Announce that the interface is gone. */ 892 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp); 893 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 894 devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL); 895 896 /* 897 * Remove this ifp from ifindex2inet, ifnet queue and ifnet 898 * array before it is whacked. 899 * 900 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 901 * by ifnet lock, so that non-netisr threads could get a 902 * consistent view. 903 */ 904 ifnet_lock(); 905 906 /* 907 * Remove this ifp from ifindex2ifnet and maybe decrement if_index. 908 */ 909 ifindex2ifnet[ifp->if_index] = NULL; 910 while (if_index > 0 && ifindex2ifnet[if_index] == NULL) 911 if_index--; 912 913 /* 914 * Remove this ifp from ifnet queue. 915 */ 916 TAILQ_REMOVE(&ifnetlist, ifp, if_link); 917 918 /* 919 * Remove this ifp from ifnet array. 920 */ 921 /* Free old ifnet array after sync all netisrs */ 922 old_ifnet_array = ifnet_array; 923 ifnet_array = ifnet_array_del(ifp, old_ifnet_array); 924 925 ifnet_unlock(); 926 927 /* 928 * Sync all netisrs so that the old ifnet array is no longer 929 * accessed and we can free it safely later on. 930 */ 931 netmsg_service_sync(); 932 ifnet_array_free(old_ifnet_array); 933 934 /* 935 * Remove routes and flush queues. 936 */ 937 crit_enter(); 938 #ifdef IFPOLL_ENABLE 939 if (ifp->if_flags & IFF_NPOLLING) 940 ifpoll_deregister(ifp); 941 #endif 942 if_down(ifp); 943 944 /* Decrease the mbuf clusters/jclusters limits increased by us */ 945 if (ifp->if_nmbclusters > 0) 946 mcl_inclimit(-ifp->if_nmbclusters); 947 if (ifp->if_nmbjclusters > 0) 948 mjcl_inclimit(-ifp->if_nmbjclusters); 949 950 #ifdef ALTQ 951 if (ifq_is_enabled(&ifp->if_snd)) 952 altq_disable(&ifp->if_snd); 953 if (ifq_is_attached(&ifp->if_snd)) 954 altq_detach(&ifp->if_snd); 955 #endif 956 957 /* 958 * Clean up all addresses. 959 */ 960 ifp->if_lladdr = NULL; 961 962 if_purgeaddrs_nolink(ifp); 963 if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) { 964 struct ifaddr *ifa; 965 966 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 967 KASSERT(ifa->ifa_addr->sa_family == AF_LINK, 968 ("non-link ifaddr is left on if_addrheads")); 969 970 ifa_ifunlink(ifa, ifp); 971 ifa_destroy(ifa); 972 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]), 973 ("there are still ifaddrs left on if_addrheads")); 974 } 975 976 #ifdef INET 977 /* 978 * Remove all IPv4 kernel structures related to ifp. 979 */ 980 in_ifdetach(ifp); 981 #endif 982 983 #ifdef INET6 984 /* 985 * Remove all IPv6 kernel structs related to ifp. This should be done 986 * before removing routing entries below, since IPv6 interface direct 987 * routes are expected to be removed by the IPv6-specific kernel API. 988 * Otherwise, the kernel will detect some inconsistency and bark it. 989 */ 990 in6_ifdetach(ifp); 991 #endif 992 993 /* 994 * Delete all remaining routes using this interface 995 */ 996 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, 997 if_rtdel_dispatch); 998 msg.ifp = ifp; 999 rt_domsg_global(&msg.base); 1000 1001 SLIST_FOREACH(dp, &domains, dom_next) 1002 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) 1003 (*dp->dom_ifdetach)(ifp, 1004 ifp->if_afdata[dp->dom_family]); 1005 1006 kfree(ifp->if_addrheads, M_IFADDR); 1007 1008 lwkt_synchronize_ipiqs("if_detach"); 1009 ifq_stage_detach(&ifp->if_snd); 1010 1011 for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) { 1012 struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q]; 1013 1014 kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG); 1015 kfree(ifsq->ifsq_stage, M_DEVBUF); 1016 } 1017 kfree(ifp->if_snd.altq_subq, M_DEVBUF); 1018 1019 kfree(ifp->if_data_pcpu, M_DEVBUF); 1020 1021 crit_exit(); 1022 } 1023 1024 /* 1025 * Create interface group without members 1026 */ 1027 struct ifg_group * 1028 if_creategroup(const char *groupname) 1029 { 1030 struct ifg_group *ifg = NULL; 1031 1032 if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group), 1033 M_TEMP, M_NOWAIT)) == NULL) 1034 return (NULL); 1035 1036 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); 1037 ifg->ifg_refcnt = 0; 1038 ifg->ifg_carp_demoted = 0; 1039 TAILQ_INIT(&ifg->ifg_members); 1040 #if NPF > 0 1041 pfi_attach_ifgroup(ifg); 1042 #endif 1043 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next); 1044 1045 return (ifg); 1046 } 1047 1048 /* 1049 * Add a group to an interface 1050 */ 1051 int 1052 if_addgroup(struct ifnet *ifp, const char *groupname) 1053 { 1054 struct ifg_list *ifgl; 1055 struct ifg_group *ifg = NULL; 1056 struct ifg_member *ifgm; 1057 1058 if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && 1059 groupname[strlen(groupname) - 1] <= '9') 1060 return (EINVAL); 1061 1062 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1063 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1064 return (EEXIST); 1065 1066 if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL) 1067 return (ENOMEM); 1068 1069 if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) { 1070 kfree(ifgl, M_TEMP); 1071 return (ENOMEM); 1072 } 1073 1074 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) 1075 if (!strcmp(ifg->ifg_group, groupname)) 1076 break; 1077 1078 if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) { 1079 kfree(ifgl, M_TEMP); 1080 kfree(ifgm, M_TEMP); 1081 return (ENOMEM); 1082 } 1083 1084 ifg->ifg_refcnt++; 1085 ifgl->ifgl_group = ifg; 1086 ifgm->ifgm_ifp = ifp; 1087 1088 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); 1089 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); 1090 1091 #if NPF > 0 1092 pfi_group_change(groupname); 1093 #endif 1094 1095 return (0); 1096 } 1097 1098 /* 1099 * Remove a group from an interface 1100 */ 1101 int 1102 if_delgroup(struct ifnet *ifp, const char *groupname) 1103 { 1104 struct ifg_list *ifgl; 1105 struct ifg_member *ifgm; 1106 1107 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1108 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1109 break; 1110 if (ifgl == NULL) 1111 return (ENOENT); 1112 1113 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); 1114 1115 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) 1116 if (ifgm->ifgm_ifp == ifp) 1117 break; 1118 1119 if (ifgm != NULL) { 1120 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); 1121 kfree(ifgm, M_TEMP); 1122 } 1123 1124 if (--ifgl->ifgl_group->ifg_refcnt == 0) { 1125 TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next); 1126 #if NPF > 0 1127 pfi_detach_ifgroup(ifgl->ifgl_group); 1128 #endif 1129 kfree(ifgl->ifgl_group, M_TEMP); 1130 } 1131 1132 kfree(ifgl, M_TEMP); 1133 1134 #if NPF > 0 1135 pfi_group_change(groupname); 1136 #endif 1137 1138 return (0); 1139 } 1140 1141 /* 1142 * Stores all groups from an interface in memory pointed 1143 * to by data 1144 */ 1145 int 1146 if_getgroup(caddr_t data, struct ifnet *ifp) 1147 { 1148 int len, error; 1149 struct ifg_list *ifgl; 1150 struct ifg_req ifgrq, *ifgp; 1151 struct ifgroupreq *ifgr = (struct ifgroupreq *)data; 1152 1153 if (ifgr->ifgr_len == 0) { 1154 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1155 ifgr->ifgr_len += sizeof(struct ifg_req); 1156 return (0); 1157 } 1158 1159 len = ifgr->ifgr_len; 1160 ifgp = ifgr->ifgr_groups; 1161 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { 1162 if (len < sizeof(ifgrq)) 1163 return (EINVAL); 1164 bzero(&ifgrq, sizeof ifgrq); 1165 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, 1166 sizeof(ifgrq.ifgrq_group)); 1167 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, 1168 sizeof(struct ifg_req)))) 1169 return (error); 1170 len -= sizeof(ifgrq); 1171 ifgp++; 1172 } 1173 1174 return (0); 1175 } 1176 1177 /* 1178 * Stores all members of a group in memory pointed to by data 1179 */ 1180 int 1181 if_getgroupmembers(caddr_t data) 1182 { 1183 struct ifgroupreq *ifgr = (struct ifgroupreq *)data; 1184 struct ifg_group *ifg; 1185 struct ifg_member *ifgm; 1186 struct ifg_req ifgrq, *ifgp; 1187 int len, error; 1188 1189 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) 1190 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) 1191 break; 1192 if (ifg == NULL) 1193 return (ENOENT); 1194 1195 if (ifgr->ifgr_len == 0) { 1196 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) 1197 ifgr->ifgr_len += sizeof(ifgrq); 1198 return (0); 1199 } 1200 1201 len = ifgr->ifgr_len; 1202 ifgp = ifgr->ifgr_groups; 1203 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { 1204 if (len < sizeof(ifgrq)) 1205 return (EINVAL); 1206 bzero(&ifgrq, sizeof ifgrq); 1207 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname, 1208 sizeof(ifgrq.ifgrq_member)); 1209 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, 1210 sizeof(struct ifg_req)))) 1211 return (error); 1212 len -= sizeof(ifgrq); 1213 ifgp++; 1214 } 1215 1216 return (0); 1217 } 1218 1219 /* 1220 * Delete Routes for a Network Interface 1221 * 1222 * Called for each routing entry via the rnh->rnh_walktree() call above 1223 * to delete all route entries referencing a detaching network interface. 1224 * 1225 * Arguments: 1226 * rn pointer to node in the routing table 1227 * arg argument passed to rnh->rnh_walktree() - detaching interface 1228 * 1229 * Returns: 1230 * 0 successful 1231 * errno failed - reason indicated 1232 * 1233 */ 1234 static int 1235 if_rtdel(struct radix_node *rn, void *arg) 1236 { 1237 struct rtentry *rt = (struct rtentry *)rn; 1238 struct ifnet *ifp = arg; 1239 int err; 1240 1241 if (rt->rt_ifp == ifp) { 1242 1243 /* 1244 * Protect (sorta) against walktree recursion problems 1245 * with cloned routes 1246 */ 1247 if (!(rt->rt_flags & RTF_UP)) 1248 return (0); 1249 1250 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, 1251 rt_mask(rt), rt->rt_flags, 1252 NULL); 1253 if (err) { 1254 log(LOG_WARNING, "if_rtdel: error %d\n", err); 1255 } 1256 } 1257 1258 return (0); 1259 } 1260 1261 static __inline boolean_t 1262 ifa_prefer(const struct ifaddr *cur_ifa, const struct ifaddr *old_ifa) 1263 { 1264 if (old_ifa == NULL) 1265 return TRUE; 1266 1267 if ((old_ifa->ifa_ifp->if_flags & IFF_UP) == 0 && 1268 (cur_ifa->ifa_ifp->if_flags & IFF_UP)) 1269 return TRUE; 1270 if ((old_ifa->ifa_flags & IFA_ROUTE) == 0 && 1271 (cur_ifa->ifa_flags & IFA_ROUTE)) 1272 return TRUE; 1273 return FALSE; 1274 } 1275 1276 /* 1277 * Locate an interface based on a complete address. 1278 */ 1279 struct ifaddr * 1280 ifa_ifwithaddr(struct sockaddr *addr) 1281 { 1282 const struct ifnet_array *arr; 1283 int i; 1284 1285 arr = ifnet_array_get(); 1286 for (i = 0; i < arr->ifnet_count; ++i) { 1287 struct ifnet *ifp = arr->ifnet_arr[i]; 1288 struct ifaddr_container *ifac; 1289 1290 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1291 struct ifaddr *ifa = ifac->ifa; 1292 1293 if (ifa->ifa_addr->sa_family != addr->sa_family) 1294 continue; 1295 if (sa_equal(addr, ifa->ifa_addr)) 1296 return (ifa); 1297 if ((ifp->if_flags & IFF_BROADCAST) && 1298 ifa->ifa_broadaddr && 1299 /* IPv6 doesn't have broadcast */ 1300 ifa->ifa_broadaddr->sa_len != 0 && 1301 sa_equal(ifa->ifa_broadaddr, addr)) 1302 return (ifa); 1303 } 1304 } 1305 return (NULL); 1306 } 1307 1308 /* 1309 * Locate the point to point interface with a given destination address. 1310 */ 1311 struct ifaddr * 1312 ifa_ifwithdstaddr(struct sockaddr *addr) 1313 { 1314 const struct ifnet_array *arr; 1315 int i; 1316 1317 arr = ifnet_array_get(); 1318 for (i = 0; i < arr->ifnet_count; ++i) { 1319 struct ifnet *ifp = arr->ifnet_arr[i]; 1320 struct ifaddr_container *ifac; 1321 1322 if (!(ifp->if_flags & IFF_POINTOPOINT)) 1323 continue; 1324 1325 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1326 struct ifaddr *ifa = ifac->ifa; 1327 1328 if (ifa->ifa_addr->sa_family != addr->sa_family) 1329 continue; 1330 if (ifa->ifa_dstaddr && 1331 sa_equal(addr, ifa->ifa_dstaddr)) 1332 return (ifa); 1333 } 1334 } 1335 return (NULL); 1336 } 1337 1338 /* 1339 * Find an interface on a specific network. If many, choice 1340 * is most specific found. 1341 */ 1342 struct ifaddr * 1343 ifa_ifwithnet(struct sockaddr *addr) 1344 { 1345 struct ifaddr *ifa_maybe = NULL; 1346 u_int af = addr->sa_family; 1347 char *addr_data = addr->sa_data, *cplim; 1348 const struct ifnet_array *arr; 1349 int i; 1350 1351 /* 1352 * AF_LINK addresses can be looked up directly by their index number, 1353 * so do that if we can. 1354 */ 1355 if (af == AF_LINK) { 1356 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr; 1357 1358 if (sdl->sdl_index && sdl->sdl_index <= if_index) 1359 return (ifindex2ifnet[sdl->sdl_index]->if_lladdr); 1360 } 1361 1362 /* 1363 * Scan though each interface, looking for ones that have 1364 * addresses in this address family. 1365 */ 1366 arr = ifnet_array_get(); 1367 for (i = 0; i < arr->ifnet_count; ++i) { 1368 struct ifnet *ifp = arr->ifnet_arr[i]; 1369 struct ifaddr_container *ifac; 1370 1371 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1372 struct ifaddr *ifa = ifac->ifa; 1373 char *cp, *cp2, *cp3; 1374 1375 if (ifa->ifa_addr->sa_family != af) 1376 next: continue; 1377 if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) { 1378 /* 1379 * This is a bit broken as it doesn't 1380 * take into account that the remote end may 1381 * be a single node in the network we are 1382 * looking for. 1383 * The trouble is that we don't know the 1384 * netmask for the remote end. 1385 */ 1386 if (ifa->ifa_dstaddr != NULL && 1387 sa_equal(addr, ifa->ifa_dstaddr)) 1388 return (ifa); 1389 } else { 1390 /* 1391 * if we have a special address handler, 1392 * then use it instead of the generic one. 1393 */ 1394 if (ifa->ifa_claim_addr) { 1395 if ((*ifa->ifa_claim_addr)(ifa, addr)) { 1396 return (ifa); 1397 } else { 1398 continue; 1399 } 1400 } 1401 1402 /* 1403 * Scan all the bits in the ifa's address. 1404 * If a bit dissagrees with what we are 1405 * looking for, mask it with the netmask 1406 * to see if it really matters. 1407 * (A byte at a time) 1408 */ 1409 if (ifa->ifa_netmask == 0) 1410 continue; 1411 cp = addr_data; 1412 cp2 = ifa->ifa_addr->sa_data; 1413 cp3 = ifa->ifa_netmask->sa_data; 1414 cplim = ifa->ifa_netmask->sa_len + 1415 (char *)ifa->ifa_netmask; 1416 while (cp3 < cplim) 1417 if ((*cp++ ^ *cp2++) & *cp3++) 1418 goto next; /* next address! */ 1419 /* 1420 * If the netmask of what we just found 1421 * is more specific than what we had before 1422 * (if we had one) then remember the new one 1423 * before continuing to search for an even 1424 * better one. If the netmasks are equal, 1425 * we prefer the this ifa based on the result 1426 * of ifa_prefer(). 1427 */ 1428 if (ifa_maybe == NULL || 1429 rn_refines((char *)ifa->ifa_netmask, 1430 (char *)ifa_maybe->ifa_netmask) || 1431 (sa_equal(ifa_maybe->ifa_netmask, 1432 ifa->ifa_netmask) && 1433 ifa_prefer(ifa, ifa_maybe))) 1434 ifa_maybe = ifa; 1435 } 1436 } 1437 } 1438 return (ifa_maybe); 1439 } 1440 1441 /* 1442 * Find an interface address specific to an interface best matching 1443 * a given address. 1444 */ 1445 struct ifaddr * 1446 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) 1447 { 1448 struct ifaddr_container *ifac; 1449 char *cp, *cp2, *cp3; 1450 char *cplim; 1451 struct ifaddr *ifa_maybe = NULL; 1452 u_int af = addr->sa_family; 1453 1454 if (af >= AF_MAX) 1455 return (0); 1456 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1457 struct ifaddr *ifa = ifac->ifa; 1458 1459 if (ifa->ifa_addr->sa_family != af) 1460 continue; 1461 if (ifa_maybe == NULL) 1462 ifa_maybe = ifa; 1463 if (ifa->ifa_netmask == NULL) { 1464 if (sa_equal(addr, ifa->ifa_addr) || 1465 (ifa->ifa_dstaddr != NULL && 1466 sa_equal(addr, ifa->ifa_dstaddr))) 1467 return (ifa); 1468 continue; 1469 } 1470 if (ifp->if_flags & IFF_POINTOPOINT) { 1471 if (sa_equal(addr, ifa->ifa_dstaddr)) 1472 return (ifa); 1473 } else { 1474 cp = addr->sa_data; 1475 cp2 = ifa->ifa_addr->sa_data; 1476 cp3 = ifa->ifa_netmask->sa_data; 1477 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; 1478 for (; cp3 < cplim; cp3++) 1479 if ((*cp++ ^ *cp2++) & *cp3) 1480 break; 1481 if (cp3 == cplim) 1482 return (ifa); 1483 } 1484 } 1485 return (ifa_maybe); 1486 } 1487 1488 /* 1489 * Default action when installing a route with a Link Level gateway. 1490 * Lookup an appropriate real ifa to point to. 1491 * This should be moved to /sys/net/link.c eventually. 1492 */ 1493 static void 1494 link_rtrequest(int cmd, struct rtentry *rt) 1495 { 1496 struct ifaddr *ifa; 1497 struct sockaddr *dst; 1498 struct ifnet *ifp; 1499 1500 if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL || 1501 (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL) 1502 return; 1503 ifa = ifaof_ifpforaddr(dst, ifp); 1504 if (ifa != NULL) { 1505 IFAFREE(rt->rt_ifa); 1506 IFAREF(ifa); 1507 rt->rt_ifa = ifa; 1508 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) 1509 ifa->ifa_rtrequest(cmd, rt); 1510 } 1511 } 1512 1513 struct netmsg_ifroute { 1514 struct netmsg_base base; 1515 struct ifnet *ifp; 1516 int flag; 1517 int fam; 1518 }; 1519 1520 /* 1521 * Mark an interface down and notify protocols of the transition. 1522 */ 1523 static void 1524 if_unroute_dispatch(netmsg_t nmsg) 1525 { 1526 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1527 struct ifnet *ifp = msg->ifp; 1528 int flag = msg->flag, fam = msg->fam; 1529 struct ifaddr_container *ifac; 1530 1531 ifp->if_flags &= ~flag; 1532 getmicrotime(&ifp->if_lastchange); 1533 /* 1534 * The ifaddr processing in the following loop will block, 1535 * however, this function is called in netisr0, in which 1536 * ifaddr list changes happen, so we don't care about the 1537 * blockness of the ifaddr processing here. 1538 */ 1539 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1540 struct ifaddr *ifa = ifac->ifa; 1541 1542 /* Ignore marker */ 1543 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1544 continue; 1545 1546 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1547 kpfctlinput(PRC_IFDOWN, ifa->ifa_addr); 1548 } 1549 ifq_purge_all(&ifp->if_snd); 1550 rt_ifmsg(ifp); 1551 1552 lwkt_replymsg(&nmsg->lmsg, 0); 1553 } 1554 1555 void 1556 if_unroute(struct ifnet *ifp, int flag, int fam) 1557 { 1558 struct netmsg_ifroute msg; 1559 1560 ASSERT_CANDOMSG_NETISR0(curthread); 1561 1562 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1563 if_unroute_dispatch); 1564 msg.ifp = ifp; 1565 msg.flag = flag; 1566 msg.fam = fam; 1567 lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0); 1568 } 1569 1570 /* 1571 * Mark an interface up and notify protocols of the transition. 1572 */ 1573 static void 1574 if_route_dispatch(netmsg_t nmsg) 1575 { 1576 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1577 struct ifnet *ifp = msg->ifp; 1578 int flag = msg->flag, fam = msg->fam; 1579 struct ifaddr_container *ifac; 1580 1581 ifq_purge_all(&ifp->if_snd); 1582 ifp->if_flags |= flag; 1583 getmicrotime(&ifp->if_lastchange); 1584 /* 1585 * The ifaddr processing in the following loop will block, 1586 * however, this function is called in netisr0, in which 1587 * ifaddr list changes happen, so we don't care about the 1588 * blockness of the ifaddr processing here. 1589 */ 1590 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1591 struct ifaddr *ifa = ifac->ifa; 1592 1593 /* Ignore marker */ 1594 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1595 continue; 1596 1597 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1598 kpfctlinput(PRC_IFUP, ifa->ifa_addr); 1599 } 1600 rt_ifmsg(ifp); 1601 #ifdef INET6 1602 in6_if_up(ifp); 1603 #endif 1604 1605 lwkt_replymsg(&nmsg->lmsg, 0); 1606 } 1607 1608 void 1609 if_route(struct ifnet *ifp, int flag, int fam) 1610 { 1611 struct netmsg_ifroute msg; 1612 1613 ASSERT_CANDOMSG_NETISR0(curthread); 1614 1615 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1616 if_route_dispatch); 1617 msg.ifp = ifp; 1618 msg.flag = flag; 1619 msg.fam = fam; 1620 lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0); 1621 } 1622 1623 /* 1624 * Mark an interface down and notify protocols of the transition. An 1625 * interface going down is also considered to be a synchronizing event. 1626 * We must ensure that all packet processing related to the interface 1627 * has completed before we return so e.g. the caller can free the ifnet 1628 * structure that the mbufs may be referencing. 1629 * 1630 * NOTE: must be called at splnet or eqivalent. 1631 */ 1632 void 1633 if_down(struct ifnet *ifp) 1634 { 1635 if_unroute(ifp, IFF_UP, AF_UNSPEC); 1636 netmsg_service_sync(); 1637 } 1638 1639 /* 1640 * Mark an interface up and notify protocols of 1641 * the transition. 1642 * NOTE: must be called at splnet or eqivalent. 1643 */ 1644 void 1645 if_up(struct ifnet *ifp) 1646 { 1647 if_route(ifp, IFF_UP, AF_UNSPEC); 1648 } 1649 1650 /* 1651 * Process a link state change. 1652 * NOTE: must be called at splsoftnet or equivalent. 1653 */ 1654 void 1655 if_link_state_change(struct ifnet *ifp) 1656 { 1657 int link_state = ifp->if_link_state; 1658 1659 rt_ifmsg(ifp); 1660 devctl_notify("IFNET", ifp->if_xname, 1661 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL); 1662 } 1663 1664 /* 1665 * Handle interface watchdog timer routines. Called 1666 * from softclock, we decrement timers (if set) and 1667 * call the appropriate interface routine on expiration. 1668 */ 1669 static void 1670 if_slowtimo_dispatch(netmsg_t nmsg) 1671 { 1672 struct globaldata *gd = mycpu; 1673 const struct ifnet_array *arr; 1674 int i; 1675 1676 ASSERT_IN_NETISR(0); 1677 1678 crit_enter_gd(gd); 1679 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */ 1680 crit_exit_gd(gd); 1681 1682 arr = ifnet_array_get(); 1683 for (i = 0; i < arr->ifnet_count; ++i) { 1684 struct ifnet *ifp = arr->ifnet_arr[i]; 1685 1686 crit_enter_gd(gd); 1687 1688 if (if_stats_compat) { 1689 IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets); 1690 IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors); 1691 IFNET_STAT_GET(ifp, opackets, ifp->if_opackets); 1692 IFNET_STAT_GET(ifp, oerrors, ifp->if_oerrors); 1693 IFNET_STAT_GET(ifp, collisions, ifp->if_collisions); 1694 IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes); 1695 IFNET_STAT_GET(ifp, obytes, ifp->if_obytes); 1696 IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts); 1697 IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts); 1698 IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops); 1699 IFNET_STAT_GET(ifp, noproto, ifp->if_noproto); 1700 } 1701 1702 if (ifp->if_timer == 0 || --ifp->if_timer) { 1703 crit_exit_gd(gd); 1704 continue; 1705 } 1706 if (ifp->if_watchdog) { 1707 if (ifnet_tryserialize_all(ifp)) { 1708 (*ifp->if_watchdog)(ifp); 1709 ifnet_deserialize_all(ifp); 1710 } else { 1711 /* try again next timeout */ 1712 ++ifp->if_timer; 1713 } 1714 } 1715 1716 crit_exit_gd(gd); 1717 } 1718 1719 callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL); 1720 } 1721 1722 static void 1723 if_slowtimo(void *arg __unused) 1724 { 1725 struct lwkt_msg *lmsg = &if_slowtimo_netmsg.lmsg; 1726 1727 KASSERT(mycpuid == 0, ("not on cpu0")); 1728 crit_enter(); 1729 if (lmsg->ms_flags & MSGF_DONE) 1730 lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg); 1731 crit_exit(); 1732 } 1733 1734 /* 1735 * Map interface name to 1736 * interface structure pointer. 1737 */ 1738 struct ifnet * 1739 ifunit(const char *name) 1740 { 1741 struct ifnet *ifp; 1742 1743 /* 1744 * Search all the interfaces for this name/number 1745 */ 1746 KASSERT(mtx_owned(&ifnet_mtx), ("ifnet is not locked")); 1747 1748 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 1749 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1750 break; 1751 } 1752 return (ifp); 1753 } 1754 1755 struct ifnet * 1756 ifunit_netisr(const char *name) 1757 { 1758 const struct ifnet_array *arr; 1759 int i; 1760 1761 /* 1762 * Search all the interfaces for this name/number 1763 */ 1764 1765 arr = ifnet_array_get(); 1766 for (i = 0; i < arr->ifnet_count; ++i) { 1767 struct ifnet *ifp = arr->ifnet_arr[i]; 1768 1769 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1770 return ifp; 1771 } 1772 return NULL; 1773 } 1774 1775 /* 1776 * Interface ioctls. 1777 */ 1778 int 1779 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred) 1780 { 1781 struct ifnet *ifp; 1782 struct ifreq *ifr; 1783 struct ifstat *ifs; 1784 int error; 1785 short oif_flags; 1786 int new_flags; 1787 #ifdef COMPAT_43 1788 int ocmd; 1789 #endif 1790 size_t namelen, onamelen; 1791 char new_name[IFNAMSIZ]; 1792 struct ifaddr *ifa; 1793 struct sockaddr_dl *sdl; 1794 1795 switch (cmd) { 1796 case SIOCGIFCONF: 1797 case OSIOCGIFCONF: 1798 return (ifconf(cmd, data, cred)); 1799 default: 1800 break; 1801 } 1802 1803 ifr = (struct ifreq *)data; 1804 1805 switch (cmd) { 1806 case SIOCIFCREATE: 1807 case SIOCIFCREATE2: 1808 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1809 return (error); 1810 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), 1811 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL)); 1812 case SIOCIFDESTROY: 1813 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1814 return (error); 1815 return (if_clone_destroy(ifr->ifr_name)); 1816 case SIOCIFGCLONERS: 1817 return (if_clone_list((struct if_clonereq *)data)); 1818 default: 1819 break; 1820 } 1821 1822 /* 1823 * Nominal ioctl through interface, lookup the ifp and obtain a 1824 * lock to serialize the ifconfig ioctl operation. 1825 */ 1826 ifnet_lock(); 1827 1828 ifp = ifunit(ifr->ifr_name); 1829 if (ifp == NULL) { 1830 ifnet_unlock(); 1831 return (ENXIO); 1832 } 1833 error = 0; 1834 1835 switch (cmd) { 1836 case SIOCGIFINDEX: 1837 ifr->ifr_index = ifp->if_index; 1838 break; 1839 1840 case SIOCGIFFLAGS: 1841 ifr->ifr_flags = ifp->if_flags; 1842 ifr->ifr_flagshigh = ifp->if_flags >> 16; 1843 break; 1844 1845 case SIOCGIFCAP: 1846 ifr->ifr_reqcap = ifp->if_capabilities; 1847 ifr->ifr_curcap = ifp->if_capenable; 1848 break; 1849 1850 case SIOCGIFMETRIC: 1851 ifr->ifr_metric = ifp->if_metric; 1852 break; 1853 1854 case SIOCGIFMTU: 1855 ifr->ifr_mtu = ifp->if_mtu; 1856 break; 1857 1858 case SIOCGIFTSOLEN: 1859 ifr->ifr_tsolen = ifp->if_tsolen; 1860 break; 1861 1862 case SIOCGIFDATA: 1863 error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data, 1864 sizeof(ifp->if_data)); 1865 break; 1866 1867 case SIOCGIFPHYS: 1868 ifr->ifr_phys = ifp->if_physical; 1869 break; 1870 1871 case SIOCGIFPOLLCPU: 1872 ifr->ifr_pollcpu = -1; 1873 break; 1874 1875 case SIOCSIFPOLLCPU: 1876 break; 1877 1878 case SIOCSIFFLAGS: 1879 error = priv_check_cred(cred, PRIV_ROOT, 0); 1880 if (error) 1881 break; 1882 new_flags = (ifr->ifr_flags & 0xffff) | 1883 (ifr->ifr_flagshigh << 16); 1884 if (ifp->if_flags & IFF_SMART) { 1885 /* Smart drivers twiddle their own routes */ 1886 } else if (ifp->if_flags & IFF_UP && 1887 (new_flags & IFF_UP) == 0) { 1888 crit_enter(); 1889 if_down(ifp); 1890 crit_exit(); 1891 } else if (new_flags & IFF_UP && 1892 (ifp->if_flags & IFF_UP) == 0) { 1893 crit_enter(); 1894 if_up(ifp); 1895 crit_exit(); 1896 } 1897 1898 #ifdef IFPOLL_ENABLE 1899 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) { 1900 if (new_flags & IFF_NPOLLING) 1901 ifpoll_register(ifp); 1902 else 1903 ifpoll_deregister(ifp); 1904 } 1905 #endif 1906 1907 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | 1908 (new_flags &~ IFF_CANTCHANGE); 1909 if (new_flags & IFF_PPROMISC) { 1910 /* Permanently promiscuous mode requested */ 1911 ifp->if_flags |= IFF_PROMISC; 1912 } else if (ifp->if_pcount == 0) { 1913 ifp->if_flags &= ~IFF_PROMISC; 1914 } 1915 if (ifp->if_ioctl) { 1916 ifnet_serialize_all(ifp); 1917 ifp->if_ioctl(ifp, cmd, data, cred); 1918 ifnet_deserialize_all(ifp); 1919 } 1920 getmicrotime(&ifp->if_lastchange); 1921 break; 1922 1923 case SIOCSIFCAP: 1924 error = priv_check_cred(cred, PRIV_ROOT, 0); 1925 if (error) 1926 break; 1927 if (ifr->ifr_reqcap & ~ifp->if_capabilities) { 1928 error = EINVAL; 1929 break; 1930 } 1931 ifnet_serialize_all(ifp); 1932 ifp->if_ioctl(ifp, cmd, data, cred); 1933 ifnet_deserialize_all(ifp); 1934 break; 1935 1936 case SIOCSIFNAME: 1937 error = priv_check_cred(cred, PRIV_ROOT, 0); 1938 if (error) 1939 break; 1940 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL); 1941 if (error) 1942 break; 1943 if (new_name[0] == '\0') { 1944 error = EINVAL; 1945 break; 1946 } 1947 if (ifunit(new_name) != NULL) { 1948 error = EEXIST; 1949 break; 1950 } 1951 1952 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp); 1953 1954 /* Announce the departure of the interface. */ 1955 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 1956 1957 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); 1958 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 1959 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 1960 namelen = strlen(new_name); 1961 onamelen = sdl->sdl_nlen; 1962 /* 1963 * Move the address if needed. This is safe because we 1964 * allocate space for a name of length IFNAMSIZ when we 1965 * create this in if_attach(). 1966 */ 1967 if (namelen != onamelen) { 1968 bcopy(sdl->sdl_data + onamelen, 1969 sdl->sdl_data + namelen, sdl->sdl_alen); 1970 } 1971 bcopy(new_name, sdl->sdl_data, namelen); 1972 sdl->sdl_nlen = namelen; 1973 sdl = (struct sockaddr_dl *)ifa->ifa_netmask; 1974 bzero(sdl->sdl_data, onamelen); 1975 while (namelen != 0) 1976 sdl->sdl_data[--namelen] = 0xff; 1977 1978 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 1979 1980 /* Announce the return of the interface. */ 1981 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 1982 break; 1983 1984 case SIOCSIFMETRIC: 1985 error = priv_check_cred(cred, PRIV_ROOT, 0); 1986 if (error) 1987 break; 1988 ifp->if_metric = ifr->ifr_metric; 1989 getmicrotime(&ifp->if_lastchange); 1990 break; 1991 1992 case SIOCSIFPHYS: 1993 error = priv_check_cred(cred, PRIV_ROOT, 0); 1994 if (error) 1995 break; 1996 if (ifp->if_ioctl == NULL) { 1997 error = EOPNOTSUPP; 1998 break; 1999 } 2000 ifnet_serialize_all(ifp); 2001 error = ifp->if_ioctl(ifp, cmd, data, cred); 2002 ifnet_deserialize_all(ifp); 2003 if (error == 0) 2004 getmicrotime(&ifp->if_lastchange); 2005 break; 2006 2007 case SIOCSIFMTU: 2008 { 2009 u_long oldmtu = ifp->if_mtu; 2010 2011 error = priv_check_cred(cred, PRIV_ROOT, 0); 2012 if (error) 2013 break; 2014 if (ifp->if_ioctl == NULL) { 2015 error = EOPNOTSUPP; 2016 break; 2017 } 2018 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) { 2019 error = EINVAL; 2020 break; 2021 } 2022 ifnet_serialize_all(ifp); 2023 error = ifp->if_ioctl(ifp, cmd, data, cred); 2024 ifnet_deserialize_all(ifp); 2025 if (error == 0) { 2026 getmicrotime(&ifp->if_lastchange); 2027 rt_ifmsg(ifp); 2028 } 2029 /* 2030 * If the link MTU changed, do network layer specific procedure. 2031 */ 2032 if (ifp->if_mtu != oldmtu) { 2033 #ifdef INET6 2034 nd6_setmtu(ifp); 2035 #endif 2036 } 2037 break; 2038 } 2039 2040 case SIOCSIFTSOLEN: 2041 error = priv_check_cred(cred, PRIV_ROOT, 0); 2042 if (error) 2043 break; 2044 2045 /* XXX need driver supplied upper limit */ 2046 if (ifr->ifr_tsolen <= 0) { 2047 error = EINVAL; 2048 break; 2049 } 2050 ifp->if_tsolen = ifr->ifr_tsolen; 2051 break; 2052 2053 case SIOCADDMULTI: 2054 case SIOCDELMULTI: 2055 error = priv_check_cred(cred, PRIV_ROOT, 0); 2056 if (error) 2057 break; 2058 2059 /* Don't allow group membership on non-multicast interfaces. */ 2060 if ((ifp->if_flags & IFF_MULTICAST) == 0) { 2061 error = EOPNOTSUPP; 2062 break; 2063 } 2064 2065 /* Don't let users screw up protocols' entries. */ 2066 if (ifr->ifr_addr.sa_family != AF_LINK) { 2067 error = EINVAL; 2068 break; 2069 } 2070 2071 if (cmd == SIOCADDMULTI) { 2072 struct ifmultiaddr *ifma; 2073 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); 2074 } else { 2075 error = if_delmulti(ifp, &ifr->ifr_addr); 2076 } 2077 if (error == 0) 2078 getmicrotime(&ifp->if_lastchange); 2079 break; 2080 2081 case SIOCSIFPHYADDR: 2082 case SIOCDIFPHYADDR: 2083 #ifdef INET6 2084 case SIOCSIFPHYADDR_IN6: 2085 #endif 2086 case SIOCSLIFPHYADDR: 2087 case SIOCSIFMEDIA: 2088 case SIOCSIFGENERIC: 2089 error = priv_check_cred(cred, PRIV_ROOT, 0); 2090 if (error) 2091 break; 2092 if (ifp->if_ioctl == 0) { 2093 error = EOPNOTSUPP; 2094 break; 2095 } 2096 ifnet_serialize_all(ifp); 2097 error = ifp->if_ioctl(ifp, cmd, data, cred); 2098 ifnet_deserialize_all(ifp); 2099 if (error == 0) 2100 getmicrotime(&ifp->if_lastchange); 2101 break; 2102 2103 case SIOCGIFSTATUS: 2104 ifs = (struct ifstat *)data; 2105 ifs->ascii[0] = '\0'; 2106 /* fall through */ 2107 case SIOCGIFPSRCADDR: 2108 case SIOCGIFPDSTADDR: 2109 case SIOCGLIFPHYADDR: 2110 case SIOCGIFMEDIA: 2111 case SIOCGIFGENERIC: 2112 if (ifp->if_ioctl == NULL) { 2113 error = EOPNOTSUPP; 2114 break; 2115 } 2116 ifnet_serialize_all(ifp); 2117 error = ifp->if_ioctl(ifp, cmd, data, cred); 2118 ifnet_deserialize_all(ifp); 2119 break; 2120 2121 case SIOCSIFLLADDR: 2122 error = priv_check_cred(cred, PRIV_ROOT, 0); 2123 if (error) 2124 break; 2125 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, 2126 ifr->ifr_addr.sa_len); 2127 EVENTHANDLER_INVOKE(iflladdr_event, ifp); 2128 break; 2129 2130 default: 2131 oif_flags = ifp->if_flags; 2132 if (so->so_proto == 0) { 2133 error = EOPNOTSUPP; 2134 break; 2135 } 2136 #ifndef COMPAT_43 2137 error = so_pru_control_direct(so, cmd, data, ifp); 2138 #else 2139 ocmd = cmd; 2140 2141 switch (cmd) { 2142 case SIOCSIFDSTADDR: 2143 case SIOCSIFADDR: 2144 case SIOCSIFBRDADDR: 2145 case SIOCSIFNETMASK: 2146 #if BYTE_ORDER != BIG_ENDIAN 2147 if (ifr->ifr_addr.sa_family == 0 && 2148 ifr->ifr_addr.sa_len < 16) { 2149 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len; 2150 ifr->ifr_addr.sa_len = 16; 2151 } 2152 #else 2153 if (ifr->ifr_addr.sa_len == 0) 2154 ifr->ifr_addr.sa_len = 16; 2155 #endif 2156 break; 2157 case OSIOCGIFADDR: 2158 cmd = SIOCGIFADDR; 2159 break; 2160 case OSIOCGIFDSTADDR: 2161 cmd = SIOCGIFDSTADDR; 2162 break; 2163 case OSIOCGIFBRDADDR: 2164 cmd = SIOCGIFBRDADDR; 2165 break; 2166 case OSIOCGIFNETMASK: 2167 cmd = SIOCGIFNETMASK; 2168 break; 2169 default: 2170 break; 2171 } 2172 2173 error = so_pru_control_direct(so, cmd, data, ifp); 2174 2175 switch (ocmd) { 2176 case OSIOCGIFADDR: 2177 case OSIOCGIFDSTADDR: 2178 case OSIOCGIFBRDADDR: 2179 case OSIOCGIFNETMASK: 2180 *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family; 2181 break; 2182 } 2183 #endif /* COMPAT_43 */ 2184 2185 if ((oif_flags ^ ifp->if_flags) & IFF_UP) { 2186 #ifdef INET6 2187 DELAY(100);/* XXX: temporary workaround for fxp issue*/ 2188 if (ifp->if_flags & IFF_UP) { 2189 crit_enter(); 2190 in6_if_up(ifp); 2191 crit_exit(); 2192 } 2193 #endif 2194 } 2195 break; 2196 } 2197 2198 ifnet_unlock(); 2199 return (error); 2200 } 2201 2202 /* 2203 * Set/clear promiscuous mode on interface ifp based on the truth value 2204 * of pswitch. The calls are reference counted so that only the first 2205 * "on" request actually has an effect, as does the final "off" request. 2206 * Results are undefined if the "off" and "on" requests are not matched. 2207 */ 2208 int 2209 ifpromisc(struct ifnet *ifp, int pswitch) 2210 { 2211 struct ifreq ifr; 2212 int error; 2213 int oldflags; 2214 2215 oldflags = ifp->if_flags; 2216 if (ifp->if_flags & IFF_PPROMISC) { 2217 /* Do nothing if device is in permanently promiscuous mode */ 2218 ifp->if_pcount += pswitch ? 1 : -1; 2219 return (0); 2220 } 2221 if (pswitch) { 2222 /* 2223 * If the device is not configured up, we cannot put it in 2224 * promiscuous mode. 2225 */ 2226 if ((ifp->if_flags & IFF_UP) == 0) 2227 return (ENETDOWN); 2228 if (ifp->if_pcount++ != 0) 2229 return (0); 2230 ifp->if_flags |= IFF_PROMISC; 2231 log(LOG_INFO, "%s: promiscuous mode enabled\n", 2232 ifp->if_xname); 2233 } else { 2234 if (--ifp->if_pcount > 0) 2235 return (0); 2236 ifp->if_flags &= ~IFF_PROMISC; 2237 log(LOG_INFO, "%s: promiscuous mode disabled\n", 2238 ifp->if_xname); 2239 } 2240 ifr.ifr_flags = ifp->if_flags; 2241 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2242 ifnet_serialize_all(ifp); 2243 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL); 2244 ifnet_deserialize_all(ifp); 2245 if (error == 0) 2246 rt_ifmsg(ifp); 2247 else 2248 ifp->if_flags = oldflags; 2249 return error; 2250 } 2251 2252 /* 2253 * Return interface configuration 2254 * of system. List may be used 2255 * in later ioctl's (above) to get 2256 * other information. 2257 */ 2258 static int 2259 ifconf(u_long cmd, caddr_t data, struct ucred *cred) 2260 { 2261 struct ifconf *ifc = (struct ifconf *)data; 2262 struct ifnet *ifp; 2263 struct sockaddr *sa; 2264 struct ifreq ifr, *ifrp; 2265 int space = ifc->ifc_len, error = 0; 2266 2267 ifrp = ifc->ifc_req; 2268 2269 ifnet_lock(); 2270 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2271 struct ifaddr_container *ifac, *ifac_mark; 2272 struct ifaddr_marker mark; 2273 struct ifaddrhead *head; 2274 int addrs; 2275 2276 if (space <= sizeof ifr) 2277 break; 2278 2279 /* 2280 * Zero the stack declared structure first to prevent 2281 * memory disclosure. 2282 */ 2283 bzero(&ifr, sizeof(ifr)); 2284 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) 2285 >= sizeof(ifr.ifr_name)) { 2286 error = ENAMETOOLONG; 2287 break; 2288 } 2289 2290 /* 2291 * Add a marker, since copyout() could block and during that 2292 * period the list could be changed. Inserting the marker to 2293 * the header of the list will not cause trouble for the code 2294 * assuming that the first element of the list is AF_LINK; the 2295 * marker will be moved to the next position w/o blocking. 2296 */ 2297 ifa_marker_init(&mark, ifp); 2298 ifac_mark = &mark.ifac; 2299 head = &ifp->if_addrheads[mycpuid]; 2300 2301 addrs = 0; 2302 TAILQ_INSERT_HEAD(head, ifac_mark, ifa_link); 2303 while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) { 2304 struct ifaddr *ifa = ifac->ifa; 2305 2306 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2307 TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link); 2308 2309 /* Ignore marker */ 2310 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 2311 continue; 2312 2313 if (space <= sizeof ifr) 2314 break; 2315 sa = ifa->ifa_addr; 2316 if (cred->cr_prison && 2317 prison_if(cred, sa)) 2318 continue; 2319 addrs++; 2320 /* 2321 * Keep a reference on this ifaddr, so that it will 2322 * not be destroyed when its address is copied to 2323 * the userland, which could block. 2324 */ 2325 IFAREF(ifa); 2326 #ifdef COMPAT_43 2327 if (cmd == OSIOCGIFCONF) { 2328 struct osockaddr *osa = 2329 (struct osockaddr *)&ifr.ifr_addr; 2330 ifr.ifr_addr = *sa; 2331 osa->sa_family = sa->sa_family; 2332 error = copyout(&ifr, ifrp, sizeof ifr); 2333 ifrp++; 2334 } else 2335 #endif 2336 if (sa->sa_len <= sizeof(*sa)) { 2337 ifr.ifr_addr = *sa; 2338 error = copyout(&ifr, ifrp, sizeof ifr); 2339 ifrp++; 2340 } else { 2341 if (space < (sizeof ifr) + sa->sa_len - 2342 sizeof(*sa)) { 2343 IFAFREE(ifa); 2344 break; 2345 } 2346 space -= sa->sa_len - sizeof(*sa); 2347 error = copyout(&ifr, ifrp, 2348 sizeof ifr.ifr_name); 2349 if (error == 0) 2350 error = copyout(sa, &ifrp->ifr_addr, 2351 sa->sa_len); 2352 ifrp = (struct ifreq *) 2353 (sa->sa_len + (caddr_t)&ifrp->ifr_addr); 2354 } 2355 IFAFREE(ifa); 2356 if (error) 2357 break; 2358 space -= sizeof ifr; 2359 } 2360 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2361 if (error) 2362 break; 2363 if (!addrs) { 2364 bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr); 2365 error = copyout(&ifr, ifrp, sizeof ifr); 2366 if (error) 2367 break; 2368 space -= sizeof ifr; 2369 ifrp++; 2370 } 2371 } 2372 ifnet_unlock(); 2373 2374 ifc->ifc_len -= space; 2375 return (error); 2376 } 2377 2378 /* 2379 * Just like if_promisc(), but for all-multicast-reception mode. 2380 */ 2381 int 2382 if_allmulti(struct ifnet *ifp, int onswitch) 2383 { 2384 int error = 0; 2385 struct ifreq ifr; 2386 2387 crit_enter(); 2388 2389 if (onswitch) { 2390 if (ifp->if_amcount++ == 0) { 2391 ifp->if_flags |= IFF_ALLMULTI; 2392 ifr.ifr_flags = ifp->if_flags; 2393 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2394 ifnet_serialize_all(ifp); 2395 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2396 NULL); 2397 ifnet_deserialize_all(ifp); 2398 } 2399 } else { 2400 if (ifp->if_amcount > 1) { 2401 ifp->if_amcount--; 2402 } else { 2403 ifp->if_amcount = 0; 2404 ifp->if_flags &= ~IFF_ALLMULTI; 2405 ifr.ifr_flags = ifp->if_flags; 2406 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2407 ifnet_serialize_all(ifp); 2408 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2409 NULL); 2410 ifnet_deserialize_all(ifp); 2411 } 2412 } 2413 2414 crit_exit(); 2415 2416 if (error == 0) 2417 rt_ifmsg(ifp); 2418 return error; 2419 } 2420 2421 /* 2422 * Add a multicast listenership to the interface in question. 2423 * The link layer provides a routine which converts 2424 */ 2425 int 2426 if_addmulti_serialized(struct ifnet *ifp, struct sockaddr *sa, 2427 struct ifmultiaddr **retifma) 2428 { 2429 struct sockaddr *llsa, *dupsa; 2430 int error; 2431 struct ifmultiaddr *ifma; 2432 2433 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2434 2435 /* 2436 * If the matching multicast address already exists 2437 * then don't add a new one, just add a reference 2438 */ 2439 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2440 if (sa_equal(sa, ifma->ifma_addr)) { 2441 ifma->ifma_refcount++; 2442 if (retifma) 2443 *retifma = ifma; 2444 return 0; 2445 } 2446 } 2447 2448 /* 2449 * Give the link layer a chance to accept/reject it, and also 2450 * find out which AF_LINK address this maps to, if it isn't one 2451 * already. 2452 */ 2453 if (ifp->if_resolvemulti) { 2454 error = ifp->if_resolvemulti(ifp, &llsa, sa); 2455 if (error) 2456 return error; 2457 } else { 2458 llsa = NULL; 2459 } 2460 2461 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2462 dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_INTWAIT); 2463 bcopy(sa, dupsa, sa->sa_len); 2464 2465 ifma->ifma_addr = dupsa; 2466 ifma->ifma_lladdr = llsa; 2467 ifma->ifma_ifp = ifp; 2468 ifma->ifma_refcount = 1; 2469 ifma->ifma_protospec = NULL; 2470 rt_newmaddrmsg(RTM_NEWMADDR, ifma); 2471 2472 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2473 if (retifma) 2474 *retifma = ifma; 2475 2476 if (llsa != NULL) { 2477 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2478 if (sa_equal(ifma->ifma_addr, llsa)) 2479 break; 2480 } 2481 if (ifma) { 2482 ifma->ifma_refcount++; 2483 } else { 2484 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2485 dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_INTWAIT); 2486 bcopy(llsa, dupsa, llsa->sa_len); 2487 ifma->ifma_addr = dupsa; 2488 ifma->ifma_ifp = ifp; 2489 ifma->ifma_refcount = 1; 2490 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2491 } 2492 } 2493 /* 2494 * We are certain we have added something, so call down to the 2495 * interface to let them know about it. 2496 */ 2497 if (ifp->if_ioctl) 2498 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL); 2499 2500 return 0; 2501 } 2502 2503 int 2504 if_addmulti(struct ifnet *ifp, struct sockaddr *sa, 2505 struct ifmultiaddr **retifma) 2506 { 2507 int error; 2508 2509 ifnet_serialize_all(ifp); 2510 error = if_addmulti_serialized(ifp, sa, retifma); 2511 ifnet_deserialize_all(ifp); 2512 2513 return error; 2514 } 2515 2516 /* 2517 * Remove a reference to a multicast address on this interface. Yell 2518 * if the request does not match an existing membership. 2519 */ 2520 static int 2521 if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa) 2522 { 2523 struct ifmultiaddr *ifma; 2524 2525 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2526 2527 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2528 if (sa_equal(sa, ifma->ifma_addr)) 2529 break; 2530 if (ifma == NULL) 2531 return ENOENT; 2532 2533 if (ifma->ifma_refcount > 1) { 2534 ifma->ifma_refcount--; 2535 return 0; 2536 } 2537 2538 rt_newmaddrmsg(RTM_DELMADDR, ifma); 2539 sa = ifma->ifma_lladdr; 2540 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2541 /* 2542 * Make sure the interface driver is notified 2543 * in the case of a link layer mcast group being left. 2544 */ 2545 if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) 2546 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2547 kfree(ifma->ifma_addr, M_IFMADDR); 2548 kfree(ifma, M_IFMADDR); 2549 if (sa == NULL) 2550 return 0; 2551 2552 /* 2553 * Now look for the link-layer address which corresponds to 2554 * this network address. It had been squirreled away in 2555 * ifma->ifma_lladdr for this purpose (so we don't have 2556 * to call ifp->if_resolvemulti() again), and we saved that 2557 * value in sa above. If some nasty deleted the 2558 * link-layer address out from underneath us, we can deal because 2559 * the address we stored was is not the same as the one which was 2560 * in the record for the link-layer address. (So we don't complain 2561 * in that case.) 2562 */ 2563 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2564 if (sa_equal(sa, ifma->ifma_addr)) 2565 break; 2566 if (ifma == NULL) 2567 return 0; 2568 2569 if (ifma->ifma_refcount > 1) { 2570 ifma->ifma_refcount--; 2571 return 0; 2572 } 2573 2574 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2575 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2576 kfree(ifma->ifma_addr, M_IFMADDR); 2577 kfree(sa, M_IFMADDR); 2578 kfree(ifma, M_IFMADDR); 2579 2580 return 0; 2581 } 2582 2583 int 2584 if_delmulti(struct ifnet *ifp, struct sockaddr *sa) 2585 { 2586 int error; 2587 2588 ifnet_serialize_all(ifp); 2589 error = if_delmulti_serialized(ifp, sa); 2590 ifnet_deserialize_all(ifp); 2591 2592 return error; 2593 } 2594 2595 /* 2596 * Delete all multicast group membership for an interface. 2597 * Should be used to quickly flush all multicast filters. 2598 */ 2599 void 2600 if_delallmulti_serialized(struct ifnet *ifp) 2601 { 2602 struct ifmultiaddr *ifma, mark; 2603 struct sockaddr sa; 2604 2605 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2606 2607 bzero(&sa, sizeof(sa)); 2608 sa.sa_family = AF_UNSPEC; 2609 sa.sa_len = sizeof(sa); 2610 2611 bzero(&mark, sizeof(mark)); 2612 mark.ifma_addr = &sa; 2613 2614 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link); 2615 while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) { 2616 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2617 TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark, 2618 ifma_link); 2619 2620 if (ifma->ifma_addr->sa_family == AF_UNSPEC) 2621 continue; 2622 2623 if_delmulti_serialized(ifp, ifma->ifma_addr); 2624 } 2625 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2626 } 2627 2628 2629 /* 2630 * Set the link layer address on an interface. 2631 * 2632 * At this time we only support certain types of interfaces, 2633 * and we don't allow the length of the address to change. 2634 */ 2635 int 2636 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) 2637 { 2638 struct sockaddr_dl *sdl; 2639 struct ifreq ifr; 2640 2641 sdl = IF_LLSOCKADDR(ifp); 2642 if (sdl == NULL) 2643 return (EINVAL); 2644 if (len != sdl->sdl_alen) /* don't allow length to change */ 2645 return (EINVAL); 2646 switch (ifp->if_type) { 2647 case IFT_ETHER: /* these types use struct arpcom */ 2648 case IFT_XETHER: 2649 case IFT_L2VLAN: 2650 case IFT_IEEE8023ADLAG: 2651 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len); 2652 bcopy(lladdr, LLADDR(sdl), len); 2653 break; 2654 default: 2655 return (ENODEV); 2656 } 2657 /* 2658 * If the interface is already up, we need 2659 * to re-init it in order to reprogram its 2660 * address filter. 2661 */ 2662 ifnet_serialize_all(ifp); 2663 if ((ifp->if_flags & IFF_UP) != 0) { 2664 #ifdef INET 2665 struct ifaddr_container *ifac; 2666 #endif 2667 2668 ifp->if_flags &= ~IFF_UP; 2669 ifr.ifr_flags = ifp->if_flags; 2670 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2671 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2672 NULL); 2673 ifp->if_flags |= IFF_UP; 2674 ifr.ifr_flags = ifp->if_flags; 2675 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2676 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2677 NULL); 2678 #ifdef INET 2679 /* 2680 * Also send gratuitous ARPs to notify other nodes about 2681 * the address change. 2682 */ 2683 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2684 struct ifaddr *ifa = ifac->ifa; 2685 2686 if (ifa->ifa_addr != NULL && 2687 ifa->ifa_addr->sa_family == AF_INET) 2688 arp_gratuitous(ifp, ifa); 2689 } 2690 #endif 2691 } 2692 ifnet_deserialize_all(ifp); 2693 return (0); 2694 } 2695 2696 struct ifmultiaddr * 2697 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp) 2698 { 2699 struct ifmultiaddr *ifma; 2700 2701 /* TODO: need ifnet_serialize_main */ 2702 ifnet_serialize_all(ifp); 2703 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2704 if (sa_equal(ifma->ifma_addr, sa)) 2705 break; 2706 ifnet_deserialize_all(ifp); 2707 2708 return ifma; 2709 } 2710 2711 /* 2712 * This function locates the first real ethernet MAC from a network 2713 * card and loads it into node, returning 0 on success or ENOENT if 2714 * no suitable interfaces were found. It is used by the uuid code to 2715 * generate a unique 6-byte number. 2716 */ 2717 int 2718 if_getanyethermac(uint16_t *node, int minlen) 2719 { 2720 struct ifnet *ifp; 2721 struct sockaddr_dl *sdl; 2722 2723 ifnet_lock(); 2724 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2725 if (ifp->if_type != IFT_ETHER) 2726 continue; 2727 sdl = IF_LLSOCKADDR(ifp); 2728 if (sdl->sdl_alen < minlen) 2729 continue; 2730 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node, 2731 minlen); 2732 ifnet_unlock(); 2733 return(0); 2734 } 2735 ifnet_unlock(); 2736 return (ENOENT); 2737 } 2738 2739 /* 2740 * The name argument must be a pointer to storage which will last as 2741 * long as the interface does. For physical devices, the result of 2742 * device_get_name(dev) is a good choice and for pseudo-devices a 2743 * static string works well. 2744 */ 2745 void 2746 if_initname(struct ifnet *ifp, const char *name, int unit) 2747 { 2748 ifp->if_dname = name; 2749 ifp->if_dunit = unit; 2750 if (unit != IF_DUNIT_NONE) 2751 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); 2752 else 2753 strlcpy(ifp->if_xname, name, IFNAMSIZ); 2754 } 2755 2756 int 2757 if_printf(struct ifnet *ifp, const char *fmt, ...) 2758 { 2759 __va_list ap; 2760 int retval; 2761 2762 retval = kprintf("%s: ", ifp->if_xname); 2763 __va_start(ap, fmt); 2764 retval += kvprintf(fmt, ap); 2765 __va_end(ap); 2766 return (retval); 2767 } 2768 2769 struct ifnet * 2770 if_alloc(uint8_t type) 2771 { 2772 struct ifnet *ifp; 2773 size_t size; 2774 2775 /* 2776 * XXX temporary hack until arpcom is setup in if_l2com 2777 */ 2778 if (type == IFT_ETHER) 2779 size = sizeof(struct arpcom); 2780 else 2781 size = sizeof(struct ifnet); 2782 2783 ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO); 2784 2785 ifp->if_type = type; 2786 2787 if (if_com_alloc[type] != NULL) { 2788 ifp->if_l2com = if_com_alloc[type](type, ifp); 2789 if (ifp->if_l2com == NULL) { 2790 kfree(ifp, M_IFNET); 2791 return (NULL); 2792 } 2793 } 2794 return (ifp); 2795 } 2796 2797 void 2798 if_free(struct ifnet *ifp) 2799 { 2800 kfree(ifp, M_IFNET); 2801 } 2802 2803 void 2804 ifq_set_classic(struct ifaltq *ifq) 2805 { 2806 ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq, 2807 ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request); 2808 } 2809 2810 void 2811 ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq, 2812 ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request) 2813 { 2814 int q; 2815 2816 KASSERT(mapsubq != NULL, ("mapsubq is not specified")); 2817 KASSERT(enqueue != NULL, ("enqueue is not specified")); 2818 KASSERT(dequeue != NULL, ("dequeue is not specified")); 2819 KASSERT(request != NULL, ("request is not specified")); 2820 2821 ifq->altq_mapsubq = mapsubq; 2822 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 2823 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 2824 2825 ifsq->ifsq_enqueue = enqueue; 2826 ifsq->ifsq_dequeue = dequeue; 2827 ifsq->ifsq_request = request; 2828 } 2829 } 2830 2831 static void 2832 ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2833 { 2834 m->m_nextpkt = NULL; 2835 if (ifsq->ifsq_norm_tail == NULL) 2836 ifsq->ifsq_norm_head = m; 2837 else 2838 ifsq->ifsq_norm_tail->m_nextpkt = m; 2839 ifsq->ifsq_norm_tail = m; 2840 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2841 } 2842 2843 static void 2844 ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2845 { 2846 m->m_nextpkt = NULL; 2847 if (ifsq->ifsq_prio_tail == NULL) 2848 ifsq->ifsq_prio_head = m; 2849 else 2850 ifsq->ifsq_prio_tail->m_nextpkt = m; 2851 ifsq->ifsq_prio_tail = m; 2852 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2853 ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len); 2854 } 2855 2856 static struct mbuf * 2857 ifsq_norm_dequeue(struct ifaltq_subque *ifsq) 2858 { 2859 struct mbuf *m; 2860 2861 m = ifsq->ifsq_norm_head; 2862 if (m != NULL) { 2863 if ((ifsq->ifsq_norm_head = m->m_nextpkt) == NULL) 2864 ifsq->ifsq_norm_tail = NULL; 2865 m->m_nextpkt = NULL; 2866 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2867 } 2868 return m; 2869 } 2870 2871 static struct mbuf * 2872 ifsq_prio_dequeue(struct ifaltq_subque *ifsq) 2873 { 2874 struct mbuf *m; 2875 2876 m = ifsq->ifsq_prio_head; 2877 if (m != NULL) { 2878 if ((ifsq->ifsq_prio_head = m->m_nextpkt) == NULL) 2879 ifsq->ifsq_prio_tail = NULL; 2880 m->m_nextpkt = NULL; 2881 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2882 ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len); 2883 } 2884 return m; 2885 } 2886 2887 int 2888 ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m, 2889 struct altq_pktattr *pa __unused) 2890 { 2891 M_ASSERTPKTHDR(m); 2892 if (ifsq->ifsq_len >= ifsq->ifsq_maxlen || 2893 ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) { 2894 if ((m->m_flags & M_PRIO) && 2895 ifsq->ifsq_prio_len < (ifsq->ifsq_maxlen / 2) && 2896 ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt / 2)) { 2897 struct mbuf *m_drop; 2898 2899 /* 2900 * Perform drop-head on normal queue 2901 */ 2902 m_drop = ifsq_norm_dequeue(ifsq); 2903 if (m_drop != NULL) { 2904 m_freem(m_drop); 2905 ifsq_prio_enqueue(ifsq, m); 2906 return 0; 2907 } 2908 /* XXX nothing could be dropped? */ 2909 } 2910 m_freem(m); 2911 return ENOBUFS; 2912 } else { 2913 if (m->m_flags & M_PRIO) 2914 ifsq_prio_enqueue(ifsq, m); 2915 else 2916 ifsq_norm_enqueue(ifsq, m); 2917 return 0; 2918 } 2919 } 2920 2921 struct mbuf * 2922 ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op) 2923 { 2924 struct mbuf *m; 2925 2926 switch (op) { 2927 case ALTDQ_POLL: 2928 m = ifsq->ifsq_prio_head; 2929 if (m == NULL) 2930 m = ifsq->ifsq_norm_head; 2931 break; 2932 2933 case ALTDQ_REMOVE: 2934 m = ifsq_prio_dequeue(ifsq); 2935 if (m == NULL) 2936 m = ifsq_norm_dequeue(ifsq); 2937 break; 2938 2939 default: 2940 panic("unsupported ALTQ dequeue op: %d", op); 2941 } 2942 return m; 2943 } 2944 2945 int 2946 ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg) 2947 { 2948 switch (req) { 2949 case ALTRQ_PURGE: 2950 for (;;) { 2951 struct mbuf *m; 2952 2953 m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE); 2954 if (m == NULL) 2955 break; 2956 m_freem(m); 2957 } 2958 break; 2959 2960 default: 2961 panic("unsupported ALTQ request: %d", req); 2962 } 2963 return 0; 2964 } 2965 2966 static void 2967 ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched) 2968 { 2969 struct ifnet *ifp = ifsq_get_ifp(ifsq); 2970 int running = 0, need_sched; 2971 2972 /* 2973 * Try to do direct ifnet.if_start on the subqueue first, if there is 2974 * contention on the subqueue hardware serializer, ifnet.if_start on 2975 * the subqueue will be scheduled on the subqueue owner CPU. 2976 */ 2977 if (!ifsq_tryserialize_hw(ifsq)) { 2978 /* 2979 * Subqueue hardware serializer contention happened, 2980 * ifnet.if_start on the subqueue is scheduled on 2981 * the subqueue owner CPU, and we keep going. 2982 */ 2983 ifsq_ifstart_schedule(ifsq, 1); 2984 return; 2985 } 2986 2987 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 2988 ifp->if_start(ifp, ifsq); 2989 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 2990 running = 1; 2991 } 2992 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 2993 2994 ifsq_deserialize_hw(ifsq); 2995 2996 if (need_sched) { 2997 /* 2998 * More data need to be transmitted, ifnet.if_start on the 2999 * subqueue is scheduled on the subqueue owner CPU, and we 3000 * keep going. 3001 * NOTE: ifnet.if_start subqueue interlock is not released. 3002 */ 3003 ifsq_ifstart_schedule(ifsq, force_sched); 3004 } 3005 } 3006 3007 /* 3008 * Subqeue packets staging mechanism: 3009 * 3010 * The packets enqueued into the subqueue are staged to a certain amount 3011 * before the ifnet.if_start on the subqueue is called. In this way, the 3012 * driver could avoid writing to hardware registers upon every packet, 3013 * instead, hardware registers could be written when certain amount of 3014 * packets are put onto hardware TX ring. The measurement on several modern 3015 * NICs (emx(4), igb(4), bnx(4), bge(4), jme(4)) shows that the hardware 3016 * registers writing aggregation could save ~20% CPU time when 18bytes UDP 3017 * datagrams are transmitted at 1.48Mpps. The performance improvement by 3018 * hardware registers writing aggeregation is also mentioned by Luigi Rizzo's 3019 * netmap paper (http://info.iet.unipi.it/~luigi/netmap/). 3020 * 3021 * Subqueue packets staging is performed for two entry points into drivers' 3022 * transmission function: 3023 * - Direct ifnet.if_start calling on the subqueue, i.e. ifsq_ifstart_try() 3024 * - ifnet.if_start scheduling on the subqueue, i.e. ifsq_ifstart_schedule() 3025 * 3026 * Subqueue packets staging will be stopped upon any of the following 3027 * conditions: 3028 * - If the count of packets enqueued on the current CPU is great than or 3029 * equal to ifsq_stage_cntmax. (XXX this should be per-interface) 3030 * - If the total length of packets enqueued on the current CPU is great 3031 * than or equal to the hardware's MTU - max_protohdr. max_protohdr is 3032 * cut from the hardware's MTU mainly bacause a full TCP segment's size 3033 * is usually less than hardware's MTU. 3034 * - ifsq_ifstart_schedule() is not pending on the current CPU and 3035 * ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not 3036 * released. 3037 * - The if_start_rollup(), which is registered as low priority netisr 3038 * rollup function, is called; probably because no more work is pending 3039 * for netisr. 3040 * 3041 * NOTE: 3042 * Currently subqueue packet staging is only performed in netisr threads. 3043 */ 3044 int 3045 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa) 3046 { 3047 struct ifaltq *ifq = &ifp->if_snd; 3048 struct ifaltq_subque *ifsq; 3049 int error, start = 0, len, mcast = 0, avoid_start = 0; 3050 struct ifsubq_stage_head *head = NULL; 3051 struct ifsubq_stage *stage = NULL; 3052 struct globaldata *gd = mycpu; 3053 struct thread *td = gd->gd_curthread; 3054 3055 crit_enter_quick(td); 3056 3057 ifsq = ifq_map_subq(ifq, gd->gd_cpuid); 3058 ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq); 3059 3060 len = m->m_pkthdr.len; 3061 if (m->m_flags & M_MCAST) 3062 mcast = 1; 3063 3064 if (td->td_type == TD_TYPE_NETISR) { 3065 head = &ifsubq_stage_heads[mycpuid]; 3066 stage = ifsq_get_stage(ifsq, mycpuid); 3067 3068 stage->stg_cnt++; 3069 stage->stg_len += len; 3070 if (stage->stg_cnt < ifsq_stage_cntmax && 3071 stage->stg_len < (ifp->if_mtu - max_protohdr)) 3072 avoid_start = 1; 3073 } 3074 3075 ALTQ_SQ_LOCK(ifsq); 3076 error = ifsq_enqueue_locked(ifsq, m, pa); 3077 if (error) { 3078 if (!ifsq_data_ready(ifsq)) { 3079 ALTQ_SQ_UNLOCK(ifsq); 3080 crit_exit_quick(td); 3081 return error; 3082 } 3083 avoid_start = 0; 3084 } 3085 if (!ifsq_is_started(ifsq)) { 3086 if (avoid_start) { 3087 ALTQ_SQ_UNLOCK(ifsq); 3088 3089 KKASSERT(!error); 3090 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 3091 ifsq_stage_insert(head, stage); 3092 3093 IFNET_STAT_INC(ifp, obytes, len); 3094 if (mcast) 3095 IFNET_STAT_INC(ifp, omcasts, 1); 3096 crit_exit_quick(td); 3097 return error; 3098 } 3099 3100 /* 3101 * Hold the subqueue interlock of ifnet.if_start 3102 */ 3103 ifsq_set_started(ifsq); 3104 start = 1; 3105 } 3106 ALTQ_SQ_UNLOCK(ifsq); 3107 3108 if (!error) { 3109 IFNET_STAT_INC(ifp, obytes, len); 3110 if (mcast) 3111 IFNET_STAT_INC(ifp, omcasts, 1); 3112 } 3113 3114 if (stage != NULL) { 3115 if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) { 3116 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 3117 if (!avoid_start) { 3118 ifsq_stage_remove(head, stage); 3119 ifsq_ifstart_schedule(ifsq, 1); 3120 } 3121 crit_exit_quick(td); 3122 return error; 3123 } 3124 3125 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) { 3126 ifsq_stage_remove(head, stage); 3127 } else { 3128 stage->stg_cnt = 0; 3129 stage->stg_len = 0; 3130 } 3131 } 3132 3133 if (!start) { 3134 crit_exit_quick(td); 3135 return error; 3136 } 3137 3138 ifsq_ifstart_try(ifsq, 0); 3139 3140 crit_exit_quick(td); 3141 return error; 3142 } 3143 3144 void * 3145 ifa_create(int size) 3146 { 3147 struct ifaddr *ifa; 3148 int i; 3149 3150 KASSERT(size >= sizeof(*ifa), ("ifaddr size too small")); 3151 3152 ifa = kmalloc(size, M_IFADDR, M_INTWAIT | M_ZERO); 3153 ifa->ifa_containers = 3154 kmalloc_cachealign(ncpus * sizeof(struct ifaddr_container), 3155 M_IFADDR, M_INTWAIT | M_ZERO); 3156 3157 ifa->ifa_ncnt = ncpus; 3158 for (i = 0; i < ncpus; ++i) { 3159 struct ifaddr_container *ifac = &ifa->ifa_containers[i]; 3160 3161 ifac->ifa_magic = IFA_CONTAINER_MAGIC; 3162 ifac->ifa = ifa; 3163 ifac->ifa_refcnt = 1; 3164 } 3165 #ifdef IFADDR_DEBUG 3166 kprintf("alloc ifa %p %d\n", ifa, size); 3167 #endif 3168 return ifa; 3169 } 3170 3171 void 3172 ifac_free(struct ifaddr_container *ifac, int cpu_id) 3173 { 3174 struct ifaddr *ifa = ifac->ifa; 3175 3176 KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC); 3177 KKASSERT(ifac->ifa_refcnt == 0); 3178 KASSERT(ifac->ifa_listmask == 0, 3179 ("ifa is still on %#x lists", ifac->ifa_listmask)); 3180 3181 ifac->ifa_magic = IFA_CONTAINER_DEAD; 3182 3183 #ifdef IFADDR_DEBUG_VERBOSE 3184 kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id); 3185 #endif 3186 3187 KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus, 3188 ("invalid # of ifac, %d", ifa->ifa_ncnt)); 3189 if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) { 3190 #ifdef IFADDR_DEBUG 3191 kprintf("free ifa %p\n", ifa); 3192 #endif 3193 kfree(ifa->ifa_containers, M_IFADDR); 3194 kfree(ifa, M_IFADDR); 3195 } 3196 } 3197 3198 static void 3199 ifa_iflink_dispatch(netmsg_t nmsg) 3200 { 3201 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3202 struct ifaddr *ifa = msg->ifa; 3203 struct ifnet *ifp = msg->ifp; 3204 int cpu = mycpuid; 3205 struct ifaddr_container *ifac; 3206 3207 crit_enter(); 3208 3209 ifac = &ifa->ifa_containers[cpu]; 3210 ASSERT_IFAC_VALID(ifac); 3211 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0, 3212 ("ifaddr is on if_addrheads")); 3213 3214 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD; 3215 if (msg->tail) 3216 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link); 3217 else 3218 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link); 3219 3220 crit_exit(); 3221 3222 ifa_forwardmsg(&nmsg->lmsg, cpu + 1); 3223 } 3224 3225 void 3226 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail) 3227 { 3228 struct netmsg_ifaddr msg; 3229 3230 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3231 0, ifa_iflink_dispatch); 3232 msg.ifa = ifa; 3233 msg.ifp = ifp; 3234 msg.tail = tail; 3235 3236 ifa_domsg(&msg.base.lmsg, 0); 3237 } 3238 3239 static void 3240 ifa_ifunlink_dispatch(netmsg_t nmsg) 3241 { 3242 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3243 struct ifaddr *ifa = msg->ifa; 3244 struct ifnet *ifp = msg->ifp; 3245 int cpu = mycpuid; 3246 struct ifaddr_container *ifac; 3247 3248 crit_enter(); 3249 3250 ifac = &ifa->ifa_containers[cpu]; 3251 ASSERT_IFAC_VALID(ifac); 3252 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD, 3253 ("ifaddr is not on if_addrhead")); 3254 3255 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link); 3256 ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD; 3257 3258 crit_exit(); 3259 3260 ifa_forwardmsg(&nmsg->lmsg, cpu + 1); 3261 } 3262 3263 void 3264 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp) 3265 { 3266 struct netmsg_ifaddr msg; 3267 3268 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3269 0, ifa_ifunlink_dispatch); 3270 msg.ifa = ifa; 3271 msg.ifp = ifp; 3272 3273 ifa_domsg(&msg.base.lmsg, 0); 3274 } 3275 3276 static void 3277 ifa_destroy_dispatch(netmsg_t nmsg) 3278 { 3279 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3280 3281 IFAFREE(msg->ifa); 3282 ifa_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3283 } 3284 3285 void 3286 ifa_destroy(struct ifaddr *ifa) 3287 { 3288 struct netmsg_ifaddr msg; 3289 3290 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3291 0, ifa_destroy_dispatch); 3292 msg.ifa = ifa; 3293 3294 ifa_domsg(&msg.base.lmsg, 0); 3295 } 3296 3297 struct lwkt_port * 3298 ifnet_portfn(int cpu) 3299 { 3300 return &ifnet_threads[cpu].td_msgport; 3301 } 3302 3303 void 3304 ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu) 3305 { 3306 KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus); 3307 3308 if (next_cpu < ncpus) 3309 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg); 3310 else 3311 lwkt_replymsg(lmsg, 0); 3312 } 3313 3314 int 3315 ifnet_domsg(struct lwkt_msg *lmsg, int cpu) 3316 { 3317 KKASSERT(cpu < ncpus); 3318 return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0); 3319 } 3320 3321 void 3322 ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu) 3323 { 3324 KKASSERT(cpu < ncpus); 3325 lwkt_sendmsg(ifnet_portfn(cpu), lmsg); 3326 } 3327 3328 /* 3329 * Generic netmsg service loop. Some protocols may roll their own but all 3330 * must do the basic command dispatch function call done here. 3331 */ 3332 static void 3333 ifnet_service_loop(void *arg __unused) 3334 { 3335 netmsg_t msg; 3336 3337 while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) { 3338 KASSERT(msg->base.nm_dispatch, ("ifnet_service: badmsg")); 3339 msg->base.nm_dispatch(msg); 3340 } 3341 } 3342 3343 static void 3344 if_start_rollup(void) 3345 { 3346 struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid]; 3347 struct ifsubq_stage *stage; 3348 3349 crit_enter(); 3350 3351 while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) { 3352 struct ifaltq_subque *ifsq = stage->stg_subq; 3353 int is_sched = 0; 3354 3355 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED) 3356 is_sched = 1; 3357 ifsq_stage_remove(head, stage); 3358 3359 if (is_sched) { 3360 ifsq_ifstart_schedule(ifsq, 1); 3361 } else { 3362 int start = 0; 3363 3364 ALTQ_SQ_LOCK(ifsq); 3365 if (!ifsq_is_started(ifsq)) { 3366 /* 3367 * Hold the subqueue interlock of 3368 * ifnet.if_start 3369 */ 3370 ifsq_set_started(ifsq); 3371 start = 1; 3372 } 3373 ALTQ_SQ_UNLOCK(ifsq); 3374 3375 if (start) 3376 ifsq_ifstart_try(ifsq, 1); 3377 } 3378 KKASSERT((stage->stg_flags & 3379 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 3380 } 3381 3382 crit_exit(); 3383 } 3384 3385 static void 3386 ifnetinit(void *dummy __unused) 3387 { 3388 int i; 3389 3390 for (i = 0; i < ncpus; ++i) { 3391 struct thread *thr = &ifnet_threads[i]; 3392 3393 lwkt_create(ifnet_service_loop, NULL, NULL, 3394 thr, TDF_NOSTART|TDF_FORCE_SPINPORT|TDF_FIXEDCPU, 3395 i, "ifnet %d", i); 3396 netmsg_service_port_init(&thr->td_msgport); 3397 lwkt_schedule(thr); 3398 } 3399 3400 for (i = 0; i < ncpus; ++i) 3401 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head); 3402 netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART); 3403 } 3404 3405 void 3406 if_register_com_alloc(u_char type, 3407 if_com_alloc_t *a, if_com_free_t *f) 3408 { 3409 3410 KASSERT(if_com_alloc[type] == NULL, 3411 ("if_register_com_alloc: %d already registered", type)); 3412 KASSERT(if_com_free[type] == NULL, 3413 ("if_register_com_alloc: %d free already registered", type)); 3414 3415 if_com_alloc[type] = a; 3416 if_com_free[type] = f; 3417 } 3418 3419 void 3420 if_deregister_com_alloc(u_char type) 3421 { 3422 3423 KASSERT(if_com_alloc[type] != NULL, 3424 ("if_deregister_com_alloc: %d not registered", type)); 3425 KASSERT(if_com_free[type] != NULL, 3426 ("if_deregister_com_alloc: %d free not registered", type)); 3427 if_com_alloc[type] = NULL; 3428 if_com_free[type] = NULL; 3429 } 3430 3431 int 3432 if_ring_count2(int cnt, int cnt_max) 3433 { 3434 int shift = 0; 3435 3436 KASSERT(cnt_max >= 1 && powerof2(cnt_max), 3437 ("invalid ring count max %d", cnt_max)); 3438 3439 if (cnt <= 0) 3440 cnt = cnt_max; 3441 if (cnt > ncpus2) 3442 cnt = ncpus2; 3443 if (cnt > cnt_max) 3444 cnt = cnt_max; 3445 3446 while ((1 << (shift + 1)) <= cnt) 3447 ++shift; 3448 cnt = 1 << shift; 3449 3450 KASSERT(cnt >= 1 && cnt <= ncpus2 && cnt <= cnt_max, 3451 ("calculate cnt %d, ncpus2 %d, cnt max %d", 3452 cnt, ncpus2, cnt_max)); 3453 return cnt; 3454 } 3455 3456 void 3457 ifq_set_maxlen(struct ifaltq *ifq, int len) 3458 { 3459 ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax); 3460 } 3461 3462 int 3463 ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused) 3464 { 3465 return ALTQ_SUBQ_INDEX_DEFAULT; 3466 } 3467 3468 int 3469 ifq_mapsubq_mask(struct ifaltq *ifq, int cpuid) 3470 { 3471 return (cpuid & ifq->altq_subq_mask); 3472 } 3473 3474 static void 3475 ifsq_watchdog(void *arg) 3476 { 3477 struct ifsubq_watchdog *wd = arg; 3478 struct ifnet *ifp; 3479 3480 if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer)) 3481 goto done; 3482 3483 ifp = ifsq_get_ifp(wd->wd_subq); 3484 if (ifnet_tryserialize_all(ifp)) { 3485 wd->wd_watchdog(wd->wd_subq); 3486 ifnet_deserialize_all(ifp); 3487 } else { 3488 /* try again next timeout */ 3489 wd->wd_timer = 1; 3490 } 3491 done: 3492 ifsq_watchdog_reset(wd); 3493 } 3494 3495 static void 3496 ifsq_watchdog_reset(struct ifsubq_watchdog *wd) 3497 { 3498 callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd, 3499 ifsq_get_cpuid(wd->wd_subq)); 3500 } 3501 3502 void 3503 ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq, 3504 ifsq_watchdog_t watchdog) 3505 { 3506 callout_init_mp(&wd->wd_callout); 3507 wd->wd_timer = 0; 3508 wd->wd_subq = ifsq; 3509 wd->wd_watchdog = watchdog; 3510 } 3511 3512 void 3513 ifsq_watchdog_start(struct ifsubq_watchdog *wd) 3514 { 3515 wd->wd_timer = 0; 3516 ifsq_watchdog_reset(wd); 3517 } 3518 3519 void 3520 ifsq_watchdog_stop(struct ifsubq_watchdog *wd) 3521 { 3522 wd->wd_timer = 0; 3523 callout_stop(&wd->wd_callout); 3524 } 3525 3526 void 3527 ifnet_lock(void) 3528 { 3529 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3530 ("try holding ifnet lock in netisr")); 3531 mtx_lock(&ifnet_mtx); 3532 } 3533 3534 void 3535 ifnet_unlock(void) 3536 { 3537 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3538 ("try holding ifnet lock in netisr")); 3539 mtx_unlock(&ifnet_mtx); 3540 } 3541 3542 static struct ifnet_array * 3543 ifnet_array_alloc(int count) 3544 { 3545 struct ifnet_array *arr; 3546 3547 arr = kmalloc(__offsetof(struct ifnet_array, ifnet_arr[count]), 3548 M_IFNET, M_WAITOK); 3549 arr->ifnet_count = count; 3550 3551 return arr; 3552 } 3553 3554 static void 3555 ifnet_array_free(struct ifnet_array *arr) 3556 { 3557 if (arr == &ifnet_array0) 3558 return; 3559 kfree(arr, M_IFNET); 3560 } 3561 3562 static struct ifnet_array * 3563 ifnet_array_add(struct ifnet *ifp, const struct ifnet_array *old_arr) 3564 { 3565 struct ifnet_array *arr; 3566 int count, i; 3567 3568 KASSERT(old_arr->ifnet_count >= 0, 3569 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3570 count = old_arr->ifnet_count + 1; 3571 arr = ifnet_array_alloc(count); 3572 3573 /* 3574 * Save the old ifnet array and append this ifp to the end of 3575 * the new ifnet array. 3576 */ 3577 for (i = 0; i < old_arr->ifnet_count; ++i) { 3578 KASSERT(old_arr->ifnet_arr[i] != ifp, 3579 ("%s is already in ifnet array", ifp->if_xname)); 3580 arr->ifnet_arr[i] = old_arr->ifnet_arr[i]; 3581 } 3582 KASSERT(i == count - 1, 3583 ("add %s, ifnet array index mismatch, should be %d, but got %d", 3584 ifp->if_xname, count - 1, i)); 3585 arr->ifnet_arr[i] = ifp; 3586 3587 return arr; 3588 } 3589 3590 static struct ifnet_array * 3591 ifnet_array_del(struct ifnet *ifp, const struct ifnet_array *old_arr) 3592 { 3593 struct ifnet_array *arr; 3594 int count, i, idx, found = 0; 3595 3596 KASSERT(old_arr->ifnet_count > 0, 3597 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3598 count = old_arr->ifnet_count - 1; 3599 arr = ifnet_array_alloc(count); 3600 3601 /* 3602 * Save the old ifnet array, but skip this ifp. 3603 */ 3604 idx = 0; 3605 for (i = 0; i < old_arr->ifnet_count; ++i) { 3606 if (old_arr->ifnet_arr[i] == ifp) { 3607 KASSERT(!found, 3608 ("dup %s is in ifnet array", ifp->if_xname)); 3609 found = 1; 3610 continue; 3611 } 3612 KASSERT(idx < count, 3613 ("invalid ifnet array index %d, count %d", idx, count)); 3614 arr->ifnet_arr[idx] = old_arr->ifnet_arr[i]; 3615 ++idx; 3616 } 3617 KASSERT(found, ("%s is not in ifnet array", ifp->if_xname)); 3618 KASSERT(idx == count, 3619 ("del %s, ifnet array count mismatch, should be %d, but got %d ", 3620 ifp->if_xname, count, idx)); 3621 3622 return arr; 3623 } 3624 3625 const struct ifnet_array * 3626 ifnet_array_get(void) 3627 { 3628 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3629 return ifnet_array; 3630 } 3631 3632 int 3633 ifnet_array_isempty(void) 3634 { 3635 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3636 if (ifnet_array->ifnet_count == 0) 3637 return 1; 3638 else 3639 return 0; 3640 } 3641 3642 void 3643 ifa_marker_init(struct ifaddr_marker *mark, struct ifnet *ifp) 3644 { 3645 struct ifaddr *ifa; 3646 3647 memset(mark, 0, sizeof(*mark)); 3648 ifa = &mark->ifa; 3649 3650 mark->ifac.ifa = ifa; 3651 3652 ifa->ifa_addr = &mark->addr; 3653 ifa->ifa_dstaddr = &mark->dstaddr; 3654 ifa->ifa_netmask = &mark->netmask; 3655 ifa->ifa_ifp = ifp; 3656 } 3657