/*
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
 */

#include "opt_inet6.h"
#include "opt_inet.h"
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/thread.h>
#include <sys/serialize.h>
#include <sys/bus.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/mutex2.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/ifq_var.h>
#include <net/radix.h>
#include <net/route.h>
#include <net/if_clone.h>
#include <net/netisr2.h>
#include <net/netmsg2.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>
#include <machine/smp.h>

#if defined(INET) || defined(INET6)
/*XXX*/
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#endif

struct netmsg_ifaddr {
	struct netmsg_base base;
	struct ifaddr	*ifa;
	struct ifnet	*ifp;
	int		tail;
};

struct ifsubq_stage_head {
	TAILQ_HEAD(, ifsubq_stage)	stg_head;
} __cachealign;

/*
 * System initialization
 */
static void	if_attachdomain(void *);
static void	if_attachdomain1(struct ifnet *);
static int	ifconf(u_long, caddr_t, struct ucred *);
static void	ifinit(void *);
static void	ifnetinit(void *);
static void	if_slowtimo(void *);
static void	link_rtrequest(int, struct rtentry *);
static int	if_rtdel(struct radix_node *, void *);
static void	if_slowtimo_dispatch(netmsg_t);

/* Helper functions */
static void	ifsq_watchdog_reset(struct ifsubq_watchdog *);
static int	if_delmulti_serialized(struct ifnet *, struct sockaddr *);
static struct ifnet_array *ifnet_array_alloc(int);
static void	ifnet_array_free(struct ifnet_array *);
static struct ifnet_array *ifnet_array_add(struct ifnet *,
		    const struct ifnet_array *);
static struct ifnet_array *ifnet_array_del(struct ifnet *,
		    const struct ifnet_array *);

#ifdef INET6
/*
 * XXX: declared here to avoid having to include many inet6 related
 * files; this should probably be generalized.
 */
extern void	nd6_setmtu(struct ifnet *);
#endif

SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");

static int ifsq_stage_cntmax = 4;
TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax);
SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW,
    &ifsq_stage_cntmax, 0, "ifq staging packet count max");

static int if_stats_compat = 0;
SYSCTL_INT(_net_link, OID_AUTO, stats_compat, CTLFLAG_RW,
    &if_stats_compat, 0, "Export the old ifnet stats for compatibility");
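
/*
 * The staging limit above is both a boot-time tunable and a run-time
 * sysctl.  A minimal usage sketch (the values are illustrative, not
 * recommendations):
 *
 *	# /boot/loader.conf -- applied before network initialization
 *	net.link.stage_cntmax="8"
 *
 *	# at run time; 0 disables if_start staging entirely
 *	sysctl net.link.stage_cntmax=0
 */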

SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL);
/* Must be after netisr_init */
SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL);

static if_com_alloc_t *if_com_alloc[256];
static if_com_free_t *if_com_free[256];

MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");

int			ifqmaxlen = IFQ_MAXLEN;
struct ifnethead	ifnetlist = TAILQ_HEAD_INITIALIZER(ifnetlist);

static struct ifnet_array	ifnet_array0;
static struct ifnet_array	*ifnet_array = &ifnet_array0;

static struct callout		if_slowtimo_timer;
static struct netmsg_base	if_slowtimo_netmsg;

int			if_index = 0;
struct ifnet		**ifindex2ifnet = NULL;
static struct thread	*ifnet_threads[MAXCPU];
static struct mtx	ifnet_mtx = MTX_INITIALIZER("ifnet");

static struct ifsubq_stage_head	ifsubq_stage_heads[MAXCPU];

#ifdef notyet
#define IFQ_KTR_STRING		"ifq=%p"
#define IFQ_KTR_ARGS		struct ifaltq *ifq
#ifndef KTR_IFQ
#define KTR_IFQ			KTR_ALL
#endif
KTR_INFO_MASTER(ifq);
KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS);
KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS);
#define logifq(name, arg)	KTR_LOG(ifq_ ## name, arg)

#define IF_START_KTR_STRING	"ifp=%p"
#define IF_START_KTR_ARGS	struct ifnet *ifp
#ifndef KTR_IF_START
#define KTR_IF_START		KTR_ALL
#endif
KTR_INFO_MASTER(if_start);
KTR_INFO(KTR_IF_START, if_start, run, 0,
    IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, sched, 1,
    IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, avoid, 2,
    IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
    IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
    IF_START_KTR_STRING, IF_START_KTR_ARGS);
#define logifstart(name, arg)	KTR_LOG(if_start_ ## name, arg)
#endif

TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head);

/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as
 * parameters.
 */
/* ARGSUSED */
static void
ifinit(void *dummy)
{
	struct ifnet *ifp;

	callout_init_mp(&if_slowtimo_timer);
	netmsg_init(&if_slowtimo_netmsg, NULL, &netisr_adone_rport,
	    MSGF_PRIORITY, if_slowtimo_dispatch);

	/* XXX is this necessary? */
	ifnet_lock();
	TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
		if (ifp->if_snd.altq_maxlen == 0) {
			if_printf(ifp, "XXX: driver didn't set altq_maxlen\n");
			ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
		}
	}
	ifnet_unlock();

	/* Start if_slowtimo */
	lwkt_sendmsg(netisr_cpuport(0), &if_slowtimo_netmsg.lmsg);
}

static void
ifsq_ifstart_ipifunc(void *arg)
{
	struct ifaltq_subque *ifsq = arg;
	struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid);

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), lmsg);
	crit_exit();
}

static __inline void
ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
{
	KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
	TAILQ_REMOVE(&head->stg_head, stage, stg_link);
	stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED);
	stage->stg_cnt = 0;
	stage->stg_len = 0;
}

static __inline void
ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
{
	KKASSERT((stage->stg_flags &
	    (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
	stage->stg_flags |= IFSQ_STAGE_FLAG_QUED;
	TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link);
}

/*
 * Schedule ifnet.if_start on the subqueue owner CPU
 */
static void
ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force)
{
	int cpu;

	if (!force && curthread->td_type == TD_TYPE_NETISR &&
	    ifsq_stage_cntmax > 0) {
		struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);

		stage->stg_cnt = 0;
		stage->stg_len = 0;
		if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
			ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage);
		stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED;
		return;
	}

	cpu = ifsq_get_cpuid(ifsq);
	if (cpu != mycpuid)
		lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq);
	else
		ifsq_ifstart_ipifunc(ifsq);
}

/*
 * NOTE:
 * This function will release the ifnet.if_start subqueue interlock
 * if ifnet.if_start does not need to be scheduled on the subqueue.
 */
static __inline int
ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running)
{
	if (!running || ifsq_is_empty(ifsq)
#ifdef ALTQ
	    || ifsq->ifsq_altq->altq_tbr != NULL
#endif
	) {
		ALTQ_SQ_LOCK(ifsq);
		/*
		 * The ifnet.if_start subqueue interlock is released, if:
		 * 1) Hardware can not take any packets, due to
		 *    o  interface is marked down
		 *    o  hardware queue is full (ifsq_is_oactive)
		 *    Under the second situation, hardware interrupt
		 *    or polling(4) will call/schedule ifnet.if_start
		 *    on the subqueue when the hardware queue is ready.
		 * 2) There is no packet in the subqueue.
		 *    Further ifq_dispatch or ifq_handoff will call/
		 *    schedule ifnet.if_start on the subqueue.
		 * 3) TBR is used and it does not allow further
		 *    dequeueing.
		 *    The TBR callout will call ifnet.if_start on the
		 *    subqueue.
		 */
		if (!running || !ifsq_data_ready(ifsq)) {
			ifsq_clr_started(ifsq);
			ALTQ_SQ_UNLOCK(ifsq);
			return 0;
		}
		ALTQ_SQ_UNLOCK(ifsq);
	}
	return 1;
}

static void
ifsq_ifstart_dispatch(netmsg_t msg)
{
	struct lwkt_msg *lmsg = &msg->base.lmsg;
	struct ifaltq_subque *ifsq = lmsg->u.ms_resultp;
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct globaldata *gd = mycpu;
	int running = 0, need_sched;

	crit_enter_gd(gd);

	lwkt_replymsg(lmsg, 0);	/* reply ASAP */

	if (gd->gd_cpuid != ifsq_get_cpuid(ifsq)) {
		/*
		 * We need to chase the subqueue owner CPU change.
		 */
		ifsq_ifstart_schedule(ifsq, 1);
		crit_exit_gd(gd);
		return;
	}

	ifsq_serialize_hw(ifsq);
	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
		ifp->if_start(ifp, ifsq);
		if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
			running = 1;
	}
	need_sched = ifsq_ifstart_need_schedule(ifsq, running);
	ifsq_deserialize_hw(ifsq);

	if (need_sched) {
		/*
		 * More data needs to be transmitted; ifnet.if_start is
		 * scheduled on the subqueue owner CPU, and we keep going.
		 * NOTE: the ifnet.if_start subqueue interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}

	crit_exit_gd(gd);
}

/* Device driver ifnet.if_start helper function */
void
ifsq_devstart(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	int running = 0;

	ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);

	ALTQ_SQ_LOCK(ifsq);
	if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) {
		ALTQ_SQ_UNLOCK(ifsq);
		return;
	}
	ifsq_set_started(ifsq);
	ALTQ_SQ_UNLOCK(ifsq);

	ifp->if_start(ifp, ifsq);

	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
		running = 1;

	if (ifsq_ifstart_need_schedule(ifsq, running)) {
		/*
		 * More data needs to be transmitted; ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: the ifnet.if_start interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}
}

void
if_devstart(struct ifnet *ifp)
{
	ifsq_devstart(ifq_get_subq_default(&ifp->if_snd));
}

/* Device driver ifnet.if_start schedule helper function */
void
ifsq_devstart_sched(struct ifaltq_subque *ifsq)
{
	ifsq_ifstart_schedule(ifsq, 1);
}

void
if_devstart_sched(struct ifnet *ifp)
{
	ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd));
}
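
/*
 * Usage sketch for the if_devstart() helper above.  A hypothetical
 * driver ("foo"; the foo_* names are illustrative, not from this
 * file) would call it from its TX-completion path, while holding its
 * serializer, to restart transmission once descriptors have been
 * reclaimed:
 *
 *	static void
 *	foo_txeof(struct foo_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->arpcom.ac_if;
 *
 *		... reclaim completed TX descriptors ...
 *		ifsq_clr_oactive(ifq_get_subq_default(&ifp->if_snd));
 *		if (!ifq_is_empty(&ifp->if_snd))
 *			if_devstart(ifp);
 *	}
 */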

static void
if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_enter(ifp->if_serializer);
}

static void
if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	return lwkt_serialize_try(ifp->if_serializer);
}

#ifdef INVARIANTS
static void
if_default_serialize_assert(struct ifnet *ifp,
    enum ifnet_serialize slz __unused, boolean_t serialized)
{
	if (serialized)
		ASSERT_SERIALIZED(ifp->if_serializer);
	else
		ASSERT_NOT_SERIALIZED(ifp->if_serializer);
}
#endif
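
/*
 * Attachment sketch (hypothetical driver "foo"; not from this file).
 * A driver either supplies its own serialize methods (if_serialize,
 * if_deserialize, if_tryserialize and, with INVARIANTS,
 * if_serialize_assert), or passes a serializer, or passes NULL to get
 * the embedded default serializer.  Assuming an Ethernet device
 * attached via the Ethernet layer's ether_ifattach(ifp, lladdr,
 * serializer) entry point, which forwards the serializer argument to
 * if_attach() below:
 *
 *	ether_ifattach(ifp, sc->foo_enaddr, NULL);           - default
 *	ether_ifattach(ifp, sc->foo_enaddr, &sc->foo_slize); - shared
 *
 * Sharing the driver's interrupt serializer lets the same lock cover
 * both the interrupt interlock and the device queue.
 */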

/*
 * Attach an interface to the list of "active" interfaces.
 *
 * The serializer is optional.
 */
void
if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
{
	unsigned socksize;
	int namelen, masklen;
	struct sockaddr_dl *sdl, *sdl_addr;
	struct ifaddr *ifa;
	struct ifaltq *ifq;
	struct ifnet **old_ifindex2ifnet = NULL;
	struct ifnet_array *old_ifnet_array;
	int i, q;

	static int if_indexlim = 8;

	if (ifp->if_serialize != NULL) {
		KASSERT(ifp->if_deserialize != NULL &&
		    ifp->if_tryserialize != NULL &&
		    ifp->if_serialize_assert != NULL,
		    ("serialize functions are partially setup"));

		/*
		 * If the device supplies serialize functions,
		 * then clear if_serializer to catch any invalid
		 * usage of this field.
		 */
		KASSERT(serializer == NULL,
		    ("both serialize functions and default serializer "
		     "are supplied"));
		ifp->if_serializer = NULL;
	} else {
		KASSERT(ifp->if_deserialize == NULL &&
		    ifp->if_tryserialize == NULL &&
		    ifp->if_serialize_assert == NULL,
		    ("serialize functions are partially setup"));
		ifp->if_serialize = if_default_serialize;
		ifp->if_deserialize = if_default_deserialize;
		ifp->if_tryserialize = if_default_tryserialize;
#ifdef INVARIANTS
		ifp->if_serialize_assert = if_default_serialize_assert;
#endif

		/*
		 * The serializer can be passed in from the device,
		 * allowing the same serializer to be used for both
		 * the interrupt interlock and the device queue.
		 * If not specified, the netif structure will use an
		 * embedded serializer.
		 */
		if (serializer == NULL) {
			serializer = &ifp->if_default_serializer;
			lwkt_serialize_init(serializer);
		}
		ifp->if_serializer = serializer;
	}

	/*
	 * XXX -
	 * The old code would work if the interface passed a pre-existing
	 * chain of ifaddrs to this code.  We don't trust our callers to
	 * properly initialize the tailq, however, so we no longer allow
	 * this unlikely case.
	 */
	ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead),
	    M_IFADDR, M_WAITOK | M_ZERO);
	for (i = 0; i < ncpus; ++i)
		TAILQ_INIT(&ifp->if_addrheads[i]);

	TAILQ_INIT(&ifp->if_multiaddrs);
	TAILQ_INIT(&ifp->if_groups);
	getmicrotime(&ifp->if_lastchange);

	/*
	 * Create a link-level name for this device.
	 */
	namelen = strlen(ifp->if_xname);
	masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
	socksize = masklen + ifp->if_addrlen;
	if (socksize < sizeof(*sdl))
		socksize = sizeof(*sdl);
	socksize = RT_ROUNDUP(socksize);
	ifa = ifa_create(sizeof(struct ifaddr) + 2 * socksize);
	sdl = sdl_addr = (struct sockaddr_dl *)(ifa + 1);
	sdl->sdl_len = socksize;
	sdl->sdl_family = AF_LINK;
	bcopy(ifp->if_xname, sdl->sdl_data, namelen);
	sdl->sdl_nlen = namelen;
	sdl->sdl_type = ifp->if_type;
	ifp->if_lladdr = ifa;
	ifa->ifa_ifp = ifp;
	ifa->ifa_rtrequest = link_rtrequest;
	ifa->ifa_addr = (struct sockaddr *)sdl;
	sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
	ifa->ifa_netmask = (struct sockaddr *)sdl;
	sdl->sdl_len = masklen;
	while (namelen != 0)
		sdl->sdl_data[--namelen] = 0xff;
	ifa_iflink(ifa, ifp, 0 /* Insert head */);

	ifp->if_data_pcpu = kmalloc_cachealign(
	    ncpus * sizeof(struct ifdata_pcpu), M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifp->if_mapsubq == NULL)
		ifp->if_mapsubq = ifq_mapsubq_default;

	ifq = &ifp->if_snd;
	ifq->altq_type = 0;
	ifq->altq_disc = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	ifq->altq_tbr = NULL;
	ifq->altq_ifp = ifp;

	if (ifq->altq_subq_cnt <= 0)
		ifq->altq_subq_cnt = 1;
	ifq->altq_subq = kmalloc_cachealign(
	    ifq->altq_subq_cnt * sizeof(struct ifaltq_subque),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	if (ifq->altq_maxlen == 0) {
		if_printf(ifp, "driver didn't set altq_maxlen\n");
		ifq_set_maxlen(ifq, ifqmaxlen);
	}

	for (q = 0; q < ifq->altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifq->altq_subq[q];

		ALTQ_SQ_LOCK_INIT(ifsq);
		ifsq->ifsq_index = q;

		ifsq->ifsq_altq = ifq;
		ifsq->ifsq_ifp = ifp;

		ifsq->ifsq_maxlen = ifq->altq_maxlen;
		ifsq->ifsq_maxbcnt = ifsq->ifsq_maxlen * MCLBYTES;
		ifsq->ifsq_prepended = NULL;
		ifsq->ifsq_started = 0;
		ifsq->ifsq_hw_oactive = 0;
		ifsq_set_cpuid(ifsq, 0);
		if (ifp->if_serializer != NULL)
			ifsq_set_hw_serialize(ifsq, ifp->if_serializer);

		ifsq->ifsq_stage =
		    kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage),
		    M_DEVBUF, M_WAITOK | M_ZERO);
		for (i = 0; i < ncpus; ++i)
			ifsq->ifsq_stage[i].stg_subq = ifsq;

		ifsq->ifsq_ifstart_nmsg =
		    kmalloc(ncpus * sizeof(struct netmsg_base),
		    M_LWKTMSG, M_WAITOK);
		for (i = 0; i < ncpus; ++i) {
			netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL,
			    &netisr_adone_rport, 0, ifsq_ifstart_dispatch);
			ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq;
		}
	}
	ifq_set_classic(ifq);

	/*
	 * Increase mbuf cluster/jcluster limits for the mbufs that
	 * could sit on the device queues for quite some time.
	 */
	if (ifp->if_nmbclusters > 0)
		mcl_inclimit(ifp->if_nmbclusters);
	if (ifp->if_nmbjclusters > 0)
		mjcl_inclimit(ifp->if_nmbjclusters);

	/*
	 * Install this ifp into ifindex2ifnet, the ifnet queue and the
	 * ifnet array after it is set up.
	 *
	 * Protect the ifindex2ifnet, ifnet queue and ifnet array changes
	 * with the ifnet lock, so that non-netisr threads get a
	 * consistent view.
	 */
	ifnet_lock();

	/* Don't update if_index until ifindex2ifnet is setup */
	ifp->if_index = if_index + 1;
	sdl_addr->sdl_index = ifp->if_index;

	/*
	 * Install this ifp into ifindex2ifnet
	 */
	if (ifindex2ifnet == NULL || ifp->if_index >= if_indexlim) {
		unsigned int n;
		struct ifnet **q;

		/*
		 * Grow ifindex2ifnet
		 */
		if_indexlim <<= 1;
		n = if_indexlim * sizeof(*q);
		q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO);
		if (ifindex2ifnet != NULL) {
			bcopy(ifindex2ifnet, q, n/2);
			/* Free old ifindex2ifnet after syncing all netisrs */
			old_ifindex2ifnet = ifindex2ifnet;
		}
		ifindex2ifnet = q;
	}
	ifindex2ifnet[ifp->if_index] = ifp;
	/*
	 * Update if_index after this ifp is installed into ifindex2ifnet,
	 * so that netisrs get a consistent view of ifindex2ifnet.
	 */
	cpu_sfence();
	if_index = ifp->if_index;

	/*
	 * Install this ifp into the ifnet array.
	 */
	/* Free old ifnet array after syncing all netisrs */
	old_ifnet_array = ifnet_array;
	ifnet_array = ifnet_array_add(ifp, old_ifnet_array);

	/*
	 * Install this ifp into the ifnet queue.
	 */
	TAILQ_INSERT_TAIL(&ifnetlist, ifp, if_link);

	ifnet_unlock();

	/*
	 * Sync all netisrs so that the old ifindex2ifnet and ifnet array
	 * are no longer accessed and we can free them safely later on.
	 */
	netmsg_service_sync();
	if (old_ifindex2ifnet != NULL)
		kfree(old_ifindex2ifnet, M_IFADDR);
	ifnet_array_free(old_ifnet_array);

	if (!SLIST_EMPTY(&domains))
		if_attachdomain1(ifp);

	/* Announce the interface. */
	EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);
	devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
	rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
}

static void
if_attachdomain(void *dummy)
{
	struct ifnet *ifp;

	ifnet_lock();
	TAILQ_FOREACH(ifp, &ifnetlist, if_list)
		if_attachdomain1(ifp);
	ifnet_unlock();
}
SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST,
    if_attachdomain, NULL);

static void
if_attachdomain1(struct ifnet *ifp)
{
	struct domain *dp;

	crit_enter();

	/* address family dependent data region */
	bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifattach)
			ifp->if_afdata[dp->dom_family] =
			    (*dp->dom_ifattach)(ifp);
	crit_exit();
}

/*
 * Purge all addresses whose type is _not_ AF_LINK
 */
static void
if_purgeaddrs_nolink_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;
	struct ifnet *ifp = lmsg->u.ms_resultp;
	struct ifaddr_container *ifac, *next;

	ASSERT_IN_NETISR(0);

	/*
	 * The ifaddr processing in the following loop can block;
	 * however, this function is called in netisr0, where all
	 * ifaddr list changes happen, so blocking here is harmless.
	 */
	TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
	    ifa_link, next) {
		struct ifaddr *ifa = ifac->ifa;

		/* Ignore marker */
		if (ifa->ifa_addr->sa_family == AF_UNSPEC)
			continue;

		/* Leave link ifaddr as it is */
		if (ifa->ifa_addr->sa_family == AF_LINK)
			continue;
#ifdef INET
		/* XXX: Ugly!!  Ad hoc, just for INET */
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
			struct ifaliasreq ifr;
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in4 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			bzero(&ifr, sizeof ifr);
			ifr.ifra_addr = *ifa->ifa_addr;
			if (ifa->ifa_dstaddr)
				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
			if (in_control(SIOCDIFADDR, (caddr_t)&ifr, ifp,
			    NULL) == 0)
				continue;
		}
#endif /* INET */
#ifdef INET6
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in6 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			in6_purgeaddr(ifa);
			/* ifp_addrhead is already updated */
			continue;
		}
#endif /* INET6 */
		ifa_ifunlink(ifa, ifp);
		ifa_destroy(ifa);
	}

	lwkt_replymsg(lmsg, 0);
}

void
if_purgeaddrs_nolink(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg = &nmsg.lmsg;

	ASSERT_CANDOMSG_NETISR0(curthread);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
	    if_purgeaddrs_nolink_dispatch);
	lmsg->u.ms_resultp = ifp;
	lwkt_domsg(netisr_cpuport(0), lmsg, 0);
}

static void
ifq_stage_detach_handler(netmsg_t nmsg)
{
	struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp;
	int q;

	for (q = 0; q < ifq->altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
		struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);

		if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED)
			ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage);
	}
	lwkt_replymsg(&nmsg->lmsg, 0);
}

static void
ifq_stage_detach(struct ifaltq *ifq)
{
	struct netmsg_base base;
	int cpu;

	netmsg_init(&base, NULL, &curthread->td_msgport, 0,
	    ifq_stage_detach_handler);
	base.lmsg.u.ms_resultp = ifq;

	for (cpu = 0; cpu < ncpus; ++cpu)
		lwkt_domsg(netisr_cpuport(cpu), &base.lmsg, 0);
}

struct netmsg_if_rtdel {
	struct netmsg_base	base;
	struct ifnet		*ifp;
};

static void
if_rtdel_dispatch(netmsg_t msg)
{
	struct netmsg_if_rtdel *rmsg = (void *)msg;
	int i, nextcpu, cpu;

	cpu = mycpuid;
	for (i = 1; i <= AF_MAX; i++) {
		struct radix_node_head *rnh;

		if ((rnh = rt_tables[cpu][i]) == NULL)
			continue;
		rnh->rnh_walktree(rnh, if_rtdel, rmsg->ifp);
	}

	nextcpu = cpu + 1;
	if (nextcpu < ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
	else
		lwkt_replymsg(&rmsg->base.lmsg, 0);
}

/*
 * Detach an interface, removing it from the
 * list of "active" interfaces.
 */
void
if_detach(struct ifnet *ifp)
{
	struct ifnet_array *old_ifnet_array;
	struct netmsg_if_rtdel msg;
	struct domain *dp;
	int q;

	/* Announce that the interface is gone. */
	EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);
	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
	devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);

	/*
	 * Remove this ifp from ifindex2ifnet, the ifnet queue and the
	 * ifnet array before it is whacked.
	 *
	 * Protect the ifindex2ifnet, ifnet queue and ifnet array changes
	 * with the ifnet lock, so that non-netisr threads get a
	 * consistent view.
	 */
	ifnet_lock();

	/*
	 * Remove this ifp from ifindex2ifnet and maybe decrement if_index.
	 */
	ifindex2ifnet[ifp->if_index] = NULL;
	while (if_index > 0 && ifindex2ifnet[if_index] == NULL)
		if_index--;

	/*
	 * Remove this ifp from the ifnet queue.
	 */
	TAILQ_REMOVE(&ifnetlist, ifp, if_link);

	/*
	 * Remove this ifp from the ifnet array.
	 */
	/* Free old ifnet array after syncing all netisrs */
	old_ifnet_array = ifnet_array;
	ifnet_array = ifnet_array_del(ifp, old_ifnet_array);

	ifnet_unlock();

	/*
	 * Sync all netisrs so that the old ifnet array is no longer
	 * accessed and we can free it safely later on.
	 */
	netmsg_service_sync();
	ifnet_array_free(old_ifnet_array);

	/*
	 * Remove routes and flush queues.
	 */
	crit_enter();
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		ifpoll_deregister(ifp);
#endif
	if_down(ifp);

	/* Decrease the mbuf cluster/jcluster limits increased by us */
	if (ifp->if_nmbclusters > 0)
		mcl_inclimit(-ifp->if_nmbclusters);
	if (ifp->if_nmbjclusters > 0)
		mjcl_inclimit(-ifp->if_nmbjclusters);

#ifdef ALTQ
	if (ifq_is_enabled(&ifp->if_snd))
		altq_disable(&ifp->if_snd);
	if (ifq_is_attached(&ifp->if_snd))
		altq_detach(&ifp->if_snd);
#endif

	/*
	 * Clean up all addresses.
	 */
	ifp->if_lladdr = NULL;

	if_purgeaddrs_nolink(ifp);
	if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
		struct ifaddr *ifa;

		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		KASSERT(ifa->ifa_addr->sa_family == AF_LINK,
		    ("non-link ifaddr is left on if_addrheads"));

		ifa_ifunlink(ifa, ifp);
		ifa_destroy(ifa);
		KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]),
		    ("there are still ifaddrs left on if_addrheads"));
	}

#ifdef INET
	/*
	 * Remove all IPv4 kernel structures related to ifp.
	 */
	in_ifdetach(ifp);
#endif

#ifdef INET6
	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be
	 * done before removing the routing entries below, since IPv6
	 * interface direct routes are expected to be removed by the
	 * IPv6-specific kernel API.  Otherwise, the kernel will detect
	 * an inconsistency and complain about it.
	 */
	in6_ifdetach(ifp);
#endif

	/*
	 * Delete all remaining routes using this interface.
	 */
	netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	    if_rtdel_dispatch);
	msg.ifp = ifp;
	rt_domsg_global(&msg.base);

	SLIST_FOREACH(dp, &domains, dom_next)
		if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
			(*dp->dom_ifdetach)(ifp,
			    ifp->if_afdata[dp->dom_family]);

	kfree(ifp->if_addrheads, M_IFADDR);

	lwkt_synchronize_ipiqs("if_detach");
	ifq_stage_detach(&ifp->if_snd);

	for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q];

		kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG);
		kfree(ifsq->ifsq_stage, M_DEVBUF);
	}
	kfree(ifp->if_snd.altq_subq, M_DEVBUF);

	kfree(ifp->if_data_pcpu, M_DEVBUF);

	crit_exit();
}

/*
 * Create an interface group without members.
 */
struct ifg_group *
if_creategroup(const char *groupname)
{
	struct ifg_group *ifg = NULL;

	if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group),
	    M_TEMP, M_NOWAIT)) == NULL)
		return (NULL);

	strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
	ifg->ifg_refcnt = 0;
	ifg->ifg_carp_demoted = 0;
	TAILQ_INIT(&ifg->ifg_members);
#if NPF > 0
	pfi_attach_ifgroup(ifg);
#endif
	TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next);

	return (ifg);
}

/*
 * Add a group to an interface.
 */
int
if_addgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list *ifgl;
	struct ifg_group *ifg = NULL;
	struct ifg_member *ifgm;

	if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
	    groupname[strlen(groupname) - 1] <= '9')
		return (EINVAL);

	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			return (EEXIST);

	if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL)
		return (ENOMEM);

	if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) {
		kfree(ifgl, M_TEMP);
		return (ENOMEM);
	}

	TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, groupname))
			break;

	if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) {
		kfree(ifgl, M_TEMP);
		kfree(ifgm, M_TEMP);
		return (ENOMEM);
	}

	ifg->ifg_refcnt++;
	ifgl->ifgl_group = ifg;
	ifgm->ifgm_ifp = ifp;

	TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
	TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);

#if NPF > 0
	pfi_group_change(groupname);
#endif

	return (0);
}

/*
 * Remove a group from an interface.
 */
int
if_delgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list *ifgl;
	struct ifg_member *ifgm;

	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			break;
	if (ifgl == NULL)
		return (ENOENT);

	TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);

	TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
		if (ifgm->ifgm_ifp == ifp)
			break;

	if (ifgm != NULL) {
		TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
		kfree(ifgm, M_TEMP);
	}

	if (--ifgl->ifgl_group->ifg_refcnt == 0) {
		TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
#if NPF > 0
		pfi_detach_ifgroup(ifgl->ifgl_group);
#endif
		kfree(ifgl->ifgl_group, M_TEMP);
	}

	kfree(ifgl, M_TEMP);

#if NPF > 0
	pfi_group_change(groupname);
#endif

	return (0);
}
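
/*
 * Usage sketch for the group API above (illustrative; not from this
 * file).  Cloned interfaces typically join a group named after their
 * driver, and the trailing-digit restriction in if_addgroup() keeps
 * group names from colliding with interface names:
 *
 *	if_addgroup(ifp, "vlan");     - 0 on success
 *	if_addgroup(ifp, "vlan0");    - EINVAL: name ends in a digit
 *	if_delgroup(ifp, "vlan");     - drops the membership; the group
 *	                                itself is freed when ifg_refcnt
 *	                                reaches zero
 */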

/*
 * Store all the groups of an interface in the memory pointed to
 * by data.
 */
int
if_getgroup(caddr_t data, struct ifnet *ifp)
{
	int len, error;
	struct ifg_list *ifgl;
	struct ifg_req ifgrq, *ifgp;
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;

	if (ifgr->ifgr_len == 0) {
		TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
			ifgr->ifgr_len += sizeof(struct ifg_req);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
		    sizeof(ifgrq.ifgrq_group));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}

/*
 * Store all the members of a group in the memory pointed to by data.
 */
int
if_getgroupmembers(caddr_t data)
{
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
	struct ifg_group *ifg;
	struct ifg_member *ifgm;
	struct ifg_req ifgrq, *ifgp;
	int len, error;

	TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
			break;
	if (ifg == NULL)
		return (ENOENT);

	if (ifgr->ifgr_len == 0) {
		TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
			ifgr->ifgr_len += sizeof(ifgrq);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
		    sizeof(ifgrq.ifgrq_member));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}
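
/*
 * Both routines above implement the usual two-call sizing protocol:
 * the caller first passes ifgr_len == 0 to learn the required buffer
 * size, then allocates and calls again.  A hedged userland sketch,
 * assuming the SIOCGIFGROUP ioctl and the struct layout from
 * <net/if.h>:
 *
 *	struct ifgroupreq ifgr;
 *
 *	bzero(&ifgr, sizeof(ifgr));
 *	strlcpy(ifgr.ifgr_name, "em0", sizeof(ifgr.ifgr_name));
 *	ioctl(s, SIOCGIFGROUP, &ifgr);             - sets ifgr_len
 *	ifgr.ifgr_groups = malloc(ifgr.ifgr_len);
 *	ioctl(s, SIOCGIFGROUP, &ifgr);             - fills the array
 */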

/*
 * Delete routes for a network interface.
 *
 * Called for each routing entry via the rnh->rnh_walktree() call above
 * to delete all route entries referencing a detaching network interface.
 *
 * Arguments:
 *	rn	pointer to node in the routing table
 *	arg	argument passed to rnh->rnh_walktree() - detaching interface
 *
 * Returns:
 *	0	successful
 *	errno	failed - reason indicated
 */
static int
if_rtdel(struct radix_node *rn, void *arg)
{
	struct rtentry *rt = (struct rtentry *)rn;
	struct ifnet *ifp = arg;
	int err;

	if (rt->rt_ifp == ifp) {
		/*
		 * Protect (sorta) against walktree recursion problems
		 * with cloned routes
		 */
		if (!(rt->rt_flags & RTF_UP))
			return (0);

		err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
		    rt_mask(rt), rt->rt_flags, NULL);
		if (err)
			log(LOG_WARNING, "if_rtdel: error %d\n", err);
	}

	return (0);
}

static __inline boolean_t
ifa_prefer(const struct ifaddr *cur_ifa, const struct ifaddr *old_ifa)
{
	if (old_ifa == NULL)
		return TRUE;

	if ((old_ifa->ifa_ifp->if_flags & IFF_UP) == 0 &&
	    (cur_ifa->ifa_ifp->if_flags & IFF_UP))
		return TRUE;
	if ((old_ifa->ifa_flags & IFA_ROUTE) == 0 &&
	    (cur_ifa->ifa_flags & IFA_ROUTE))
		return TRUE;
	return FALSE;
}

/*
 * Locate an interface based on a complete address.
 */
struct ifaddr *
ifa_ifwithaddr(struct sockaddr *addr)
{
	const struct ifnet_array *arr;
	int i;

	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (sa_equal(addr, ifa->ifa_addr))
				return (ifa);
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    /* IPv6 doesn't have broadcast */
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr))
				return (ifa);
		}
	}
	return (NULL);
}

/*
 * Locate the point-to-point interface with a given destination address.
 */
struct ifaddr *
ifa_ifwithdstaddr(struct sockaddr *addr)
{
	const struct ifnet_array *arr;
	int i;

	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];
		struct ifaddr_container *ifac;

		if (!(ifp->if_flags & IFF_POINTOPOINT))
			continue;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;

			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (ifa->ifa_dstaddr &&
			    sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		}
	}
	return (NULL);
}

/*
 * Find an interface on a specific network.  If many match, the most
 * specific one found is chosen.
 */
struct ifaddr *
ifa_ifwithnet(struct sockaddr *addr)
{
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;
	char *addr_data = addr->sa_data, *cplim;
	const struct ifnet_array *arr;
	int i;

	/*
	 * AF_LINK addresses can be looked up directly by their index
	 * number, so do that if we can.
	 */
	if (af == AF_LINK) {
		struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;

		if (sdl->sdl_index && sdl->sdl_index <= if_index)
			return (ifindex2ifnet[sdl->sdl_index]->if_lladdr);
	}

	/*
	 * Scan through each interface, looking for ones that have
	 * addresses in this address family.
	 */
	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];
		struct ifaddr_container *ifac;

		TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
			struct ifaddr *ifa = ifac->ifa;
			char *cp, *cp2, *cp3;

			if (ifa->ifa_addr->sa_family != af)
next:				continue;
			if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end
				 * may be a single node in the network we
				 * are looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr))
					return (ifa);
			} else {
				/*
				 * If we have a special address handler,
				 * then use it instead of the generic one.
				 */
				if (ifa->ifa_claim_addr) {
					if ((*ifa->ifa_claim_addr)(ifa, addr)) {
						return (ifa);
					} else {
						continue;
					}
				}

				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 * (A byte at a time)
				 */
				if (ifa->ifa_netmask == 0)
					continue;
				cp = addr_data;
				cp2 = ifa->ifa_addr->sa_data;
				cp3 = ifa->ifa_netmask->sa_data;
				cplim = ifa->ifa_netmask->sa_len +
				    (char *)ifa->ifa_netmask;
				while (cp3 < cplim)
					if ((*cp++ ^ *cp2++) & *cp3++)
						goto next; /* next address! */
				/*
				 * If the netmask of what we just found
				 * is more specific than what we had before
				 * (if we had one) then remember the new one
				 * before continuing to search for an even
				 * better one.  If the netmasks are equal,
				 * we prefer this ifa based on the result
				 * of ifa_prefer().
				 */
				if (ifa_maybe == NULL ||
				    rn_refines((char *)ifa->ifa_netmask,
				        (char *)ifa_maybe->ifa_netmask) ||
				    (sa_equal(ifa_maybe->ifa_netmask,
				        ifa->ifa_netmask) &&
				     ifa_prefer(ifa, ifa_maybe)))
					ifa_maybe = ifa;
			}
		}
	}
	return (ifa_maybe);
}
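
/*
 * A worked example of the masked byte-compare above.  Suppose the
 * candidate ifa is 10.1.0.1 with netmask 255.255.0.0 and we look up
 * 10.1.7.9: for each byte, (addr ^ ifa_addr) & mask must be zero
 * (the non-address bytes of sa_data, e.g. the port field of a
 * sockaddr_in, are likewise masked off by zero bytes in the netmask):
 *
 *	addr   ifa   xor    mask   result
 *	 10     10   0x00   0xff   0
 *	  1      1   0x00   0xff   0
 *	  7      0   0x07   0x00   0   <- host bits masked off
 *	  9      1   0x08   0x00   0
 *
 * All bytes pass, so 10.1.7.9 is on the 10.1/16 network and this ifa
 * becomes a match candidate (ifa_maybe).
 */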

/*
 * Find an interface address specific to an interface best matching
 * a given address.
 */
struct ifaddr *
ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
{
	struct ifaddr_container *ifac;
	char *cp, *cp2, *cp3;
	char *cplim;
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;

	if (af >= AF_MAX)
		return (0);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		if (ifa->ifa_addr->sa_family != af)
			continue;
		if (ifa_maybe == NULL)
			ifa_maybe = ifa;
		if (ifa->ifa_netmask == NULL) {
			if (sa_equal(addr, ifa->ifa_addr) ||
			    (ifa->ifa_dstaddr != NULL &&
			     sa_equal(addr, ifa->ifa_dstaddr)))
				return (ifa);
			continue;
		}
		if (ifp->if_flags & IFF_POINTOPOINT) {
			if (sa_equal(addr, ifa->ifa_dstaddr))
				return (ifa);
		} else {
			cp = addr->sa_data;
			cp2 = ifa->ifa_addr->sa_data;
			cp3 = ifa->ifa_netmask->sa_data;
			cplim = ifa->ifa_netmask->sa_len +
			    (char *)ifa->ifa_netmask;
			for (; cp3 < cplim; cp3++)
				if ((*cp++ ^ *cp2++) & *cp3)
					break;
			if (cp3 == cplim)
				return (ifa);
		}
	}
	return (ifa_maybe);
}

/*
 * Default action when installing a route with a Link Level gateway.
 * Lookup an appropriate real ifa to point to.
 * This should be moved to /sys/net/link.c eventually.
 */
static void
link_rtrequest(int cmd, struct rtentry *rt)
{
	struct ifaddr *ifa;
	struct sockaddr *dst;
	struct ifnet *ifp;

	if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL ||
	    (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL)
		return;
	ifa = ifaof_ifpforaddr(dst, ifp);
	if (ifa != NULL) {
		IFAFREE(rt->rt_ifa);
		IFAREF(ifa);
		rt->rt_ifa = ifa;
		if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
			ifa->ifa_rtrequest(cmd, rt);
	}
}

struct netmsg_ifroute {
	struct netmsg_base	base;
	struct ifnet		*ifp;
	int			flag;
	int			fam;
};

/*
 * Mark an interface down and notify protocols of the transition.
 */
static void
if_unroute_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg;
	struct ifnet *ifp = msg->ifp;
	int flag = msg->flag, fam = msg->fam;
	struct ifaddr_container *ifac;

	ifp->if_flags &= ~flag;
	getmicrotime(&ifp->if_lastchange);
	/*
	 * The ifaddr processing in the following loop can block;
	 * however, this function is called in netisr0, where all
	 * ifaddr list changes happen, so blocking here is harmless.
	 */
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		/* Ignore marker */
		if (ifa->ifa_addr->sa_family == AF_UNSPEC)
			continue;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFDOWN, ifa->ifa_addr);
	}
	ifq_purge_all(&ifp->if_snd);
	rt_ifmsg(ifp);

	lwkt_replymsg(&nmsg->lmsg, 0);
}

void
if_unroute(struct ifnet *ifp, int flag, int fam)
{
	struct netmsg_ifroute msg;

	ASSERT_CANDOMSG_NETISR0(curthread);

	netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0,
	    if_unroute_dispatch);
	msg.ifp = ifp;
	msg.flag = flag;
	msg.fam = fam;
	lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0);
}

/*
 * Mark an interface up and notify protocols of the transition.
 */
static void
if_route_dispatch(netmsg_t nmsg)
{
	struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg;
	struct ifnet *ifp = msg->ifp;
	int flag = msg->flag, fam = msg->fam;
	struct ifaddr_container *ifac;

	ifq_purge_all(&ifp->if_snd);
	ifp->if_flags |= flag;
	getmicrotime(&ifp->if_lastchange);
	/*
	 * The ifaddr processing in the following loop can block;
	 * however, this function is called in netisr0, where all
	 * ifaddr list changes happen, so blocking here is harmless.
	 */
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		struct ifaddr *ifa = ifac->ifa;

		/* Ignore marker */
		if (ifa->ifa_addr->sa_family == AF_UNSPEC)
			continue;

		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			kpfctlinput(PRC_IFUP, ifa->ifa_addr);
	}
	rt_ifmsg(ifp);
#ifdef INET6
	in6_if_up(ifp);
#endif

	lwkt_replymsg(&nmsg->lmsg, 0);
}

void
if_route(struct ifnet *ifp, int flag, int fam)
{
	struct netmsg_ifroute msg;

	ASSERT_CANDOMSG_NETISR0(curthread);

	netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0,
	    if_route_dispatch);
	msg.ifp = ifp;
	msg.flag = flag;
	msg.fam = fam;
	lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0);
}

/*
 * Mark an interface down and notify protocols of the transition.  An
 * interface going down is also considered to be a synchronizing event.
 * We must ensure that all packet processing related to the interface
 * has completed before we return so e.g. the caller can free the ifnet
 * structure that the mbufs may be referencing.
 *
 * NOTE: must be called at splnet or equivalent.
 */
void
if_down(struct ifnet *ifp)
{
	if_unroute(ifp, IFF_UP, AF_UNSPEC);
	netmsg_service_sync();
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_up(struct ifnet *ifp)
{
	if_route(ifp, IFF_UP, AF_UNSPEC);
}

/*
 * Process a link state change.
 * NOTE: must be called at splsoftnet or equivalent.
 */
void
if_link_state_change(struct ifnet *ifp)
{
	int link_state = ifp->if_link_state;

	rt_ifmsg(ifp);
	devctl_notify("IFNET", ifp->if_xname,
	    (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
}
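
/*
 * Sketch of the expected caller pattern for if_link_state_change()
 * (hypothetical driver "foo"; not from this file).  A driver updates
 * if_link_state from its MII/link interrupt and then reports the
 * change:
 *
 *	static void
 *	foo_link_intr(struct foo_softc *sc)
 *	{
 *		struct ifnet *ifp = &sc->arpcom.ac_if;
 *
 *		ifp->if_link_state = foo_link_is_up(sc) ?
 *		    LINK_STATE_UP : LINK_STATE_DOWN;
 *		if_link_state_change(ifp);
 *	}
 */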

/*
 * Handle interface watchdog timer routines.  Called
 * from softclock, we decrement timers (if set) and
 * call the appropriate interface routine on expiration.
 */
static void
if_slowtimo_dispatch(netmsg_t nmsg)
{
	struct globaldata *gd = mycpu;
	const struct ifnet_array *arr;
	int i;

	ASSERT_IN_NETISR(0);

	crit_enter_gd(gd);
	lwkt_replymsg(&nmsg->lmsg, 0);	/* reply ASAP */
	crit_exit_gd(gd);

	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];

		crit_enter_gd(gd);

		if (if_stats_compat) {
			IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets);
			IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors);
			IFNET_STAT_GET(ifp, opackets, ifp->if_opackets);
			IFNET_STAT_GET(ifp, oerrors, ifp->if_oerrors);
			IFNET_STAT_GET(ifp, collisions, ifp->if_collisions);
			IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes);
			IFNET_STAT_GET(ifp, obytes, ifp->if_obytes);
			IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts);
			IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts);
			IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops);
			IFNET_STAT_GET(ifp, noproto, ifp->if_noproto);
			IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops);
		}

		if (ifp->if_timer == 0 || --ifp->if_timer) {
			crit_exit_gd(gd);
			continue;
		}
		if (ifp->if_watchdog) {
			if (ifnet_tryserialize_all(ifp)) {
				(*ifp->if_watchdog)(ifp);
				ifnet_deserialize_all(ifp);
			} else {
				/* try again next timeout */
				++ifp->if_timer;
			}
		}

		crit_exit_gd(gd);
	}

	callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL);
}

static void
if_slowtimo(void *arg __unused)
{
	struct lwkt_msg *lmsg = &if_slowtimo_netmsg.lmsg;

	KASSERT(mycpuid == 0, ("not on cpu0"));
	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg);
	crit_exit();
}
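
/*
 * Driver-side sketch of the if_timer/if_watchdog contract enforced
 * above (hypothetical driver "foo"; not from this file).  A driver
 * arms if_timer when it queues a transmit and clears it on
 * completion; if the timer ticks down to zero without completion,
 * foo_watchdog() runs with the interface fully serialized:
 *
 *	sc->arpcom.ac_if.if_watchdog = foo_watchdog;
 *	...
 *	ifp->if_timer = 5;    - arm: fires after ~5 slowtimo periods
 *	...
 *	ifp->if_timer = 0;    - TX done: disarm
 */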

/*
 * Map an interface name to its interface structure pointer.
 */
struct ifnet *
ifunit(const char *name)
{
	struct ifnet *ifp;

	/*
	 * Search all the interfaces for this name/number
	 */
	KASSERT(mtx_owned(&ifnet_mtx), ("ifnet is not locked"));

	TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
		if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
			break;
	}
	return (ifp);
}

struct ifnet *
ifunit_netisr(const char *name)
{
	const struct ifnet_array *arr;
	int i;

	/*
	 * Search all the interfaces for this name/number
	 */
	arr = ifnet_array_get();
	for (i = 0; i < arr->ifnet_count; ++i) {
		struct ifnet *ifp = arr->ifnet_arr[i];

		if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0)
			return ifp;
	}
	return NULL;
}
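
/*
 * Locking sketch for the two lookups above ("em0" is an illustrative
 * interface name).  ifunit() requires the ifnet lock (see the
 * KASSERT), while ifunit_netisr() relies on the netisr-consistent
 * ifnet array instead:
 *
 *	ifnet_lock();
 *	ifp = ifunit("em0");          - any thread, lock held
 *	...
 *	ifnet_unlock();
 *
 *	ifp = ifunit_netisr("em0");   - from a netisr thread only
 */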

/*
 * Interface ioctls.
 */
int
ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct ifnet *ifp;
	struct ifreq *ifr;
	struct ifstat *ifs;
	int error, do_ifup = 0;
	short oif_flags;
	int new_flags;
	size_t namelen, onamelen;
	char new_name[IFNAMSIZ];
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;

	switch (cmd) {
	case SIOCGIFCONF:
	case OSIOCGIFCONF:
		return (ifconf(cmd, data, cred));
	default:
		break;
	}

	ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCIFCREATE:
	case SIOCIFCREATE2:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
		    cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
	case SIOCIFDESTROY:
		if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0)
			return (error);
		return (if_clone_destroy(ifr->ifr_name));
	case SIOCIFGCLONERS:
		return (if_clone_list((struct if_clonereq *)data));
	default:
		break;
	}

	/*
	 * Nominal ioctl through the interface; look up the ifp and obtain
	 * the ifnet lock to serialize the ifconfig ioctl operation.
	 */
	ifnet_lock();

	ifp = ifunit(ifr->ifr_name);
	if (ifp == NULL) {
		ifnet_unlock();
		return (ENXIO);
	}
	error = 0;

	switch (cmd) {
	case SIOCGIFINDEX:
		ifr->ifr_index = ifp->if_index;
		break;

	case SIOCGIFFLAGS:
		ifr->ifr_flags = ifp->if_flags;
		ifr->ifr_flagshigh = ifp->if_flags >> 16;
		break;

	case SIOCGIFCAP:
		ifr->ifr_reqcap = ifp->if_capabilities;
		ifr->ifr_curcap = ifp->if_capenable;
		break;

	case SIOCGIFMETRIC:
		ifr->ifr_metric = ifp->if_metric;
		break;

	case SIOCGIFMTU:
		ifr->ifr_mtu = ifp->if_mtu;
		break;

	case SIOCGIFTSOLEN:
		ifr->ifr_tsolen = ifp->if_tsolen;
		break;

	case SIOCGIFDATA:
		error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data,
		    sizeof(ifp->if_data));
		break;

	case SIOCGIFPHYS:
		ifr->ifr_phys = ifp->if_physical;
		break;

	case SIOCGIFPOLLCPU:
		ifr->ifr_pollcpu = -1;
		break;

	case SIOCSIFPOLLCPU:
		break;

	case SIOCSIFFLAGS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		new_flags = (ifr->ifr_flags & 0xffff) |
		    (ifr->ifr_flagshigh << 16);
		if (ifp->if_flags & IFF_SMART) {
			/* Smart drivers twiddle their own routes */
		} else if (ifp->if_flags & IFF_UP &&
		    (new_flags & IFF_UP) == 0) {
			if_down(ifp);
		} else if (new_flags & IFF_UP &&
		    (ifp->if_flags & IFF_UP) == 0) {
			do_ifup = 1;
		}

#ifdef IFPOLL_ENABLE
		if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) {
			if (new_flags & IFF_NPOLLING)
				ifpoll_register(ifp);
			else
				ifpoll_deregister(ifp);
		}
#endif

		ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
		    (new_flags &~ IFF_CANTCHANGE);
		if (new_flags & IFF_PPROMISC) {
			/* Permanently promiscuous mode requested */
			ifp->if_flags |= IFF_PROMISC;
		} else if (ifp->if_pcount == 0) {
			ifp->if_flags &= ~IFF_PROMISC;
		}
		if (ifp->if_ioctl) {
			ifnet_serialize_all(ifp);
			ifp->if_ioctl(ifp, cmd, data, cred);
			ifnet_deserialize_all(ifp);
		}
		if (do_ifup)
			if_up(ifp);
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFCAP:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifr->ifr_reqcap & ~ifp->if_capabilities) {
			error = EINVAL;
			break;
		}
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;

	case SIOCSIFNAME:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
		if (error)
			break;
		if (new_name[0] == '\0') {
			error = EINVAL;
			break;
		}
		if (ifunit(new_name) != NULL) {
			error = EEXIST;
			break;
		}

		EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

		/* Announce the departure of the interface. */
		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);

		strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
		namelen = strlen(new_name);
		onamelen = sdl->sdl_nlen;
		/*
		 * Move the address if needed.  This is safe because we
		 * allocate space for a name of length IFNAMSIZ when we
		 * create this in if_attach().
		 */
		if (namelen != onamelen) {
			bcopy(sdl->sdl_data + onamelen,
			    sdl->sdl_data + namelen, sdl->sdl_alen);
		}
		bcopy(new_name, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
		bzero(sdl->sdl_data, onamelen);
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;

		EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);

		/* Announce the return of the interface. */
		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
		break;

	case SIOCSIFMETRIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		ifp->if_metric = ifr->ifr_metric;
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFPHYS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFMTU:
	{
		u_long oldmtu = ifp->if_mtu;

		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) {
			error = EINVAL;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0) {
			getmicrotime(&ifp->if_lastchange);
			rt_ifmsg(ifp);
		}
		/*
		 * If the link MTU changed, run the network layer specific
		 * procedure.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
		}
		break;
	}

	case SIOCSIFTSOLEN:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;

		/* XXX need driver supplied upper limit */
		if (ifr->ifr_tsolen <= 0) {
			error = EINVAL;
			break;
		}
		ifp->if_tsolen = ifr->ifr_tsolen;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
*/ 2051 if ((ifp->if_flags & IFF_MULTICAST) == 0) { 2052 error = EOPNOTSUPP; 2053 break; 2054 } 2055 2056 /* Don't let users screw up protocols' entries. */ 2057 if (ifr->ifr_addr.sa_family != AF_LINK) { 2058 error = EINVAL; 2059 break; 2060 } 2061 2062 if (cmd == SIOCADDMULTI) { 2063 struct ifmultiaddr *ifma; 2064 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); 2065 } else { 2066 error = if_delmulti(ifp, &ifr->ifr_addr); 2067 } 2068 if (error == 0) 2069 getmicrotime(&ifp->if_lastchange); 2070 break; 2071 2072 case SIOCSIFPHYADDR: 2073 case SIOCDIFPHYADDR: 2074 #ifdef INET6 2075 case SIOCSIFPHYADDR_IN6: 2076 #endif 2077 case SIOCSLIFPHYADDR: 2078 case SIOCSIFMEDIA: 2079 case SIOCSIFGENERIC: 2080 error = priv_check_cred(cred, PRIV_ROOT, 0); 2081 if (error) 2082 break; 2083 if (ifp->if_ioctl == NULL) { 2084 error = EOPNOTSUPP; 2085 break; 2086 } 2087 ifnet_serialize_all(ifp); 2088 error = ifp->if_ioctl(ifp, cmd, data, cred); 2089 ifnet_deserialize_all(ifp); 2090 if (error == 0) 2091 getmicrotime(&ifp->if_lastchange); 2092 break; 2093 2094 case SIOCGIFSTATUS: 2095 ifs = (struct ifstat *)data; 2096 ifs->ascii[0] = '\0'; 2097 /* fall through */ 2098 case SIOCGIFPSRCADDR: 2099 case SIOCGIFPDSTADDR: 2100 case SIOCGLIFPHYADDR: 2101 case SIOCGIFMEDIA: 2102 case SIOCGIFGENERIC: 2103 if (ifp->if_ioctl == NULL) { 2104 error = EOPNOTSUPP; 2105 break; 2106 } 2107 ifnet_serialize_all(ifp); 2108 error = ifp->if_ioctl(ifp, cmd, data, cred); 2109 ifnet_deserialize_all(ifp); 2110 break; 2111 2112 case SIOCSIFLLADDR: 2113 error = priv_check_cred(cred, PRIV_ROOT, 0); 2114 if (error) 2115 break; 2116 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, 2117 ifr->ifr_addr.sa_len); 2118 EVENTHANDLER_INVOKE(iflladdr_event, ifp); 2119 break; 2120 2121 default: 2122 oif_flags = ifp->if_flags; 2123 if (so->so_proto == NULL) { 2124 error = EOPNOTSUPP; 2125 break; 2126 } 2127 error = so_pru_control_direct(so, cmd, data, ifp); 2128 2129 if ((oif_flags ^ ifp->if_flags) & IFF_UP) { 2130 #ifdef INET6 2131 DELAY(100);/* XXX: temporary workaround for fxp issue*/ 2132 if (ifp->if_flags & IFF_UP) { 2133 crit_enter(); 2134 in6_if_up(ifp); 2135 crit_exit(); 2136 } 2137 #endif 2138 } 2139 break; 2140 } 2141 2142 ifnet_unlock(); 2143 return (error); 2144 } 2145 2146 /* 2147 * Set/clear promiscuous mode on interface ifp based on the truth value 2148 * of pswitch. The calls are reference counted so that only the first 2149 * "on" request actually has an effect, as does the final "off" request. 2150 * Results are undefined if the "off" and "on" requests are not matched. 2151 */ 2152 int 2153 ifpromisc(struct ifnet *ifp, int pswitch) 2154 { 2155 struct ifreq ifr; 2156 int error; 2157 int oldflags; 2158 2159 oldflags = ifp->if_flags; 2160 if (ifp->if_flags & IFF_PPROMISC) { 2161 /* Do nothing if device is in permanently promiscuous mode */ 2162 ifp->if_pcount += pswitch ? 1 : -1; 2163 return (0); 2164 } 2165 if (pswitch) { 2166 /* 2167 * If the device is not configured up, we cannot put it in 2168 * promiscuous mode. 2169 */ 2170 if ((ifp->if_flags & IFF_UP) == 0) 2171 return (ENETDOWN); 2172 if (ifp->if_pcount++ != 0) 2173 return (0); 2174 ifp->if_flags |= IFF_PROMISC; 2175 log(LOG_INFO, "%s: promiscuous mode enabled\n", 2176 ifp->if_xname); 2177 } else { 2178 if (--ifp->if_pcount > 0) 2179 return (0); 2180 ifp->if_flags &= ~IFF_PROMISC; 2181 log(LOG_INFO, "%s: promiscuous mode disabled\n", 2182 ifp->if_xname); 2183 } 2184 ifr.ifr_flags = ifp->if_flags; 2185 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2186 ifnet_serialize_all(ifp); 2187 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL); 2188 ifnet_deserialize_all(ifp); 2189 if (error == 0) 2190 rt_ifmsg(ifp); 2191 else 2192 ifp->if_flags = oldflags; 2193 return error; 2194 }
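/*
 * Example (sketch): a hypothetical packet-tap consumer would bracket its
 * lifetime with matched ifpromisc() calls; only the first "on" and the
 * last "off" actually reach the driver via SIOCSIFFLAGS.  The names
 * tap_attach()/tap_detach() are illustrative, not existing KPIs:
 *
 *	static int
 *	tap_attach(struct ifnet *ifp)
 *	{
 *		return (ifpromisc(ifp, 1));	(refcounted "on")
 *	}
 *
 *	static void
 *	tap_detach(struct ifnet *ifp)
 *	{
 *		ifpromisc(ifp, 0);		(matching "off")
 *	}
 */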
2195 2196 /* 2197 * Return the interface configuration 2198 * of the system. The list may be 2199 * used in later ioctls (above) to 2200 * get other information. 2201 */ 2202 static int 2203 ifconf(u_long cmd, caddr_t data, struct ucred *cred) 2204 { 2205 struct ifconf *ifc = (struct ifconf *)data; 2206 struct ifnet *ifp; 2207 struct sockaddr *sa; 2208 struct ifreq ifr, *ifrp; 2209 int space = ifc->ifc_len, error = 0; 2210 2211 ifrp = ifc->ifc_req; 2212 2213 ifnet_lock(); 2214 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2215 struct ifaddr_container *ifac, *ifac_mark; 2216 struct ifaddr_marker mark; 2217 struct ifaddrhead *head; 2218 int addrs; 2219 2220 if (space <= sizeof ifr) 2221 break; 2222 2223 /* 2224 * Zero the stack declared structure first to prevent 2225 * memory disclosure. 2226 */ 2227 bzero(&ifr, sizeof(ifr)); 2228 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) 2229 >= sizeof(ifr.ifr_name)) { 2230 error = ENAMETOOLONG; 2231 break; 2232 } 2233 2234 /* 2235 * Add a marker, since copyout() could block and during that 2236 * period the list could be changed. Inserting the marker at 2237 * the head of the list will not cause trouble for the code 2238 * assuming that the first element of the list is AF_LINK; the 2239 * marker will be moved to the next position w/o blocking. 2240 */ 2241 ifa_marker_init(&mark, ifp); 2242 ifac_mark = &mark.ifac; 2243 head = &ifp->if_addrheads[mycpuid]; 2244 2245 addrs = 0; 2246 TAILQ_INSERT_HEAD(head, ifac_mark, ifa_link); 2247 while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) { 2248 struct ifaddr *ifa = ifac->ifa; 2249 2250 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2251 TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link); 2252 2253 /* Ignore marker */ 2254 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 2255 continue; 2256 2257 if (space <= sizeof ifr) 2258 break; 2259 sa = ifa->ifa_addr; 2260 if (cred->cr_prison && 2261 prison_if(cred, sa)) 2262 continue; 2263 addrs++; 2264 /* 2265 * Keep a reference on this ifaddr, so that it will 2266 * not be destroyed when its address is copied to 2267 * the userland, which could block. 2268 */ 2269 IFAREF(ifa); 2270 if (sa->sa_len <= sizeof(*sa)) { 2271 ifr.ifr_addr = *sa; 2272 error = copyout(&ifr, ifrp, sizeof ifr); 2273 ifrp++; 2274 } else { 2275 if (space < (sizeof ifr) + sa->sa_len - 2276 sizeof(*sa)) { 2277 IFAFREE(ifa); 2278 break; 2279 } 2280 space -= sa->sa_len - sizeof(*sa); 2281 error = copyout(&ifr, ifrp, 2282 sizeof ifr.ifr_name); 2283 if (error == 0) 2284 error = copyout(sa, &ifrp->ifr_addr, 2285 sa->sa_len); 2286 ifrp = (struct ifreq *) 2287 (sa->sa_len + (caddr_t)&ifrp->ifr_addr); 2288 } 2289 IFAFREE(ifa); 2290 if (error) 2291 break; 2292 space -= sizeof ifr; 2293 } 2294 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2295 if (error) 2296 break; 2297 if (!addrs) { 2298 bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr); 2299 error = copyout(&ifr, ifrp, sizeof ifr); 2300 if (error) 2301 break; 2302 space -= sizeof ifr; 2303 ifrp++; 2304 } 2305 } 2306 ifnet_unlock(); 2307 2308 ifc->ifc_len -= space; 2309 return (error); 2310 } 2311
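/*
 * Example (sketch, userland): ifconf() packs variable-length records, so
 * a SIOCGIFCONF consumer must step by the embedded sockaddr length rather
 * than by sizeof(struct ifreq) alone.  Roughly:
 *
 *	struct ifconf ifc;
 *	struct ifreq *ifr;
 *	char buf[8192], *p;
 *	size_t salen;
 *
 *	ifc.ifc_len = sizeof(buf);
 *	ifc.ifc_buf = buf;
 *	ioctl(s, SIOCGIFCONF, &ifc);
 *	for (p = buf; p < buf + ifc.ifc_len; ) {
 *		ifr = (struct ifreq *)p;
 *		salen = ifr->ifr_addr.sa_len;
 *		if (salen < sizeof(struct sockaddr))
 *			salen = sizeof(struct sockaddr);
 *		...
 *		p += sizeof(ifr->ifr_name) + salen;
 *	}
 */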
2312 /* 2313 * Just like ifpromisc(), but for all-multicast-reception mode. 2314 */ 2315 int 2316 if_allmulti(struct ifnet *ifp, int onswitch) 2317 { 2318 int error = 0; 2319 struct ifreq ifr; 2320 2321 crit_enter(); 2322 2323 if (onswitch) { 2324 if (ifp->if_amcount++ == 0) { 2325 ifp->if_flags |= IFF_ALLMULTI; 2326 ifr.ifr_flags = ifp->if_flags; 2327 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2328 ifnet_serialize_all(ifp); 2329 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2330 NULL); 2331 ifnet_deserialize_all(ifp); 2332 } 2333 } else { 2334 if (ifp->if_amcount > 1) { 2335 ifp->if_amcount--; 2336 } else { 2337 ifp->if_amcount = 0; 2338 ifp->if_flags &= ~IFF_ALLMULTI; 2339 ifr.ifr_flags = ifp->if_flags; 2340 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2341 ifnet_serialize_all(ifp); 2342 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2343 NULL); 2344 ifnet_deserialize_all(ifp); 2345 } 2346 } 2347 2348 crit_exit(); 2349 2350 if (error == 0) 2351 rt_ifmsg(ifp); 2352 return error; 2353 } 2354 2355 /* 2356 * Add a multicast listenership to the interface in question. The link 2357 * layer provides a routine (if_resolvemulti) which converts the protocol 2358 * address into the matching link-level multicast address, if needed. */ 2359 int 2360 if_addmulti_serialized(struct ifnet *ifp, struct sockaddr *sa, 2361 struct ifmultiaddr **retifma) 2362 { 2363 struct sockaddr *llsa, *dupsa; 2364 int error; 2365 struct ifmultiaddr *ifma; 2366 2367 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2368 2369 /* 2370 * If the matching multicast address already exists 2371 * then don't add a new one, just add a reference 2372 */ 2373 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2374 if (sa_equal(sa, ifma->ifma_addr)) { 2375 ifma->ifma_refcount++; 2376 if (retifma) 2377 *retifma = ifma; 2378 return 0; 2379 } 2380 } 2381 2382 /* 2383 * Give the link layer a chance to accept/reject it, and also 2384 * find out which AF_LINK address this maps to, if it isn't one 2385 * already.
2386 */ 2387 if (ifp->if_resolvemulti) { 2388 error = ifp->if_resolvemulti(ifp, &llsa, sa); 2389 if (error) 2390 return error; 2391 } else { 2392 llsa = NULL; 2393 } 2394 2395 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2396 dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_INTWAIT); 2397 bcopy(sa, dupsa, sa->sa_len); 2398 2399 ifma->ifma_addr = dupsa; 2400 ifma->ifma_lladdr = llsa; 2401 ifma->ifma_ifp = ifp; 2402 ifma->ifma_refcount = 1; 2403 ifma->ifma_protospec = NULL; 2404 rt_newmaddrmsg(RTM_NEWMADDR, ifma); 2405 2406 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2407 if (retifma) 2408 *retifma = ifma; 2409 2410 if (llsa != NULL) { 2411 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2412 if (sa_equal(ifma->ifma_addr, llsa)) 2413 break; 2414 } 2415 if (ifma) { 2416 ifma->ifma_refcount++; 2417 } else { 2418 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2419 dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_INTWAIT); 2420 bcopy(llsa, dupsa, llsa->sa_len); 2421 ifma->ifma_addr = dupsa; 2422 ifma->ifma_ifp = ifp; 2423 ifma->ifma_refcount = 1; 2424 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2425 } 2426 } 2427 /* 2428 * We are certain we have added something, so call down to the 2429 * interface to let them know about it. 2430 */ 2431 if (ifp->if_ioctl) 2432 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL); 2433 2434 return 0; 2435 } 2436 2437 int 2438 if_addmulti(struct ifnet *ifp, struct sockaddr *sa, 2439 struct ifmultiaddr **retifma) 2440 { 2441 int error; 2442 2443 ifnet_serialize_all(ifp); 2444 error = if_addmulti_serialized(ifp, sa, retifma); 2445 ifnet_deserialize_all(ifp); 2446 2447 return error; 2448 } 2449 2450 /* 2451 * Remove a reference to a multicast address on this interface. Yell 2452 * if the request does not match an existing membership. 2453 */ 2454 static int 2455 if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa) 2456 { 2457 struct ifmultiaddr *ifma; 2458 2459 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2460 2461 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2462 if (sa_equal(sa, ifma->ifma_addr)) 2463 break; 2464 if (ifma == NULL) 2465 return ENOENT; 2466 2467 if (ifma->ifma_refcount > 1) { 2468 ifma->ifma_refcount--; 2469 return 0; 2470 } 2471 2472 rt_newmaddrmsg(RTM_DELMADDR, ifma); 2473 sa = ifma->ifma_lladdr; 2474 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2475 /* 2476 * Make sure the interface driver is notified 2477 * in the case of a link layer mcast group being left. 2478 */ 2479 if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) 2480 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2481 kfree(ifma->ifma_addr, M_IFMADDR); 2482 kfree(ifma, M_IFMADDR); 2483 if (sa == NULL) 2484 return 0; 2485 2486 /* 2487 * Now look for the link-layer address which corresponds to 2488 * this network address. It had been squirreled away in 2489 * ifma->ifma_lladdr for this purpose (so we don't have 2490 * to call ifp->if_resolvemulti() again), and we saved that 2491 * value in sa above. If some nasty deleted the 2492 * link-layer address out from underneath us, we can deal because 2493 * the address we stored is not the same as the one which was 2494 * in the record for the link-layer address. (So we don't complain 2495 * in that case.)
*/ 2497 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2498 if (sa_equal(sa, ifma->ifma_addr)) 2499 break; 2500 if (ifma == NULL) 2501 return 0; 2502 2503 if (ifma->ifma_refcount > 1) { 2504 ifma->ifma_refcount--; 2505 return 0; 2506 } 2507 2508 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2509 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2510 kfree(ifma->ifma_addr, M_IFMADDR); 2511 kfree(sa, M_IFMADDR); 2512 kfree(ifma, M_IFMADDR); 2513 2514 return 0; 2515 } 2516 2517 int 2518 if_delmulti(struct ifnet *ifp, struct sockaddr *sa) 2519 { 2520 int error; 2521 2522 ifnet_serialize_all(ifp); 2523 error = if_delmulti_serialized(ifp, sa); 2524 ifnet_deserialize_all(ifp); 2525 2526 return error; 2527 } 2528
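/*
 * Example (sketch): a protocol joining and later leaving a link-level
 * group pairs the two calls above, mirroring the SIOCADDMULTI and
 * SIOCDELMULTI handling in ifioctl(); "sa" must name the group (an
 * AF_LINK address, or a protocol address that if_resolvemulti can map):
 *
 *	struct ifmultiaddr *ifma;
 *	int error;
 *
 *	error = if_addmulti(ifp, sa, &ifma);
 *	if (error == 0) {
 *		...
 *		if_delmulti(ifp, sa);
 *	}
 */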
2529 /* 2530 * Delete all multicast group memberships for an interface. 2531 * Should be used to quickly flush all multicast filters. 2532 */ 2533 void 2534 if_delallmulti_serialized(struct ifnet *ifp) 2535 { 2536 struct ifmultiaddr *ifma, mark; 2537 struct sockaddr sa; 2538 2539 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2540 2541 bzero(&sa, sizeof(sa)); 2542 sa.sa_family = AF_UNSPEC; 2543 sa.sa_len = sizeof(sa); 2544 2545 bzero(&mark, sizeof(mark)); 2546 mark.ifma_addr = &sa; 2547 2548 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link); 2549 while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) { 2550 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2551 TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark, 2552 ifma_link); 2553 2554 if (ifma->ifma_addr->sa_family == AF_UNSPEC) 2555 continue; 2556 2557 if_delmulti_serialized(ifp, ifma->ifma_addr); 2558 } 2559 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2560 } 2561 2562 2563 /* 2564 * Set the link layer address on an interface. 2565 * 2566 * At this time we only support certain types of interfaces, 2567 * and we don't allow the length of the address to change. 2568 */ 2569 int 2570 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) 2571 { 2572 struct sockaddr_dl *sdl; 2573 struct ifreq ifr; 2574 2575 sdl = IF_LLSOCKADDR(ifp); 2576 if (sdl == NULL) 2577 return (EINVAL); 2578 if (len != sdl->sdl_alen) /* don't allow length to change */ 2579 return (EINVAL); 2580 switch (ifp->if_type) { 2581 case IFT_ETHER: /* these types use struct arpcom */ 2582 case IFT_XETHER: 2583 case IFT_L2VLAN: 2584 case IFT_IEEE8023ADLAG: 2585 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len); 2586 bcopy(lladdr, LLADDR(sdl), len); 2587 break; 2588 default: 2589 return (ENODEV); 2590 } 2591 /* 2592 * If the interface is already up, we need 2593 * to re-init it in order to reprogram its 2594 * address filter. 2595 */ 2596 ifnet_serialize_all(ifp); 2597 if ((ifp->if_flags & IFF_UP) != 0) { 2598 #ifdef INET 2599 struct ifaddr_container *ifac; 2600 #endif 2601 2602 ifp->if_flags &= ~IFF_UP; 2603 ifr.ifr_flags = ifp->if_flags; 2604 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2605 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2606 NULL); 2607 ifp->if_flags |= IFF_UP; 2608 ifr.ifr_flags = ifp->if_flags; 2609 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2610 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2611 NULL); 2612 #ifdef INET 2613 /* 2614 * Also send gratuitous ARPs to notify other nodes about 2615 * the address change. 2616 */ 2617 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2618 struct ifaddr *ifa = ifac->ifa; 2619 2620 if (ifa->ifa_addr != NULL && 2621 ifa->ifa_addr->sa_family == AF_INET) 2622 arp_gratuitous(ifp, ifa); 2623 } 2624 #endif 2625 } 2626 ifnet_deserialize_all(ifp); 2627 return (0); 2628 } 2629 2630 struct ifmultiaddr * 2631 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp) 2632 { 2633 struct ifmultiaddr *ifma; 2634 2635 /* TODO: need ifnet_serialize_main */ 2636 ifnet_serialize_all(ifp); 2637 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2638 if (sa_equal(ifma->ifma_addr, sa)) 2639 break; 2640 ifnet_deserialize_all(ifp); 2641 2642 return ifma; 2643 } 2644 2645 /* 2646 * This function locates the first real ethernet MAC from a network 2647 * card and loads it into node, returning 0 on success or ENOENT if 2648 * no suitable interfaces were found. It is used by the uuid code to 2649 * generate a unique 6-byte number. 2650 */ 2651 int 2652 if_getanyethermac(uint16_t *node, int minlen) 2653 { 2654 struct ifnet *ifp; 2655 struct sockaddr_dl *sdl; 2656 2657 ifnet_lock(); 2658 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2659 if (ifp->if_type != IFT_ETHER) 2660 continue; 2661 sdl = IF_LLSOCKADDR(ifp); 2662 if (sdl->sdl_alen < minlen) 2663 continue; 2664 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node, 2665 minlen); 2666 ifnet_unlock(); 2667 return (0); 2668 } 2669 ifnet_unlock(); 2670 return (ENOENT); 2671 } 2672 2673 /* 2674 * The name argument must be a pointer to storage which will last as 2675 * long as the interface does. For physical devices, the result of 2676 * device_get_name(dev) is a good choice and for pseudo-devices a 2677 * static string works well. 2678 */ 2679 void 2680 if_initname(struct ifnet *ifp, const char *name, int unit) 2681 { 2682 ifp->if_dname = name; 2683 ifp->if_dunit = unit; 2684 if (unit != IF_DUNIT_NONE) 2685 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); 2686 else 2687 strlcpy(ifp->if_xname, name, IFNAMSIZ); 2688 } 2689 2690 int 2691 if_printf(struct ifnet *ifp, const char *fmt, ...)
{ 2693 __va_list ap; 2694 int retval; 2695 2696 retval = kprintf("%s: ", ifp->if_xname); 2697 __va_start(ap, fmt); 2698 retval += kvprintf(fmt, ap); 2699 __va_end(ap); 2700 return (retval); 2701 } 2702 2703 struct ifnet * 2704 if_alloc(uint8_t type) 2705 { 2706 struct ifnet *ifp; 2707 size_t size; 2708 2709 /* 2710 * XXX temporary hack until arpcom is set up in if_l2com 2711 */ 2712 if (type == IFT_ETHER) 2713 size = sizeof(struct arpcom); 2714 else 2715 size = sizeof(struct ifnet); 2716 2717 ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO); 2718 2719 ifp->if_type = type; 2720 2721 if (if_com_alloc[type] != NULL) { 2722 ifp->if_l2com = if_com_alloc[type](type, ifp); 2723 if (ifp->if_l2com == NULL) { 2724 kfree(ifp, M_IFNET); 2725 return (NULL); 2726 } 2727 } 2728 return (ifp); 2729 } 2730 2731 void 2732 if_free(struct ifnet *ifp) 2733 { 2734 kfree(ifp, M_IFNET); 2735 } 2736 2737 void 2738 ifq_set_classic(struct ifaltq *ifq) 2739 { 2740 ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq, 2741 ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request); 2742 } 2743 2744 void 2745 ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq, 2746 ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request) 2747 { 2748 int q; 2749 2750 KASSERT(mapsubq != NULL, ("mapsubq is not specified")); 2751 KASSERT(enqueue != NULL, ("enqueue is not specified")); 2752 KASSERT(dequeue != NULL, ("dequeue is not specified")); 2753 KASSERT(request != NULL, ("request is not specified")); 2754 2755 ifq->altq_mapsubq = mapsubq; 2756 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 2757 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 2758 2759 ifsq->ifsq_enqueue = enqueue; 2760 ifsq->ifsq_dequeue = dequeue; 2761 ifsq->ifsq_request = request; 2762 } 2763 } 2764 2765 static void 2766 ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2767 { 2768 2769 classq_add(&ifsq->ifsq_norm, m); 2770 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2771 } 2772 2773 static void 2774 ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2775 { 2776 2777 classq_add(&ifsq->ifsq_prio, m); 2778 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2779 ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len); 2780 } 2781 2782 static struct mbuf * 2783 ifsq_norm_dequeue(struct ifaltq_subque *ifsq) 2784 { 2785 struct mbuf *m; 2786 2787 m = classq_get(&ifsq->ifsq_norm); 2788 if (m != NULL) 2789 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2790 return (m); 2791 } 2792 2793 static struct mbuf * 2794 ifsq_prio_dequeue(struct ifaltq_subque *ifsq) 2795 { 2796 struct mbuf *m; 2797 2798 m = classq_get(&ifsq->ifsq_prio); 2799 if (m != NULL) { 2800 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2801 ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len); 2802 } 2803 return (m); 2804 } 2805 2806 int 2807 ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m, 2808 struct altq_pktattr *pa __unused) 2809 { 2810 2811 M_ASSERTPKTHDR(m); 2812 again: 2813 if (ifsq->ifsq_len >= ifsq->ifsq_maxlen || 2814 ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) { 2815 struct mbuf *m_drop; 2816 2817 if (m->m_flags & M_PRIO) { 2818 m_drop = NULL; 2819 if (ifsq->ifsq_prio_len < (ifsq->ifsq_maxlen >> 1) && 2820 ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt >> 1)) { 2821 /* Try dropping some from normal queue. */
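/*
 * The normal queue is only raided while prioritized packets
 * occupy less than half of the subqueue's length/byte limits;
 * otherwise, or when the normal queue turns out to be empty,
 * a prioritized packet is dropped instead (see below).
 */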
2822 m_drop = ifsq_norm_dequeue(ifsq); 2823 } 2824 if (m_drop == NULL) 2825 m_drop = ifsq_prio_dequeue(ifsq); 2826 } else { 2827 m_drop = ifsq_norm_dequeue(ifsq); 2828 } 2829 if (m_drop != NULL) { 2830 IFNET_STAT_INC(ifsq->ifsq_ifp, oqdrops, 1); 2831 m_freem(m_drop); 2832 goto again; 2833 } 2834 /* 2835 * No old packets could be dropped! 2836 * NOTE: Caller increases oqdrops. 2837 */ 2838 m_freem(m); 2839 return (ENOBUFS); 2840 } else { 2841 if (m->m_flags & M_PRIO) 2842 ifsq_prio_enqueue(ifsq, m); 2843 else 2844 ifsq_norm_enqueue(ifsq, m); 2845 return (0); 2846 } 2847 } 2848 2849 struct mbuf * 2850 ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op) 2851 { 2852 struct mbuf *m; 2853 2854 switch (op) { 2855 case ALTDQ_POLL: 2856 m = classq_head(&ifsq->ifsq_prio); 2857 if (m == NULL) 2858 m = classq_head(&ifsq->ifsq_norm); 2859 break; 2860 2861 case ALTDQ_REMOVE: 2862 m = ifsq_prio_dequeue(ifsq); 2863 if (m == NULL) 2864 m = ifsq_norm_dequeue(ifsq); 2865 break; 2866 2867 default: 2868 panic("unsupported ALTQ dequeue op: %d", op); 2869 } 2870 return m; 2871 } 2872 2873 int 2874 ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg) 2875 { 2876 switch (req) { 2877 case ALTRQ_PURGE: 2878 for (;;) { 2879 struct mbuf *m; 2880 2881 m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE); 2882 if (m == NULL) 2883 break; 2884 m_freem(m); 2885 } 2886 break; 2887 2888 default: 2889 panic("unsupported ALTQ request: %d", req); 2890 } 2891 return 0; 2892 } 2893 2894 static void 2895 ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched) 2896 { 2897 struct ifnet *ifp = ifsq_get_ifp(ifsq); 2898 int running = 0, need_sched; 2899 2900 /* 2901 * Try to do a direct ifnet.if_start on the subqueue first; if there is 2902 * contention on the subqueue hardware serializer, ifnet.if_start on 2903 * the subqueue will be scheduled on the subqueue owner CPU. 2904 */ 2905 if (!ifsq_tryserialize_hw(ifsq)) { 2906 /* 2907 * Subqueue hardware serializer contention happened, 2908 * ifnet.if_start on the subqueue is scheduled on 2909 * the subqueue owner CPU, and we keep going. 2910 */ 2911 ifsq_ifstart_schedule(ifsq, 1); 2912 return; 2913 } 2914 2915 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 2916 ifp->if_start(ifp, ifsq); 2917 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 2918 running = 1; 2919 } 2920 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 2921 2922 ifsq_deserialize_hw(ifsq); 2923 2924 if (need_sched) { 2925 /* 2926 * More data needs to be transmitted, ifnet.if_start on the 2927 * subqueue is scheduled on the subqueue owner CPU, and we 2928 * keep going. 2929 * NOTE: ifnet.if_start subqueue interlock is not released. 2930 */ 2931 ifsq_ifstart_schedule(ifsq, force_sched); 2932 } 2933 } 2934 2935 /* 2936 * Subqueue packet staging mechanism: 2937 * 2938 * The packets enqueued into the subqueue are staged to a certain amount 2939 * before the ifnet.if_start on the subqueue is called. In this way, the 2940 * driver could avoid writing to hardware registers upon every packet; 2941 * instead, hardware registers could be written once a certain amount of 2942 * packets has been put onto the hardware TX ring. The measurement on 2943 * several modern NICs (emx(4), igb(4), bnx(4), bge(4), jme(4)) shows that 2944 * aggregating hardware register writes could save ~20% CPU time when 2945 * 18-byte UDP datagrams are transmitted at 1.48Mpps. The performance 2946 * improvement from aggregating hardware register writes is also mentioned 2947 * in Luigi Rizzo's netmap paper (http://info.iet.unipi.it/~luigi/netmap/). 2948 * 2949 * Subqueue packet staging is performed for two entry points into drivers' 2950 * transmission function: 2951 * - Direct ifnet.if_start calling on the subqueue, i.e. ifsq_ifstart_try() 2952 * - ifnet.if_start scheduling on the subqueue, i.e. ifsq_ifstart_schedule() 2953 * 2954 * Subqueue packet staging will be stopped upon any of the following 2955 * conditions: 2956 * - If the count of packets enqueued on the current CPU is greater than or 2957 * equal to ifsq_stage_cntmax. (XXX this should be per-interface) 2958 * - If the total length of packets enqueued on the current CPU is greater 2959 * than or equal to the hardware's MTU - max_protohdr. max_protohdr is 2960 * cut from the hardware's MTU mainly because a full TCP segment's size 2961 * is usually less than the hardware's MTU. 2962 * - ifsq_ifstart_schedule() is not pending on the current CPU and 2963 * ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not 2964 * released. 2965 * - if_start_rollup(), which is registered as a low priority netisr 2966 * rollup function, is called; probably because no more work is pending 2967 * for the netisr. 2968 * 2969 * NOTE: 2970 * Currently subqueue packet staging is only performed in netisr threads. 2971 */
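/*
 * For example, with the default net.link.stage_cntmax of 4, a 1500-byte
 * MTU and an assumed max_protohdr of 60 bytes, a netisr thread would
 * stage at most 4 packets or 1440 bytes from the local CPU before
 * forcing ifnet.if_start, unless one of the other stop conditions
 * above fires first.
 */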
2972 int 2973 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa) 2974 { 2975 struct ifaltq *ifq = &ifp->if_snd; 2976 struct ifaltq_subque *ifsq; 2977 int error, start = 0, len, mcast = 0, avoid_start = 0; 2978 struct ifsubq_stage_head *head = NULL; 2979 struct ifsubq_stage *stage = NULL; 2980 struct globaldata *gd = mycpu; 2981 struct thread *td = gd->gd_curthread; 2982 2983 crit_enter_quick(td); 2984 2985 ifsq = ifq_map_subq(ifq, gd->gd_cpuid); 2986 ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq); 2987 2988 len = m->m_pkthdr.len; 2989 if (m->m_flags & M_MCAST) 2990 mcast = 1; 2991 2992 if (td->td_type == TD_TYPE_NETISR) { 2993 head = &ifsubq_stage_heads[mycpuid]; 2994 stage = ifsq_get_stage(ifsq, mycpuid); 2995 2996 stage->stg_cnt++; 2997 stage->stg_len += len; 2998 if (stage->stg_cnt < ifsq_stage_cntmax && 2999 stage->stg_len < (ifp->if_mtu - max_protohdr)) 3000 avoid_start = 1; 3001 } 3002 3003 ALTQ_SQ_LOCK(ifsq); 3004 error = ifsq_enqueue_locked(ifsq, m, pa); 3005 if (error) { 3006 IFNET_STAT_INC(ifp, oqdrops, 1); 3007 if (!ifsq_data_ready(ifsq)) { 3008 ALTQ_SQ_UNLOCK(ifsq); 3009 crit_exit_quick(td); 3010 return error; 3011 } 3012 avoid_start = 0; 3013 } 3014 if (!ifsq_is_started(ifsq)) { 3015 if (avoid_start) { 3016 ALTQ_SQ_UNLOCK(ifsq); 3017 3018 KKASSERT(!error); 3019 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 3020 ifsq_stage_insert(head, stage); 3021 3022 IFNET_STAT_INC(ifp, obytes, len); 3023 if (mcast) 3024 IFNET_STAT_INC(ifp, omcasts, 1); 3025 crit_exit_quick(td); 3026 return error; 3027 } 3028 3029 /* 3030 * Hold the subqueue interlock of ifnet.if_start 3031 */ 3032 ifsq_set_started(ifsq); 3033 start = 1; 3034 } 3035 ALTQ_SQ_UNLOCK(ifsq); 3036 3037 if (!error) { 3038 IFNET_STAT_INC(ifp, obytes, len); 3039 if (mcast) 3040 IFNET_STAT_INC(ifp, omcasts, 1); 3041 } 3042 3043 if (stage != NULL) { 3044 if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) { 3045 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 3046 if (!avoid_start) { 3047 ifsq_stage_remove(head, stage); 3048 ifsq_ifstart_schedule(ifsq, 1); 3049 } 3050 crit_exit_quick(td); 3051 return error; 3052 } 3053
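/*
 * Staging is over for this CPU one way or the other: the
 * subqueue is either already started or will be started
 * below, so unhook or reset the local staging state.
 */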
3054 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) { 3055 ifsq_stage_remove(head, stage); 3056 } else { 3057 stage->stg_cnt = 0; 3058 stage->stg_len = 0; 3059 } 3060 } 3061 3062 if (!start) { 3063 crit_exit_quick(td); 3064 return error; 3065 } 3066 3067 ifsq_ifstart_try(ifsq, 0); 3068 3069 crit_exit_quick(td); 3070 return error; 3071 } 3072 3073 void * 3074 ifa_create(int size) 3075 { 3076 struct ifaddr *ifa; 3077 int i; 3078 3079 KASSERT(size >= sizeof(*ifa), ("ifaddr size too small")); 3080 3081 ifa = kmalloc(size, M_IFADDR, M_INTWAIT | M_ZERO); 3082 ifa->ifa_containers = 3083 kmalloc_cachealign(ncpus * sizeof(struct ifaddr_container), 3084 M_IFADDR, M_INTWAIT | M_ZERO); 3085 3086 ifa->ifa_ncnt = ncpus; 3087 for (i = 0; i < ncpus; ++i) { 3088 struct ifaddr_container *ifac = &ifa->ifa_containers[i]; 3089 3090 ifac->ifa_magic = IFA_CONTAINER_MAGIC; 3091 ifac->ifa = ifa; 3092 ifac->ifa_refcnt = 1; 3093 } 3094 #ifdef IFADDR_DEBUG 3095 kprintf("alloc ifa %p %d\n", ifa, size); 3096 #endif 3097 return ifa; 3098 } 3099 3100 void 3101 ifac_free(struct ifaddr_container *ifac, int cpu_id) 3102 { 3103 struct ifaddr *ifa = ifac->ifa; 3104 3105 KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC); 3106 KKASSERT(ifac->ifa_refcnt == 0); 3107 KASSERT(ifac->ifa_listmask == 0, 3108 ("ifa is still on %#x lists", ifac->ifa_listmask)); 3109 3110 ifac->ifa_magic = IFA_CONTAINER_DEAD; 3111 3112 #ifdef IFADDR_DEBUG_VERBOSE 3113 kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id); 3114 #endif 3115 3116 KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus, 3117 ("invalid # of ifac, %d", ifa->ifa_ncnt)); 3118 if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) { 3119 #ifdef IFADDR_DEBUG 3120 kprintf("free ifa %p\n", ifa); 3121 #endif 3122 kfree(ifa->ifa_containers, M_IFADDR); 3123 kfree(ifa, M_IFADDR); 3124 } 3125 } 3126 3127 static void 3128 ifa_iflink_dispatch(netmsg_t nmsg) 3129 { 3130 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3131 struct ifaddr *ifa = msg->ifa; 3132 struct ifnet *ifp = msg->ifp; 3133 int cpu = mycpuid; 3134 struct ifaddr_container *ifac; 3135 3136 crit_enter(); 3137 3138 ifac = &ifa->ifa_containers[cpu]; 3139 ASSERT_IFAC_VALID(ifac); 3140 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0, 3141 ("ifaddr is on if_addrheads")); 3142 3143 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD; 3144 if (msg->tail) 3145 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link); 3146 else 3147 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link); 3148 3149 crit_exit(); 3150 3151 ifa_forwardmsg(&nmsg->lmsg, cpu + 1); 3152 } 3153 3154 void 3155 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail) 3156 { 3157 struct netmsg_ifaddr msg; 3158 3159 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3160 0, ifa_iflink_dispatch); 3161 msg.ifa = ifa; 3162 msg.ifp = ifp; 3163 msg.tail = tail; 3164 3165 ifa_domsg(&msg.base.lmsg, 0); 3166 } 3167 3168 static void 3169 ifa_ifunlink_dispatch(netmsg_t nmsg) 3170 { 3171 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3172 struct ifaddr *ifa = msg->ifa; 3173 struct ifnet *ifp = msg->ifp; 3174 int cpu = mycpuid; 3175 struct ifaddr_container *ifac; 3176 3177 crit_enter(); 3178 3179 ifac = &ifa->ifa_containers[cpu]; 3180 ASSERT_IFAC_VALID(ifac); 3181 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD, 3182 ("ifaddr is not on if_addrhead")); 3183 3184 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link); 3185 ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD; 3186 3187 crit_exit(); 3188 3189 ifa_forwardmsg(&nmsg->lmsg, cpu + 1); 3190 } 3191 3192 
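/*
 * As with ifa_iflink(), the unlink request is dispatched to cpu0 first
 * and then forwarded through every CPU by ifa_forwardmsg(), so each
 * per-CPU if_addrheads list is updated in turn before the message is
 * finally replied to.
 */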
void 3193 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp) 3194 { 3195 struct netmsg_ifaddr msg; 3196 3197 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3198 0, ifa_ifunlink_dispatch); 3199 msg.ifa = ifa; 3200 msg.ifp = ifp; 3201 3202 ifa_domsg(&msg.base.lmsg, 0); 3203 } 3204 3205 static void 3206 ifa_destroy_dispatch(netmsg_t nmsg) 3207 { 3208 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3209 3210 IFAFREE(msg->ifa); 3211 ifa_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3212 } 3213 3214 void 3215 ifa_destroy(struct ifaddr *ifa) 3216 { 3217 struct netmsg_ifaddr msg; 3218 3219 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3220 0, ifa_destroy_dispatch); 3221 msg.ifa = ifa; 3222 3223 ifa_domsg(&msg.base.lmsg, 0); 3224 } 3225 3226 struct lwkt_port * 3227 ifnet_portfn(int cpu) 3228 { 3229 return &ifnet_threads[cpu]->td_msgport; 3230 } 3231 3232 void 3233 ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu) 3234 { 3235 KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus); 3236 3237 if (next_cpu < ncpus) 3238 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg); 3239 else 3240 lwkt_replymsg(lmsg, 0); 3241 } 3242 3243 int 3244 ifnet_domsg(struct lwkt_msg *lmsg, int cpu) 3245 { 3246 KKASSERT(cpu < ncpus); 3247 return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0); 3248 } 3249 3250 void 3251 ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu) 3252 { 3253 KKASSERT(cpu < ncpus); 3254 lwkt_sendmsg(ifnet_portfn(cpu), lmsg); 3255 } 3256 3257 /* 3258 * Generic netmsg service loop. Some protocols may roll their own but all 3259 * must do the basic command dispatch function call done here. 3260 */ 3261 static void 3262 ifnet_service_loop(void *arg __unused) 3263 { 3264 netmsg_t msg; 3265 3266 while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) { 3267 KASSERT(msg->base.nm_dispatch, ("ifnet_service: badmsg")); 3268 msg->base.nm_dispatch(msg); 3269 } 3270 } 3271 3272 static void 3273 if_start_rollup(void) 3274 { 3275 struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid]; 3276 struct ifsubq_stage *stage; 3277 3278 crit_enter(); 3279 3280 while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) { 3281 struct ifaltq_subque *ifsq = stage->stg_subq; 3282 int is_sched = 0; 3283 3284 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED) 3285 is_sched = 1; 3286 ifsq_stage_remove(head, stage); 3287 3288 if (is_sched) { 3289 ifsq_ifstart_schedule(ifsq, 1); 3290 } else { 3291 int start = 0; 3292 3293 ALTQ_SQ_LOCK(ifsq); 3294 if (!ifsq_is_started(ifsq)) { 3295 /* 3296 * Hold the subqueue interlock of 3297 * ifnet.if_start 3298 */ 3299 ifsq_set_started(ifsq); 3300 start = 1; 3301 } 3302 ALTQ_SQ_UNLOCK(ifsq); 3303 3304 if (start) 3305 ifsq_ifstart_try(ifsq, 1); 3306 } 3307 KKASSERT((stage->stg_flags & 3308 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 3309 } 3310 3311 crit_exit(); 3312 } 3313 3314 static void 3315 ifnetinit(void *dummy __unused) 3316 { 3317 int i; 3318 3319 for (i = 0; i < ncpus; ++i) { 3320 struct thread **thr = &ifnet_threads[i]; 3321 3322 lwkt_create(ifnet_service_loop, NULL, thr, NULL, 3323 TDF_NOSTART|TDF_FORCE_SPINPORT|TDF_FIXEDCPU, 3324 i, "ifnet %d", i); 3325 netmsg_service_port_init(&(*thr)->td_msgport); 3326 lwkt_schedule(*thr); 3327 } 3328 3329 for (i = 0; i < ncpus; ++i) 3330 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head); 3331 netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART); 3332 } 3333 3334 void 3335 if_register_com_alloc(u_char type, 3336 if_com_alloc_t *a, if_com_free_t *f) 3337 { 3338 3339 KASSERT(if_com_alloc[type] == NULL, 3340 ("if_register_com_alloc: %d 
already registered", type)); 3341 KASSERT(if_com_free[type] == NULL, 3342 ("if_register_com_alloc: %d free already registered", type)); 3343 3344 if_com_alloc[type] = a; 3345 if_com_free[type] = f; 3346 } 3347 3348 void 3349 if_deregister_com_alloc(u_char type) 3350 { 3351 3352 KASSERT(if_com_alloc[type] != NULL, 3353 ("if_deregister_com_alloc: %d not registered", type)); 3354 KASSERT(if_com_free[type] != NULL, 3355 ("if_deregister_com_alloc: %d free not registered", type)); 3356 if_com_alloc[type] = NULL; 3357 if_com_free[type] = NULL; 3358 } 3359 3360 int 3361 if_ring_count2(int cnt, int cnt_max) 3362 { 3363 int shift = 0; 3364 3365 KASSERT(cnt_max >= 1 && powerof2(cnt_max), 3366 ("invalid ring count max %d", cnt_max)); 3367 3368 if (cnt <= 0) 3369 cnt = cnt_max; 3370 if (cnt > ncpus2) 3371 cnt = ncpus2; 3372 if (cnt > cnt_max) 3373 cnt = cnt_max; 3374 3375 while ((1 << (shift + 1)) <= cnt) 3376 ++shift; 3377 cnt = 1 << shift; 3378 3379 KASSERT(cnt >= 1 && cnt <= ncpus2 && cnt <= cnt_max, 3380 ("calculate cnt %d, ncpus2 %d, cnt max %d", 3381 cnt, ncpus2, cnt_max)); 3382 return cnt; 3383 } 3384 3385 void 3386 ifq_set_maxlen(struct ifaltq *ifq, int len) 3387 { 3388 ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax); 3389 } 3390 3391 int 3392 ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused) 3393 { 3394 return ALTQ_SUBQ_INDEX_DEFAULT; 3395 } 3396 3397 int 3398 ifq_mapsubq_mask(struct ifaltq *ifq, int cpuid) 3399 { 3400 return (cpuid & ifq->altq_subq_mask); 3401 } 3402 3403 static void 3404 ifsq_watchdog(void *arg) 3405 { 3406 struct ifsubq_watchdog *wd = arg; 3407 struct ifnet *ifp; 3408 3409 if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer)) 3410 goto done; 3411 3412 ifp = ifsq_get_ifp(wd->wd_subq); 3413 if (ifnet_tryserialize_all(ifp)) { 3414 wd->wd_watchdog(wd->wd_subq); 3415 ifnet_deserialize_all(ifp); 3416 } else { 3417 /* try again next timeout */ 3418 wd->wd_timer = 1; 3419 } 3420 done: 3421 ifsq_watchdog_reset(wd); 3422 } 3423 3424 static void 3425 ifsq_watchdog_reset(struct ifsubq_watchdog *wd) 3426 { 3427 callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd, 3428 ifsq_get_cpuid(wd->wd_subq)); 3429 } 3430 3431 void 3432 ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq, 3433 ifsq_watchdog_t watchdog) 3434 { 3435 callout_init_mp(&wd->wd_callout); 3436 wd->wd_timer = 0; 3437 wd->wd_subq = ifsq; 3438 wd->wd_watchdog = watchdog; 3439 } 3440 3441 void 3442 ifsq_watchdog_start(struct ifsubq_watchdog *wd) 3443 { 3444 wd->wd_timer = 0; 3445 ifsq_watchdog_reset(wd); 3446 } 3447 3448 void 3449 ifsq_watchdog_stop(struct ifsubq_watchdog *wd) 3450 { 3451 wd->wd_timer = 0; 3452 callout_stop(&wd->wd_callout); 3453 } 3454 3455 void 3456 ifnet_lock(void) 3457 { 3458 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3459 ("try holding ifnet lock in netisr")); 3460 mtx_lock(&ifnet_mtx); 3461 } 3462 3463 void 3464 ifnet_unlock(void) 3465 { 3466 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3467 ("try holding ifnet lock in netisr")); 3468 mtx_unlock(&ifnet_mtx); 3469 } 3470 3471 static struct ifnet_array * 3472 ifnet_array_alloc(int count) 3473 { 3474 struct ifnet_array *arr; 3475 3476 arr = kmalloc(__offsetof(struct ifnet_array, ifnet_arr[count]), 3477 M_IFNET, M_WAITOK); 3478 arr->ifnet_count = count; 3479 3480 return arr; 3481 } 3482 3483 static void 3484 ifnet_array_free(struct ifnet_array *arr) 3485 { 3486 if (arr == &ifnet_array0) 3487 return; 3488 kfree(arr, M_IFNET); 3489 } 3490 3491 static struct ifnet_array * 3492 ifnet_array_add(struct 
ifnet *ifp, const struct ifnet_array *old_arr) 3493 { 3494 struct ifnet_array *arr; 3495 int count, i; 3496 3497 KASSERT(old_arr->ifnet_count >= 0, 3498 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3499 count = old_arr->ifnet_count + 1; 3500 arr = ifnet_array_alloc(count); 3501 3502 /* 3503 * Save the old ifnet array and append this ifp to the end of 3504 * the new ifnet array. 3505 */ 3506 for (i = 0; i < old_arr->ifnet_count; ++i) { 3507 KASSERT(old_arr->ifnet_arr[i] != ifp, 3508 ("%s is already in ifnet array", ifp->if_xname)); 3509 arr->ifnet_arr[i] = old_arr->ifnet_arr[i]; 3510 } 3511 KASSERT(i == count - 1, 3512 ("add %s, ifnet array index mismatch, should be %d, but got %d", 3513 ifp->if_xname, count - 1, i)); 3514 arr->ifnet_arr[i] = ifp; 3515 3516 return arr; 3517 } 3518 3519 static struct ifnet_array * 3520 ifnet_array_del(struct ifnet *ifp, const struct ifnet_array *old_arr) 3521 { 3522 struct ifnet_array *arr; 3523 int count, i, idx, found = 0; 3524 3525 KASSERT(old_arr->ifnet_count > 0, 3526 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3527 count = old_arr->ifnet_count - 1; 3528 arr = ifnet_array_alloc(count); 3529 3530 /* 3531 * Save the old ifnet array, but skip this ifp. 3532 */ 3533 idx = 0; 3534 for (i = 0; i < old_arr->ifnet_count; ++i) { 3535 if (old_arr->ifnet_arr[i] == ifp) { 3536 KASSERT(!found, 3537 ("dup %s is in ifnet array", ifp->if_xname)); 3538 found = 1; 3539 continue; 3540 } 3541 KASSERT(idx < count, 3542 ("invalid ifnet array index %d, count %d", idx, count)); 3543 arr->ifnet_arr[idx] = old_arr->ifnet_arr[i]; 3544 ++idx; 3545 } 3546 KASSERT(found, ("%s is not in ifnet array", ifp->if_xname)); 3547 KASSERT(idx == count, 3548 ("del %s, ifnet array count mismatch, should be %d, but got %d ", 3549 ifp->if_xname, count, idx)); 3550 3551 return arr; 3552 } 3553 3554 const struct ifnet_array * 3555 ifnet_array_get(void) 3556 { 3557 const struct ifnet_array *ret; 3558 3559 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3560 ret = ifnet_array; 3561 /* Make sure 'ret' is really used. */ 3562 cpu_ccfence(); 3563 return (ret); 3564 } 3565 3566 int 3567 ifnet_array_isempty(void) 3568 { 3569 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3570 if (ifnet_array->ifnet_count == 0) 3571 return 1; 3572 else 3573 return 0; 3574 } 3575 3576 void 3577 ifa_marker_init(struct ifaddr_marker *mark, struct ifnet *ifp) 3578 { 3579 struct ifaddr *ifa; 3580 3581 memset(mark, 0, sizeof(*mark)); 3582 ifa = &mark->ifa; 3583 3584 mark->ifac.ifa = ifa; 3585 3586 ifa->ifa_addr = &mark->addr; 3587 ifa->ifa_dstaddr = &mark->dstaddr; 3588 ifa->ifa_netmask = &mark->netmask; 3589 ifa->ifa_ifp = ifp; 3590 } 3591
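/*
 * Example (sketch): a netisr-side consumer would typically walk the
 * lockless interface array snapshot instead of taking the ifnet lock;
 * the loop body here is illustrative only.
 *
 *	const struct ifnet_array *arr;
 *	int i;
 *
 *	arr = ifnet_array_get();
 *	for (i = 0; i < arr->ifnet_count; ++i) {
 *		struct ifnet *ifp = arr->ifnet_arr[i];
 *
 *		if (ifp->if_flags & IFF_UP) {
 *			...
 *		}
 *	}
 */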