/*
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_inet.h"
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/thread.h>
#include <sys/serialize.h>
#include <sys/bus.h>

#include <sys/thread2.h>
#include <sys/msgport2.h>
#include <sys/mutex2.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/ifq_var.h>
#include <net/radix.h>
#include <net/route.h>
#include <net/if_clone.h>
#include <net/netisr2.h>
#include <net/netmsg2.h>

#include <machine/atomic.h>
#include <machine/stdarg.h>
#include <machine/smp.h>

#if defined(INET) || defined(INET6)
/*XXX*/
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#endif

#if defined(COMPAT_43)
#include <emulation/43bsd/43bsd_socket.h>
#endif /* COMPAT_43 */

struct netmsg_ifaddr {
	struct netmsg_base base;
	struct ifaddr	*ifa;
	struct ifnet	*ifp;
	int		tail;
};

struct ifsubq_stage_head {
	TAILQ_HEAD(, ifsubq_stage)	stg_head;
} __cachealign;
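/*
 * Note (added for clarity): each CPU owns one ifsubq_stage_head (see the
 * ifsubq_stage_heads[] array below); subqueues holding staged packets are
 * linked into the owning CPU's list, indexed by mycpuid, so a netisr can
 * manage its own staging list locally.  The structure is cache-aligned
 * to avoid false sharing between CPUs.
 */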
/*
 * System initialization
 */
static void	if_attachdomain(void *);
static void	if_attachdomain1(struct ifnet *);
static int	ifconf(u_long, caddr_t, struct ucred *);
static void	ifinit(void *);
static void	ifnetinit(void *);
static void	if_slowtimo(void *);
static void	link_rtrequest(int, struct rtentry *);
static int	if_rtdel(struct radix_node *, void *);
static void	if_slowtimo_dispatch(netmsg_t);

/* Helper functions */
static void	ifsq_watchdog_reset(struct ifsubq_watchdog *);
static int	if_delmulti_serialized(struct ifnet *, struct sockaddr *);
static struct ifnet_array *ifnet_array_alloc(int);
static void	ifnet_array_free(struct ifnet_array *);
static struct ifnet_array *ifnet_array_add(struct ifnet *,
		    const struct ifnet_array *);
static struct ifnet_array *ifnet_array_del(struct ifnet *,
		    const struct ifnet_array *);

#ifdef INET6
/*
 * XXX: declared here to avoid including many inet6-related files..
 * should be more generalized?
 */
extern void	nd6_setmtu(struct ifnet *);
#endif

SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");

static int ifsq_stage_cntmax = 4;
TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax);
SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW,
    &ifsq_stage_cntmax, 0, "ifq staging packet count max");

static int if_stats_compat = 0;
SYSCTL_INT(_net_link, OID_AUTO, stats_compat, CTLFLAG_RW,
    &if_stats_compat, 0, "Compatibility with the old ifnet stats");

SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL);
/* Must be after netisr_init */
SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_SECOND, ifnetinit, NULL);

static if_com_alloc_t *if_com_alloc[256];
static if_com_free_t *if_com_free[256];

MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure");

int			ifqmaxlen = IFQ_MAXLEN;
struct ifnethead	ifnetlist = TAILQ_HEAD_INITIALIZER(ifnetlist);

static struct ifnet_array	ifnet_array0;
static struct ifnet_array	*ifnet_array = &ifnet_array0;

static struct callout		if_slowtimo_timer;
static struct netmsg_base	if_slowtimo_netmsg;

int			if_index = 0;
struct ifnet		**ifindex2ifnet = NULL;
static struct thread	*ifnet_threads[MAXCPU];
static struct mtx	ifnet_mtx = MTX_INITIALIZER("ifnet");

static struct ifsubq_stage_head	ifsubq_stage_heads[MAXCPU];

#ifdef notyet
#define IFQ_KTR_STRING		"ifq=%p"
#define IFQ_KTR_ARGS		struct ifaltq *ifq
#ifndef KTR_IFQ
#define KTR_IFQ			KTR_ALL
#endif
KTR_INFO_MASTER(ifq);
KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS);
KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS);
#define logifq(name, arg)	KTR_LOG(ifq_ ## name, arg)

#define IF_START_KTR_STRING	"ifp=%p"
#define IF_START_KTR_ARGS	struct ifnet *ifp
#ifndef KTR_IF_START
#define KTR_IF_START		KTR_ALL
#endif
KTR_INFO_MASTER(if_start);
KTR_INFO(KTR_IF_START, if_start, run, 0,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, sched, 1,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, avoid, 2,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, contend_sched, 3,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
KTR_INFO(KTR_IF_START, if_start, chase_sched, 4,
	 IF_START_KTR_STRING, IF_START_KTR_ARGS);
#define logifstart(name, arg)	KTR_LOG(if_start_ ## name, arg)
#endif
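/*
 * Usage sketch for the KTR probes above (only meaningful once the
 * "notyet" block is enabled): e.g. logifstart(sched, ifp) would log an
 * if_start_sched event for ifp, and logifq(enqueue, ifq) an ifq_enqueue
 * event, per the KTR_INFO definitions.
 */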
TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head);

/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as
 * parameters.
 */
/* ARGSUSED*/
static void
ifinit(void *dummy)
{
	struct ifnet *ifp;

	callout_init_mp(&if_slowtimo_timer);
	netmsg_init(&if_slowtimo_netmsg, NULL, &netisr_adone_rport,
	    MSGF_PRIORITY, if_slowtimo_dispatch);

	/* XXX is this necessary? */
	ifnet_lock();
	TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
		if (ifp->if_snd.altq_maxlen == 0) {
			if_printf(ifp, "XXX: driver didn't set altq_maxlen\n");
			ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
		}
	}
	ifnet_unlock();

	/* Start if_slowtimo */
	lwkt_sendmsg(netisr_cpuport(0), &if_slowtimo_netmsg.lmsg);
}

static void
ifsq_ifstart_ipifunc(void *arg)
{
	struct ifaltq_subque *ifsq = arg;
	struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid);

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), lmsg);
	crit_exit();
}

static __inline void
ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
{
	KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
	TAILQ_REMOVE(&head->stg_head, stage, stg_link);
	stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED);
	stage->stg_cnt = 0;
	stage->stg_len = 0;
}

static __inline void
ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage)
{
	KKASSERT((stage->stg_flags &
	    (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0);
	stage->stg_flags |= IFSQ_STAGE_FLAG_QUED;
	TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link);
}

/*
 * Schedule ifnet.if_start on the subqueue owner CPU
 */
static void
ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force)
{
	int cpu;

	if (!force && curthread->td_type == TD_TYPE_NETISR &&
	    ifsq_stage_cntmax > 0) {
		struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);

		stage->stg_cnt = 0;
		stage->stg_len = 0;
		if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
			ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage);
		stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED;
		return;
	}

	cpu = ifsq_get_cpuid(ifsq);
	if (cpu != mycpuid)
		lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq);
	else
		ifsq_ifstart_ipifunc(ifsq);
}
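/*
 * Note (added for clarity): the per-subqueue "started" flag acts as the
 * ifnet.if_start scheduling interlock.  It is set under ALTQ_SQ_LOCK
 * before if_start is dispatched, and must be cleared before another
 * dispatch can be scheduled; ifsq_ifstart_need_schedule() below decides
 * whether to keep the interlock and reschedule, or to release it.
 */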
/*
 * NOTE:
 * This function releases the ifnet.if_start subqueue interlock if
 * ifnet.if_start does not need to be scheduled on the subqueue.
 */
static __inline int
ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running)
{
	if (!running || ifsq_is_empty(ifsq)
#ifdef ALTQ
	    || ifsq->ifsq_altq->altq_tbr != NULL
#endif
	    ) {
		ALTQ_SQ_LOCK(ifsq);
		/*
		 * ifnet.if_start subqueue interlock is released, if:
		 * 1) Hardware cannot take any packets, due to:
		 *    o  interface is marked down
		 *    o  hardware queue is full (ifsq_is_oactive)
		 *    Under the second situation, hardware interrupt
		 *    or polling(4) will call/schedule ifnet.if_start
		 *    on the subqueue when the hardware queue is ready.
		 * 2) There is no packet in the subqueue.
		 *    Further ifq_dispatch or ifq_handoff will call/
		 *    schedule ifnet.if_start on the subqueue.
		 * 3) TBR is used and it does not allow further
		 *    dequeueing.
		 *    TBR callout will call ifnet.if_start on the
		 *    subqueue.
		 */
		if (!running || !ifsq_data_ready(ifsq)) {
			ifsq_clr_started(ifsq);
			ALTQ_SQ_UNLOCK(ifsq);
			return 0;
		}
		ALTQ_SQ_UNLOCK(ifsq);
	}
	return 1;
}

static void
ifsq_ifstart_dispatch(netmsg_t msg)
{
	struct lwkt_msg *lmsg = &msg->base.lmsg;
	struct ifaltq_subque *ifsq = lmsg->u.ms_resultp;
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct globaldata *gd = mycpu;
	int running = 0, need_sched;

	crit_enter_gd(gd);

	lwkt_replymsg(lmsg, 0);	/* reply ASAP */

	if (gd->gd_cpuid != ifsq_get_cpuid(ifsq)) {
		/*
		 * We need to chase the subqueue owner CPU change.
		 */
		ifsq_ifstart_schedule(ifsq, 1);
		crit_exit_gd(gd);
		return;
	}

	ifsq_serialize_hw(ifsq);
	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) {
		ifp->if_start(ifp, ifsq);
		if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
			running = 1;
	}
	need_sched = ifsq_ifstart_need_schedule(ifsq, running);
	ifsq_deserialize_hw(ifsq);

	if (need_sched) {
		/*
		 * More data needs to be transmitted, ifnet.if_start is
		 * scheduled on the subqueue owner CPU, and we keep going.
		 * NOTE: ifnet.if_start subqueue interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}

	crit_exit_gd(gd);
}

/* Device driver ifnet.if_start helper function */
void
ifsq_devstart(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	int running = 0;

	ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);

	ALTQ_SQ_LOCK(ifsq);
	if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) {
		ALTQ_SQ_UNLOCK(ifsq);
		return;
	}
	ifsq_set_started(ifsq);
	ALTQ_SQ_UNLOCK(ifsq);

	ifp->if_start(ifp, ifsq);

	if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq))
		running = 1;

	if (ifsq_ifstart_need_schedule(ifsq, running)) {
		/*
		 * More data needs to be transmitted, ifnet.if_start is
		 * scheduled on ifnet's CPU, and we keep going.
		 * NOTE: ifnet.if_start interlock is not released.
		 */
		ifsq_ifstart_schedule(ifsq, 0);
	}
}
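/*
 * Usage sketch (hypothetical driver, added for clarity): a typical TX
 * completion path reclaims descriptors, clears the oactive state and
 * then kicks the transmit queue again through this helper:
 *
 *	ifsq_clr_oactive(ifsq);
 *	if (!ifsq_is_empty(ifsq))
 *		ifsq_devstart(ifsq);
 *
 * ifsq_devstart() must be called with the subqueue's hardware
 * serializer held (see ASSERT_ALTQ_SQ_SERIALIZED_HW above).
 */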
void
if_devstart(struct ifnet *ifp)
{
	ifsq_devstart(ifq_get_subq_default(&ifp->if_snd));
}

/* Device driver ifnet.if_start schedule helper function */
void
ifsq_devstart_sched(struct ifaltq_subque *ifsq)
{
	ifsq_ifstart_schedule(ifsq, 1);
}

void
if_devstart_sched(struct ifnet *ifp)
{
	ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd));
}

static void
if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_enter(ifp->if_serializer);
}

static void
if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused)
{
	return lwkt_serialize_try(ifp->if_serializer);
}

#ifdef INVARIANTS
static void
if_default_serialize_assert(struct ifnet *ifp,
    enum ifnet_serialize slz __unused, boolean_t serialized)
{
	if (serialized)
		ASSERT_SERIALIZED(ifp->if_serializer);
	else
		ASSERT_NOT_SERIALIZED(ifp->if_serializer);
}
#endif

/*
 * Attach an interface to the list of "active" interfaces.
 *
 * The serializer is optional.
 */
void
if_attach(struct ifnet *ifp, lwkt_serialize_t serializer)
{
	unsigned socksize;
	int namelen, masklen;
	struct sockaddr_dl *sdl, *sdl_addr;
	struct ifaddr *ifa;
	struct ifaltq *ifq;
	struct ifnet **old_ifindex2ifnet = NULL;
	struct ifnet_array *old_ifnet_array;
	int i, q;

	static int if_indexlim = 8;

	if (ifp->if_serialize != NULL) {
		KASSERT(ifp->if_deserialize != NULL &&
		    ifp->if_tryserialize != NULL &&
		    ifp->if_serialize_assert != NULL,
		    ("serialize functions are partially setup"));

		/*
		 * If the device supplies serialize functions,
		 * then clear if_serializer to catch any invalid
		 * usage of this field.
		 */
		KASSERT(serializer == NULL,
		    ("both serialize functions and default serializer "
		     "are supplied"));
		ifp->if_serializer = NULL;
	} else {
		KASSERT(ifp->if_deserialize == NULL &&
		    ifp->if_tryserialize == NULL &&
		    ifp->if_serialize_assert == NULL,
		    ("serialize functions are partially setup"));
		ifp->if_serialize = if_default_serialize;
		ifp->if_deserialize = if_default_deserialize;
		ifp->if_tryserialize = if_default_tryserialize;
#ifdef INVARIANTS
		ifp->if_serialize_assert = if_default_serialize_assert;
#endif

		/*
		 * The serializer can be passed in from the device,
		 * allowing the same serializer to be used for both
		 * the interrupt interlock and the device queue.
		 * If not specified, the netif structure will use an
		 * embedded serializer.
		 */
		if (serializer == NULL) {
			serializer = &ifp->if_default_serializer;
			lwkt_serialize_init(serializer);
		}
		ifp->if_serializer = serializer;
	}

	/*
	 * XXX -
	 * The old code would work if the interface passed a pre-existing
	 * chain of ifaddrs to this code.  We don't trust our callers to
	 * properly initialize the tailq, however, so we no longer allow
	 * this unlikely case.
	 */
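	/*
	 * Note (added for clarity): the ifaddr list is replicated per
	 * CPU -- each CPU gets its own list head below, and readers
	 * always index with mycpuid, so netisrs can walk the list
	 * without interlocking with other CPUs.
	 */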
526 */ 527 ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead), 528 M_IFADDR, M_WAITOK | M_ZERO); 529 for (i = 0; i < ncpus; ++i) 530 TAILQ_INIT(&ifp->if_addrheads[i]); 531 532 TAILQ_INIT(&ifp->if_multiaddrs); 533 TAILQ_INIT(&ifp->if_groups); 534 getmicrotime(&ifp->if_lastchange); 535 536 /* 537 * create a Link Level name for this device 538 */ 539 namelen = strlen(ifp->if_xname); 540 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen; 541 socksize = masklen + ifp->if_addrlen; 542 if (socksize < sizeof(*sdl)) 543 socksize = sizeof(*sdl); 544 socksize = RT_ROUNDUP(socksize); 545 ifa = ifa_create(sizeof(struct ifaddr) + 2 * socksize); 546 sdl = sdl_addr = (struct sockaddr_dl *)(ifa + 1); 547 sdl->sdl_len = socksize; 548 sdl->sdl_family = AF_LINK; 549 bcopy(ifp->if_xname, sdl->sdl_data, namelen); 550 sdl->sdl_nlen = namelen; 551 sdl->sdl_type = ifp->if_type; 552 ifp->if_lladdr = ifa; 553 ifa->ifa_ifp = ifp; 554 ifa->ifa_rtrequest = link_rtrequest; 555 ifa->ifa_addr = (struct sockaddr *)sdl; 556 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); 557 ifa->ifa_netmask = (struct sockaddr *)sdl; 558 sdl->sdl_len = masklen; 559 while (namelen != 0) 560 sdl->sdl_data[--namelen] = 0xff; 561 ifa_iflink(ifa, ifp, 0 /* Insert head */); 562 563 ifp->if_data_pcpu = kmalloc_cachealign( 564 ncpus * sizeof(struct ifdata_pcpu), M_DEVBUF, M_WAITOK | M_ZERO); 565 566 if (ifp->if_mapsubq == NULL) 567 ifp->if_mapsubq = ifq_mapsubq_default; 568 569 ifq = &ifp->if_snd; 570 ifq->altq_type = 0; 571 ifq->altq_disc = NULL; 572 ifq->altq_flags &= ALTQF_CANTCHANGE; 573 ifq->altq_tbr = NULL; 574 ifq->altq_ifp = ifp; 575 576 if (ifq->altq_subq_cnt <= 0) 577 ifq->altq_subq_cnt = 1; 578 ifq->altq_subq = kmalloc_cachealign( 579 ifq->altq_subq_cnt * sizeof(struct ifaltq_subque), 580 M_DEVBUF, M_WAITOK | M_ZERO); 581 582 if (ifq->altq_maxlen == 0) { 583 if_printf(ifp, "driver didn't set altq_maxlen\n"); 584 ifq_set_maxlen(ifq, ifqmaxlen); 585 } 586 587 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 588 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 589 590 ALTQ_SQ_LOCK_INIT(ifsq); 591 ifsq->ifsq_index = q; 592 593 ifsq->ifsq_altq = ifq; 594 ifsq->ifsq_ifp = ifp; 595 596 ifsq->ifsq_maxlen = ifq->altq_maxlen; 597 ifsq->ifsq_maxbcnt = ifsq->ifsq_maxlen * MCLBYTES; 598 ifsq->ifsq_prepended = NULL; 599 ifsq->ifsq_started = 0; 600 ifsq->ifsq_hw_oactive = 0; 601 ifsq_set_cpuid(ifsq, 0); 602 if (ifp->if_serializer != NULL) 603 ifsq_set_hw_serialize(ifsq, ifp->if_serializer); 604 605 ifsq->ifsq_stage = 606 kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage), 607 M_DEVBUF, M_WAITOK | M_ZERO); 608 for (i = 0; i < ncpus; ++i) 609 ifsq->ifsq_stage[i].stg_subq = ifsq; 610 611 ifsq->ifsq_ifstart_nmsg = 612 kmalloc(ncpus * sizeof(struct netmsg_base), 613 M_LWKTMSG, M_WAITOK); 614 for (i = 0; i < ncpus; ++i) { 615 netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL, 616 &netisr_adone_rport, 0, ifsq_ifstart_dispatch); 617 ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq; 618 } 619 } 620 ifq_set_classic(ifq); 621 622 /* 623 * Increase mbuf cluster/jcluster limits for the mbufs that 624 * could sit on the device queues for quite some time. 625 */ 626 if (ifp->if_nmbclusters > 0) 627 mcl_inclimit(ifp->if_nmbclusters); 628 if (ifp->if_nmbjclusters > 0) 629 mjcl_inclimit(ifp->if_nmbjclusters); 630 631 /* 632 * Install this ifp into ifindex2inet, ifnet queue and ifnet 633 * array after it is setup. 
634 * 635 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 636 * by ifnet lock, so that non-netisr threads could get a 637 * consistent view. 638 */ 639 ifnet_lock(); 640 641 /* Don't update if_index until ifindex2ifnet is setup */ 642 ifp->if_index = if_index + 1; 643 sdl_addr->sdl_index = ifp->if_index; 644 645 /* 646 * Install this ifp into ifindex2ifnet 647 */ 648 if (ifindex2ifnet == NULL || ifp->if_index >= if_indexlim) { 649 unsigned int n; 650 struct ifnet **q; 651 652 /* 653 * Grow ifindex2ifnet 654 */ 655 if_indexlim <<= 1; 656 n = if_indexlim * sizeof(*q); 657 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO); 658 if (ifindex2ifnet != NULL) { 659 bcopy(ifindex2ifnet, q, n/2); 660 /* Free old ifindex2ifnet after sync all netisrs */ 661 old_ifindex2ifnet = ifindex2ifnet; 662 } 663 ifindex2ifnet = q; 664 } 665 ifindex2ifnet[ifp->if_index] = ifp; 666 /* 667 * Update if_index after this ifp is installed into ifindex2ifnet, 668 * so that netisrs could get a consistent view of ifindex2ifnet. 669 */ 670 cpu_sfence(); 671 if_index = ifp->if_index; 672 673 /* 674 * Install this ifp into ifnet array. 675 */ 676 /* Free old ifnet array after sync all netisrs */ 677 old_ifnet_array = ifnet_array; 678 ifnet_array = ifnet_array_add(ifp, old_ifnet_array); 679 680 /* 681 * Install this ifp into ifnet queue. 682 */ 683 TAILQ_INSERT_TAIL(&ifnetlist, ifp, if_link); 684 685 ifnet_unlock(); 686 687 /* 688 * Sync all netisrs so that the old ifindex2ifnet and ifnet array 689 * are no longer accessed and we can free them safely later on. 690 */ 691 netmsg_service_sync(); 692 if (old_ifindex2ifnet != NULL) 693 kfree(old_ifindex2ifnet, M_IFADDR); 694 ifnet_array_free(old_ifnet_array); 695 696 if (!SLIST_EMPTY(&domains)) 697 if_attachdomain1(ifp); 698 699 /* Announce the interface. */ 700 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 701 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); 702 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 703 } 704 705 static void 706 if_attachdomain(void *dummy) 707 { 708 struct ifnet *ifp; 709 710 ifnet_lock(); 711 TAILQ_FOREACH(ifp, &ifnetlist, if_list) 712 if_attachdomain1(ifp); 713 ifnet_unlock(); 714 } 715 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST, 716 if_attachdomain, NULL); 717 718 static void 719 if_attachdomain1(struct ifnet *ifp) 720 { 721 struct domain *dp; 722 723 crit_enter(); 724 725 /* address family dependent data region */ 726 bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); 727 SLIST_FOREACH(dp, &domains, dom_next) 728 if (dp->dom_ifattach) 729 ifp->if_afdata[dp->dom_family] = 730 (*dp->dom_ifattach)(ifp); 731 crit_exit(); 732 } 733 734 /* 735 * Purge all addresses whose type is _not_ AF_LINK 736 */ 737 static void 738 if_purgeaddrs_nolink_dispatch(netmsg_t nmsg) 739 { 740 struct lwkt_msg *lmsg = &nmsg->lmsg; 741 struct ifnet *ifp = lmsg->u.ms_resultp; 742 struct ifaddr_container *ifac, *next; 743 744 ASSERT_IN_NETISR(0); 745 746 /* 747 * The ifaddr processing in the following loop will block, 748 * however, this function is called in netisr0, in which 749 * ifaddr list changes happen, so we don't care about the 750 * blockness of the ifaddr processing here. 751 */ 752 TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid], 753 ifa_link, next) { 754 struct ifaddr *ifa = ifac->ifa; 755 756 /* Ignore marker */ 757 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 758 continue; 759 760 /* Leave link ifaddr as it is */ 761 if (ifa->ifa_addr->sa_family == AF_LINK) 762 continue; 763 #ifdef INET 764 /* XXX: Ugly!! 
/*
 * Purge all addresses whose type is _not_ AF_LINK
 */
static void
if_purgeaddrs_nolink_dispatch(netmsg_t nmsg)
{
	struct lwkt_msg *lmsg = &nmsg->lmsg;
	struct ifnet *ifp = lmsg->u.ms_resultp;
	struct ifaddr_container *ifac, *next;

	ASSERT_IN_NETISR(0);

	/*
	 * The ifaddr processing in the following loop may block;
	 * however, this function is called in netisr0, where ifaddr
	 * list changes happen, so blocking here is not a problem.
	 */
	TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid],
	    ifa_link, next) {
		struct ifaddr *ifa = ifac->ifa;

		/* Ignore marker */
		if (ifa->ifa_addr->sa_family == AF_UNSPEC)
			continue;

		/* Leave link ifaddr as it is */
		if (ifa->ifa_addr->sa_family == AF_LINK)
			continue;
#ifdef INET
		/* XXX: Ugly!!  ad hoc just for INET */
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
			struct ifaliasreq ifr;
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in4 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			bzero(&ifr, sizeof ifr);
			ifr.ifra_addr = *ifa->ifa_addr;
			if (ifa->ifa_dstaddr)
				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
			if (in_control(SIOCDIFADDR, (caddr_t)&ifr, ifp,
			    NULL) == 0)
				continue;
		}
#endif /* INET */
#ifdef INET6
		if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6) {
#ifdef IFADDR_DEBUG_VERBOSE
			int i;

			kprintf("purge in6 addr %p: ", ifa);
			for (i = 0; i < ncpus; ++i)
				kprintf("%d ", ifa->ifa_containers[i].ifa_refcnt);
			kprintf("\n");
#endif

			in6_purgeaddr(ifa);
			/* ifp_addrhead is already updated */
			continue;
		}
#endif /* INET6 */
		ifa_ifunlink(ifa, ifp);
		ifa_destroy(ifa);
	}

	lwkt_replymsg(lmsg, 0);
}

void
if_purgeaddrs_nolink(struct ifnet *ifp)
{
	struct netmsg_base nmsg;
	struct lwkt_msg *lmsg = &nmsg.lmsg;

	ASSERT_CANDOMSG_NETISR0(curthread);

	netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0,
	    if_purgeaddrs_nolink_dispatch);
	lmsg->u.ms_resultp = ifp;
	lwkt_domsg(netisr_cpuport(0), lmsg, 0);
}

static void
ifq_stage_detach_handler(netmsg_t nmsg)
{
	struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp;
	int q;

	for (q = 0; q < ifq->altq_subq_cnt; ++q) {
		struct ifaltq_subque *ifsq = &ifq->altq_subq[q];
		struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid);

		if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED)
			ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage);
	}
	lwkt_replymsg(&nmsg->lmsg, 0);
}

static void
ifq_stage_detach(struct ifaltq *ifq)
{
	struct netmsg_base base;
	int cpu;

	netmsg_init(&base, NULL, &curthread->td_msgport, 0,
	    ifq_stage_detach_handler);
	base.lmsg.u.ms_resultp = ifq;

	for (cpu = 0; cpu < ncpus; ++cpu)
		lwkt_domsg(netisr_cpuport(cpu), &base.lmsg, 0);
}

struct netmsg_if_rtdel {
	struct netmsg_base	base;
	struct ifnet		*ifp;
};

static void
if_rtdel_dispatch(netmsg_t msg)
{
	struct netmsg_if_rtdel *rmsg = (void *)msg;
	int i, nextcpu, cpu;

	cpu = mycpuid;
	for (i = 1; i <= AF_MAX; i++) {
		struct radix_node_head *rnh;

		if ((rnh = rt_tables[cpu][i]) == NULL)
			continue;
		rnh->rnh_walktree(rnh, if_rtdel, rmsg->ifp);
	}

	nextcpu = cpu + 1;
	if (nextcpu < ncpus)
		lwkt_forwardmsg(netisr_cpuport(nextcpu), &rmsg->base.lmsg);
	else
		lwkt_replymsg(&rmsg->base.lmsg, 0);
}
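/*
 * Note (added for clarity): the rtdel message above is deliberately
 * chained.  It starts on CPU0, walks that CPU's replica of the routing
 * tables, and is then forwarded (lwkt_forwardmsg) to the next CPU until
 * the last CPU replies.  This visits every per-CPU route table exactly
 * once without any cross-CPU locking.
 */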
903 */ 904 ifnet_lock(); 905 906 /* 907 * Remove this ifp from ifindex2ifnet and maybe decrement if_index. 908 */ 909 ifindex2ifnet[ifp->if_index] = NULL; 910 while (if_index > 0 && ifindex2ifnet[if_index] == NULL) 911 if_index--; 912 913 /* 914 * Remove this ifp from ifnet queue. 915 */ 916 TAILQ_REMOVE(&ifnetlist, ifp, if_link); 917 918 /* 919 * Remove this ifp from ifnet array. 920 */ 921 /* Free old ifnet array after sync all netisrs */ 922 old_ifnet_array = ifnet_array; 923 ifnet_array = ifnet_array_del(ifp, old_ifnet_array); 924 925 ifnet_unlock(); 926 927 /* 928 * Sync all netisrs so that the old ifnet array is no longer 929 * accessed and we can free it safely later on. 930 */ 931 netmsg_service_sync(); 932 ifnet_array_free(old_ifnet_array); 933 934 /* 935 * Remove routes and flush queues. 936 */ 937 crit_enter(); 938 #ifdef IFPOLL_ENABLE 939 if (ifp->if_flags & IFF_NPOLLING) 940 ifpoll_deregister(ifp); 941 #endif 942 if_down(ifp); 943 944 /* Decrease the mbuf clusters/jclusters limits increased by us */ 945 if (ifp->if_nmbclusters > 0) 946 mcl_inclimit(-ifp->if_nmbclusters); 947 if (ifp->if_nmbjclusters > 0) 948 mjcl_inclimit(-ifp->if_nmbjclusters); 949 950 #ifdef ALTQ 951 if (ifq_is_enabled(&ifp->if_snd)) 952 altq_disable(&ifp->if_snd); 953 if (ifq_is_attached(&ifp->if_snd)) 954 altq_detach(&ifp->if_snd); 955 #endif 956 957 /* 958 * Clean up all addresses. 959 */ 960 ifp->if_lladdr = NULL; 961 962 if_purgeaddrs_nolink(ifp); 963 if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) { 964 struct ifaddr *ifa; 965 966 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 967 KASSERT(ifa->ifa_addr->sa_family == AF_LINK, 968 ("non-link ifaddr is left on if_addrheads")); 969 970 ifa_ifunlink(ifa, ifp); 971 ifa_destroy(ifa); 972 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]), 973 ("there are still ifaddrs left on if_addrheads")); 974 } 975 976 #ifdef INET 977 /* 978 * Remove all IPv4 kernel structures related to ifp. 979 */ 980 in_ifdetach(ifp); 981 #endif 982 983 #ifdef INET6 984 /* 985 * Remove all IPv6 kernel structs related to ifp. This should be done 986 * before removing routing entries below, since IPv6 interface direct 987 * routes are expected to be removed by the IPv6-specific kernel API. 988 * Otherwise, the kernel will detect some inconsistency and bark it. 
989 */ 990 in6_ifdetach(ifp); 991 #endif 992 993 /* 994 * Delete all remaining routes using this interface 995 */ 996 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, 997 if_rtdel_dispatch); 998 msg.ifp = ifp; 999 rt_domsg_global(&msg.base); 1000 1001 SLIST_FOREACH(dp, &domains, dom_next) 1002 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) 1003 (*dp->dom_ifdetach)(ifp, 1004 ifp->if_afdata[dp->dom_family]); 1005 1006 kfree(ifp->if_addrheads, M_IFADDR); 1007 1008 lwkt_synchronize_ipiqs("if_detach"); 1009 ifq_stage_detach(&ifp->if_snd); 1010 1011 for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) { 1012 struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q]; 1013 1014 kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG); 1015 kfree(ifsq->ifsq_stage, M_DEVBUF); 1016 } 1017 kfree(ifp->if_snd.altq_subq, M_DEVBUF); 1018 1019 kfree(ifp->if_data_pcpu, M_DEVBUF); 1020 1021 crit_exit(); 1022 } 1023 1024 /* 1025 * Create interface group without members 1026 */ 1027 struct ifg_group * 1028 if_creategroup(const char *groupname) 1029 { 1030 struct ifg_group *ifg = NULL; 1031 1032 if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group), 1033 M_TEMP, M_NOWAIT)) == NULL) 1034 return (NULL); 1035 1036 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); 1037 ifg->ifg_refcnt = 0; 1038 ifg->ifg_carp_demoted = 0; 1039 TAILQ_INIT(&ifg->ifg_members); 1040 #if NPF > 0 1041 pfi_attach_ifgroup(ifg); 1042 #endif 1043 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next); 1044 1045 return (ifg); 1046 } 1047 1048 /* 1049 * Add a group to an interface 1050 */ 1051 int 1052 if_addgroup(struct ifnet *ifp, const char *groupname) 1053 { 1054 struct ifg_list *ifgl; 1055 struct ifg_group *ifg = NULL; 1056 struct ifg_member *ifgm; 1057 1058 if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && 1059 groupname[strlen(groupname) - 1] <= '9') 1060 return (EINVAL); 1061 1062 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1063 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1064 return (EEXIST); 1065 1066 if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL) 1067 return (ENOMEM); 1068 1069 if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) { 1070 kfree(ifgl, M_TEMP); 1071 return (ENOMEM); 1072 } 1073 1074 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) 1075 if (!strcmp(ifg->ifg_group, groupname)) 1076 break; 1077 1078 if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) { 1079 kfree(ifgl, M_TEMP); 1080 kfree(ifgm, M_TEMP); 1081 return (ENOMEM); 1082 } 1083 1084 ifg->ifg_refcnt++; 1085 ifgl->ifgl_group = ifg; 1086 ifgm->ifgm_ifp = ifp; 1087 1088 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); 1089 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); 1090 1091 #if NPF > 0 1092 pfi_group_change(groupname); 1093 #endif 1094 1095 return (0); 1096 } 1097 1098 /* 1099 * Remove a group from an interface 1100 */ 1101 int 1102 if_delgroup(struct ifnet *ifp, const char *groupname) 1103 { 1104 struct ifg_list *ifgl; 1105 struct ifg_member *ifgm; 1106 1107 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1108 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1109 break; 1110 if (ifgl == NULL) 1111 return (ENOENT); 1112 1113 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); 1114 1115 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) 1116 if (ifgm->ifgm_ifp == ifp) 1117 break; 1118 1119 if (ifgm != NULL) { 1120 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); 1121 kfree(ifgm, M_TEMP); 1122 } 1123 1124 if (--ifgl->ifgl_group->ifg_refcnt == 0) { 1125 
/*
 * Remove a group from an interface
 */
int
if_delgroup(struct ifnet *ifp, const char *groupname)
{
	struct ifg_list *ifgl;
	struct ifg_member *ifgm;

	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
			break;
	if (ifgl == NULL)
		return (ENOENT);

	TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);

	TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
		if (ifgm->ifgm_ifp == ifp)
			break;

	if (ifgm != NULL) {
		TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
		kfree(ifgm, M_TEMP);
	}

	if (--ifgl->ifgl_group->ifg_refcnt == 0) {
		TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
#if NPF > 0
		pfi_detach_ifgroup(ifgl->ifgl_group);
#endif
		kfree(ifgl->ifgl_group, M_TEMP);
	}

	kfree(ifgl, M_TEMP);

#if NPF > 0
	pfi_group_change(groupname);
#endif

	return (0);
}

/*
 * Store all groups of an interface in the memory pointed to by data
 */
int
if_getgroup(caddr_t data, struct ifnet *ifp)
{
	int len, error;
	struct ifg_list *ifgl;
	struct ifg_req ifgrq, *ifgp;
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;

	if (ifgr->ifgr_len == 0) {
		TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
			ifgr->ifgr_len += sizeof(struct ifg_req);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
		    sizeof(ifgrq.ifgrq_group));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}

/*
 * Store all members of a group in the memory pointed to by data
 */
int
if_getgroupmembers(caddr_t data)
{
	struct ifgroupreq *ifgr = (struct ifgroupreq *)data;
	struct ifg_group *ifg;
	struct ifg_member *ifgm;
	struct ifg_req ifgrq, *ifgp;
	int len, error;

	TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
			break;
	if (ifg == NULL)
		return (ENOENT);

	if (ifgr->ifgr_len == 0) {
		TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
			ifgr->ifgr_len += sizeof(ifgrq);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
		if (len < sizeof(ifgrq))
			return (EINVAL);
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
		    sizeof(ifgrq.ifgrq_member));
		if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp,
		    sizeof(struct ifg_req))))
			return (error);
		len -= sizeof(ifgrq);
		ifgp++;
	}

	return (0);
}
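/*
 * Note (added for clarity): both copyout routines above implement the
 * usual two-pass ifgroupreq protocol -- a first call with ifgr_len == 0
 * only reports the required buffer size, a second call with ifgr_groups
 * pointing at a buffer of that size copies the entries out.  Userland
 * sketch (error handling omitted; the ioctl name follows the usual BSD
 * convention and is an assumption here):
 *
 *	struct ifgroupreq ifgr;
 *
 *	bzero(&ifgr, sizeof(ifgr));
 *	strlcpy(ifgr.ifgr_name, "em0", sizeof(ifgr.ifgr_name));
 *	ioctl(s, SIOCGIFGROUP, &ifgr);			-- pass 1: size only
 *	ifgr.ifgr_groups = malloc(ifgr.ifgr_len);
 *	ioctl(s, SIOCGIFGROUP, &ifgr);			-- pass 2: fill buffer
 */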
1224 * 1225 * Arguments: 1226 * rn pointer to node in the routing table 1227 * arg argument passed to rnh->rnh_walktree() - detaching interface 1228 * 1229 * Returns: 1230 * 0 successful 1231 * errno failed - reason indicated 1232 * 1233 */ 1234 static int 1235 if_rtdel(struct radix_node *rn, void *arg) 1236 { 1237 struct rtentry *rt = (struct rtentry *)rn; 1238 struct ifnet *ifp = arg; 1239 int err; 1240 1241 if (rt->rt_ifp == ifp) { 1242 1243 /* 1244 * Protect (sorta) against walktree recursion problems 1245 * with cloned routes 1246 */ 1247 if (!(rt->rt_flags & RTF_UP)) 1248 return (0); 1249 1250 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, 1251 rt_mask(rt), rt->rt_flags, 1252 NULL); 1253 if (err) { 1254 log(LOG_WARNING, "if_rtdel: error %d\n", err); 1255 } 1256 } 1257 1258 return (0); 1259 } 1260 1261 static __inline boolean_t 1262 ifa_prefer(const struct ifaddr *cur_ifa, const struct ifaddr *old_ifa) 1263 { 1264 if (old_ifa == NULL) 1265 return TRUE; 1266 1267 if ((old_ifa->ifa_ifp->if_flags & IFF_UP) == 0 && 1268 (cur_ifa->ifa_ifp->if_flags & IFF_UP)) 1269 return TRUE; 1270 if ((old_ifa->ifa_flags & IFA_ROUTE) == 0 && 1271 (cur_ifa->ifa_flags & IFA_ROUTE)) 1272 return TRUE; 1273 return FALSE; 1274 } 1275 1276 /* 1277 * Locate an interface based on a complete address. 1278 */ 1279 struct ifaddr * 1280 ifa_ifwithaddr(struct sockaddr *addr) 1281 { 1282 const struct ifnet_array *arr; 1283 int i; 1284 1285 arr = ifnet_array_get(); 1286 for (i = 0; i < arr->ifnet_count; ++i) { 1287 struct ifnet *ifp = arr->ifnet_arr[i]; 1288 struct ifaddr_container *ifac; 1289 1290 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1291 struct ifaddr *ifa = ifac->ifa; 1292 1293 if (ifa->ifa_addr->sa_family != addr->sa_family) 1294 continue; 1295 if (sa_equal(addr, ifa->ifa_addr)) 1296 return (ifa); 1297 if ((ifp->if_flags & IFF_BROADCAST) && 1298 ifa->ifa_broadaddr && 1299 /* IPv6 doesn't have broadcast */ 1300 ifa->ifa_broadaddr->sa_len != 0 && 1301 sa_equal(ifa->ifa_broadaddr, addr)) 1302 return (ifa); 1303 } 1304 } 1305 return (NULL); 1306 } 1307 1308 /* 1309 * Locate the point to point interface with a given destination address. 1310 */ 1311 struct ifaddr * 1312 ifa_ifwithdstaddr(struct sockaddr *addr) 1313 { 1314 const struct ifnet_array *arr; 1315 int i; 1316 1317 arr = ifnet_array_get(); 1318 for (i = 0; i < arr->ifnet_count; ++i) { 1319 struct ifnet *ifp = arr->ifnet_arr[i]; 1320 struct ifaddr_container *ifac; 1321 1322 if (!(ifp->if_flags & IFF_POINTOPOINT)) 1323 continue; 1324 1325 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1326 struct ifaddr *ifa = ifac->ifa; 1327 1328 if (ifa->ifa_addr->sa_family != addr->sa_family) 1329 continue; 1330 if (ifa->ifa_dstaddr && 1331 sa_equal(addr, ifa->ifa_dstaddr)) 1332 return (ifa); 1333 } 1334 } 1335 return (NULL); 1336 } 1337 1338 /* 1339 * Find an interface on a specific network. If many, choice 1340 * is most specific found. 1341 */ 1342 struct ifaddr * 1343 ifa_ifwithnet(struct sockaddr *addr) 1344 { 1345 struct ifaddr *ifa_maybe = NULL; 1346 u_int af = addr->sa_family; 1347 char *addr_data = addr->sa_data, *cplim; 1348 const struct ifnet_array *arr; 1349 int i; 1350 1351 /* 1352 * AF_LINK addresses can be looked up directly by their index number, 1353 * so do that if we can. 
1354 */ 1355 if (af == AF_LINK) { 1356 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr; 1357 1358 if (sdl->sdl_index && sdl->sdl_index <= if_index) 1359 return (ifindex2ifnet[sdl->sdl_index]->if_lladdr); 1360 } 1361 1362 /* 1363 * Scan though each interface, looking for ones that have 1364 * addresses in this address family. 1365 */ 1366 arr = ifnet_array_get(); 1367 for (i = 0; i < arr->ifnet_count; ++i) { 1368 struct ifnet *ifp = arr->ifnet_arr[i]; 1369 struct ifaddr_container *ifac; 1370 1371 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1372 struct ifaddr *ifa = ifac->ifa; 1373 char *cp, *cp2, *cp3; 1374 1375 if (ifa->ifa_addr->sa_family != af) 1376 next: continue; 1377 if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) { 1378 /* 1379 * This is a bit broken as it doesn't 1380 * take into account that the remote end may 1381 * be a single node in the network we are 1382 * looking for. 1383 * The trouble is that we don't know the 1384 * netmask for the remote end. 1385 */ 1386 if (ifa->ifa_dstaddr != NULL && 1387 sa_equal(addr, ifa->ifa_dstaddr)) 1388 return (ifa); 1389 } else { 1390 /* 1391 * if we have a special address handler, 1392 * then use it instead of the generic one. 1393 */ 1394 if (ifa->ifa_claim_addr) { 1395 if ((*ifa->ifa_claim_addr)(ifa, addr)) { 1396 return (ifa); 1397 } else { 1398 continue; 1399 } 1400 } 1401 1402 /* 1403 * Scan all the bits in the ifa's address. 1404 * If a bit dissagrees with what we are 1405 * looking for, mask it with the netmask 1406 * to see if it really matters. 1407 * (A byte at a time) 1408 */ 1409 if (ifa->ifa_netmask == 0) 1410 continue; 1411 cp = addr_data; 1412 cp2 = ifa->ifa_addr->sa_data; 1413 cp3 = ifa->ifa_netmask->sa_data; 1414 cplim = ifa->ifa_netmask->sa_len + 1415 (char *)ifa->ifa_netmask; 1416 while (cp3 < cplim) 1417 if ((*cp++ ^ *cp2++) & *cp3++) 1418 goto next; /* next address! */ 1419 /* 1420 * If the netmask of what we just found 1421 * is more specific than what we had before 1422 * (if we had one) then remember the new one 1423 * before continuing to search for an even 1424 * better one. If the netmasks are equal, 1425 * we prefer the this ifa based on the result 1426 * of ifa_prefer(). 1427 */ 1428 if (ifa_maybe == NULL || 1429 rn_refines((char *)ifa->ifa_netmask, 1430 (char *)ifa_maybe->ifa_netmask) || 1431 (sa_equal(ifa_maybe->ifa_netmask, 1432 ifa->ifa_netmask) && 1433 ifa_prefer(ifa, ifa_maybe))) 1434 ifa_maybe = ifa; 1435 } 1436 } 1437 } 1438 return (ifa_maybe); 1439 } 1440 1441 /* 1442 * Find an interface address specific to an interface best matching 1443 * a given address. 
1444 */ 1445 struct ifaddr * 1446 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) 1447 { 1448 struct ifaddr_container *ifac; 1449 char *cp, *cp2, *cp3; 1450 char *cplim; 1451 struct ifaddr *ifa_maybe = NULL; 1452 u_int af = addr->sa_family; 1453 1454 if (af >= AF_MAX) 1455 return (0); 1456 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1457 struct ifaddr *ifa = ifac->ifa; 1458 1459 if (ifa->ifa_addr->sa_family != af) 1460 continue; 1461 if (ifa_maybe == NULL) 1462 ifa_maybe = ifa; 1463 if (ifa->ifa_netmask == NULL) { 1464 if (sa_equal(addr, ifa->ifa_addr) || 1465 (ifa->ifa_dstaddr != NULL && 1466 sa_equal(addr, ifa->ifa_dstaddr))) 1467 return (ifa); 1468 continue; 1469 } 1470 if (ifp->if_flags & IFF_POINTOPOINT) { 1471 if (sa_equal(addr, ifa->ifa_dstaddr)) 1472 return (ifa); 1473 } else { 1474 cp = addr->sa_data; 1475 cp2 = ifa->ifa_addr->sa_data; 1476 cp3 = ifa->ifa_netmask->sa_data; 1477 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; 1478 for (; cp3 < cplim; cp3++) 1479 if ((*cp++ ^ *cp2++) & *cp3) 1480 break; 1481 if (cp3 == cplim) 1482 return (ifa); 1483 } 1484 } 1485 return (ifa_maybe); 1486 } 1487 1488 /* 1489 * Default action when installing a route with a Link Level gateway. 1490 * Lookup an appropriate real ifa to point to. 1491 * This should be moved to /sys/net/link.c eventually. 1492 */ 1493 static void 1494 link_rtrequest(int cmd, struct rtentry *rt) 1495 { 1496 struct ifaddr *ifa; 1497 struct sockaddr *dst; 1498 struct ifnet *ifp; 1499 1500 if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL || 1501 (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL) 1502 return; 1503 ifa = ifaof_ifpforaddr(dst, ifp); 1504 if (ifa != NULL) { 1505 IFAFREE(rt->rt_ifa); 1506 IFAREF(ifa); 1507 rt->rt_ifa = ifa; 1508 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) 1509 ifa->ifa_rtrequest(cmd, rt); 1510 } 1511 } 1512 1513 struct netmsg_ifroute { 1514 struct netmsg_base base; 1515 struct ifnet *ifp; 1516 int flag; 1517 int fam; 1518 }; 1519 1520 /* 1521 * Mark an interface down and notify protocols of the transition. 1522 */ 1523 static void 1524 if_unroute_dispatch(netmsg_t nmsg) 1525 { 1526 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1527 struct ifnet *ifp = msg->ifp; 1528 int flag = msg->flag, fam = msg->fam; 1529 struct ifaddr_container *ifac; 1530 1531 ifp->if_flags &= ~flag; 1532 getmicrotime(&ifp->if_lastchange); 1533 /* 1534 * The ifaddr processing in the following loop will block, 1535 * however, this function is called in netisr0, in which 1536 * ifaddr list changes happen, so we don't care about the 1537 * blockness of the ifaddr processing here. 1538 */ 1539 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1540 struct ifaddr *ifa = ifac->ifa; 1541 1542 /* Ignore marker */ 1543 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1544 continue; 1545 1546 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1547 kpfctlinput(PRC_IFDOWN, ifa->ifa_addr); 1548 } 1549 ifq_purge_all(&ifp->if_snd); 1550 rt_ifmsg(ifp); 1551 1552 lwkt_replymsg(&nmsg->lmsg, 0); 1553 } 1554 1555 void 1556 if_unroute(struct ifnet *ifp, int flag, int fam) 1557 { 1558 struct netmsg_ifroute msg; 1559 1560 ASSERT_CANDOMSG_NETISR0(curthread); 1561 1562 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1563 if_unroute_dispatch); 1564 msg.ifp = ifp; 1565 msg.flag = flag; 1566 msg.fam = fam; 1567 lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0); 1568 } 1569 1570 /* 1571 * Mark an interface up and notify protocols of the transition. 
1572 */ 1573 static void 1574 if_route_dispatch(netmsg_t nmsg) 1575 { 1576 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1577 struct ifnet *ifp = msg->ifp; 1578 int flag = msg->flag, fam = msg->fam; 1579 struct ifaddr_container *ifac; 1580 1581 ifq_purge_all(&ifp->if_snd); 1582 ifp->if_flags |= flag; 1583 getmicrotime(&ifp->if_lastchange); 1584 /* 1585 * The ifaddr processing in the following loop will block, 1586 * however, this function is called in netisr0, in which 1587 * ifaddr list changes happen, so we don't care about the 1588 * blockness of the ifaddr processing here. 1589 */ 1590 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1591 struct ifaddr *ifa = ifac->ifa; 1592 1593 /* Ignore marker */ 1594 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1595 continue; 1596 1597 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1598 kpfctlinput(PRC_IFUP, ifa->ifa_addr); 1599 } 1600 rt_ifmsg(ifp); 1601 #ifdef INET6 1602 in6_if_up(ifp); 1603 #endif 1604 1605 lwkt_replymsg(&nmsg->lmsg, 0); 1606 } 1607 1608 void 1609 if_route(struct ifnet *ifp, int flag, int fam) 1610 { 1611 struct netmsg_ifroute msg; 1612 1613 ASSERT_CANDOMSG_NETISR0(curthread); 1614 1615 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1616 if_route_dispatch); 1617 msg.ifp = ifp; 1618 msg.flag = flag; 1619 msg.fam = fam; 1620 lwkt_domsg(netisr_cpuport(0), &msg.base.lmsg, 0); 1621 } 1622 1623 /* 1624 * Mark an interface down and notify protocols of the transition. An 1625 * interface going down is also considered to be a synchronizing event. 1626 * We must ensure that all packet processing related to the interface 1627 * has completed before we return so e.g. the caller can free the ifnet 1628 * structure that the mbufs may be referencing. 1629 * 1630 * NOTE: must be called at splnet or eqivalent. 1631 */ 1632 void 1633 if_down(struct ifnet *ifp) 1634 { 1635 if_unroute(ifp, IFF_UP, AF_UNSPEC); 1636 netmsg_service_sync(); 1637 } 1638 1639 /* 1640 * Mark an interface up and notify protocols of 1641 * the transition. 1642 * NOTE: must be called at splnet or eqivalent. 1643 */ 1644 void 1645 if_up(struct ifnet *ifp) 1646 { 1647 if_route(ifp, IFF_UP, AF_UNSPEC); 1648 } 1649 1650 /* 1651 * Process a link state change. 1652 * NOTE: must be called at splsoftnet or equivalent. 1653 */ 1654 void 1655 if_link_state_change(struct ifnet *ifp) 1656 { 1657 int link_state = ifp->if_link_state; 1658 1659 rt_ifmsg(ifp); 1660 devctl_notify("IFNET", ifp->if_xname, 1661 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL); 1662 } 1663 1664 /* 1665 * Handle interface watchdog timer routines. Called 1666 * from softclock, we decrement timers (if set) and 1667 * call the appropriate interface routine on expiration. 
1668 */ 1669 static void 1670 if_slowtimo_dispatch(netmsg_t nmsg) 1671 { 1672 struct globaldata *gd = mycpu; 1673 const struct ifnet_array *arr; 1674 int i; 1675 1676 ASSERT_IN_NETISR(0); 1677 1678 crit_enter_gd(gd); 1679 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */ 1680 crit_exit_gd(gd); 1681 1682 arr = ifnet_array_get(); 1683 for (i = 0; i < arr->ifnet_count; ++i) { 1684 struct ifnet *ifp = arr->ifnet_arr[i]; 1685 1686 crit_enter_gd(gd); 1687 1688 if (if_stats_compat) { 1689 IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets); 1690 IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors); 1691 IFNET_STAT_GET(ifp, opackets, ifp->if_opackets); 1692 IFNET_STAT_GET(ifp, oerrors, ifp->if_oerrors); 1693 IFNET_STAT_GET(ifp, collisions, ifp->if_collisions); 1694 IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes); 1695 IFNET_STAT_GET(ifp, obytes, ifp->if_obytes); 1696 IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts); 1697 IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts); 1698 IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops); 1699 IFNET_STAT_GET(ifp, noproto, ifp->if_noproto); 1700 IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops); 1701 } 1702 1703 if (ifp->if_timer == 0 || --ifp->if_timer) { 1704 crit_exit_gd(gd); 1705 continue; 1706 } 1707 if (ifp->if_watchdog) { 1708 if (ifnet_tryserialize_all(ifp)) { 1709 (*ifp->if_watchdog)(ifp); 1710 ifnet_deserialize_all(ifp); 1711 } else { 1712 /* try again next timeout */ 1713 ++ifp->if_timer; 1714 } 1715 } 1716 1717 crit_exit_gd(gd); 1718 } 1719 1720 callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL); 1721 } 1722 1723 static void 1724 if_slowtimo(void *arg __unused) 1725 { 1726 struct lwkt_msg *lmsg = &if_slowtimo_netmsg.lmsg; 1727 1728 KASSERT(mycpuid == 0, ("not on cpu0")); 1729 crit_enter(); 1730 if (lmsg->ms_flags & MSGF_DONE) 1731 lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg); 1732 crit_exit(); 1733 } 1734 1735 /* 1736 * Map interface name to 1737 * interface structure pointer. 1738 */ 1739 struct ifnet * 1740 ifunit(const char *name) 1741 { 1742 struct ifnet *ifp; 1743 1744 /* 1745 * Search all the interfaces for this name/number 1746 */ 1747 KASSERT(mtx_owned(&ifnet_mtx), ("ifnet is not locked")); 1748 1749 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 1750 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1751 break; 1752 } 1753 return (ifp); 1754 } 1755 1756 struct ifnet * 1757 ifunit_netisr(const char *name) 1758 { 1759 const struct ifnet_array *arr; 1760 int i; 1761 1762 /* 1763 * Search all the interfaces for this name/number 1764 */ 1765 1766 arr = ifnet_array_get(); 1767 for (i = 0; i < arr->ifnet_count; ++i) { 1768 struct ifnet *ifp = arr->ifnet_arr[i]; 1769 1770 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1771 return ifp; 1772 } 1773 return NULL; 1774 } 1775 1776 /* 1777 * Interface ioctls. 
1778 */ 1779 int 1780 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred) 1781 { 1782 struct ifnet *ifp; 1783 struct ifreq *ifr; 1784 struct ifstat *ifs; 1785 int error; 1786 short oif_flags; 1787 int new_flags; 1788 #ifdef COMPAT_43 1789 int ocmd; 1790 #endif 1791 size_t namelen, onamelen; 1792 char new_name[IFNAMSIZ]; 1793 struct ifaddr *ifa; 1794 struct sockaddr_dl *sdl; 1795 1796 switch (cmd) { 1797 case SIOCGIFCONF: 1798 case OSIOCGIFCONF: 1799 return (ifconf(cmd, data, cred)); 1800 default: 1801 break; 1802 } 1803 1804 ifr = (struct ifreq *)data; 1805 1806 switch (cmd) { 1807 case SIOCIFCREATE: 1808 case SIOCIFCREATE2: 1809 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1810 return (error); 1811 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), 1812 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL)); 1813 case SIOCIFDESTROY: 1814 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1815 return (error); 1816 return (if_clone_destroy(ifr->ifr_name)); 1817 case SIOCIFGCLONERS: 1818 return (if_clone_list((struct if_clonereq *)data)); 1819 default: 1820 break; 1821 } 1822 1823 /* 1824 * Nominal ioctl through interface, lookup the ifp and obtain a 1825 * lock to serialize the ifconfig ioctl operation. 1826 */ 1827 ifnet_lock(); 1828 1829 ifp = ifunit(ifr->ifr_name); 1830 if (ifp == NULL) { 1831 ifnet_unlock(); 1832 return (ENXIO); 1833 } 1834 error = 0; 1835 1836 switch (cmd) { 1837 case SIOCGIFINDEX: 1838 ifr->ifr_index = ifp->if_index; 1839 break; 1840 1841 case SIOCGIFFLAGS: 1842 ifr->ifr_flags = ifp->if_flags; 1843 ifr->ifr_flagshigh = ifp->if_flags >> 16; 1844 break; 1845 1846 case SIOCGIFCAP: 1847 ifr->ifr_reqcap = ifp->if_capabilities; 1848 ifr->ifr_curcap = ifp->if_capenable; 1849 break; 1850 1851 case SIOCGIFMETRIC: 1852 ifr->ifr_metric = ifp->if_metric; 1853 break; 1854 1855 case SIOCGIFMTU: 1856 ifr->ifr_mtu = ifp->if_mtu; 1857 break; 1858 1859 case SIOCGIFTSOLEN: 1860 ifr->ifr_tsolen = ifp->if_tsolen; 1861 break; 1862 1863 case SIOCGIFDATA: 1864 error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data, 1865 sizeof(ifp->if_data)); 1866 break; 1867 1868 case SIOCGIFPHYS: 1869 ifr->ifr_phys = ifp->if_physical; 1870 break; 1871 1872 case SIOCGIFPOLLCPU: 1873 ifr->ifr_pollcpu = -1; 1874 break; 1875 1876 case SIOCSIFPOLLCPU: 1877 break; 1878 1879 case SIOCSIFFLAGS: 1880 error = priv_check_cred(cred, PRIV_ROOT, 0); 1881 if (error) 1882 break; 1883 new_flags = (ifr->ifr_flags & 0xffff) | 1884 (ifr->ifr_flagshigh << 16); 1885 if (ifp->if_flags & IFF_SMART) { 1886 /* Smart drivers twiddle their own routes */ 1887 } else if (ifp->if_flags & IFF_UP && 1888 (new_flags & IFF_UP) == 0) { 1889 crit_enter(); 1890 if_down(ifp); 1891 crit_exit(); 1892 } else if (new_flags & IFF_UP && 1893 (ifp->if_flags & IFF_UP) == 0) { 1894 crit_enter(); 1895 if_up(ifp); 1896 crit_exit(); 1897 } 1898 1899 #ifdef IFPOLL_ENABLE 1900 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) { 1901 if (new_flags & IFF_NPOLLING) 1902 ifpoll_register(ifp); 1903 else 1904 ifpoll_deregister(ifp); 1905 } 1906 #endif 1907 1908 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | 1909 (new_flags &~ IFF_CANTCHANGE); 1910 if (new_flags & IFF_PPROMISC) { 1911 /* Permanently promiscuous mode requested */ 1912 ifp->if_flags |= IFF_PROMISC; 1913 } else if (ifp->if_pcount == 0) { 1914 ifp->if_flags &= ~IFF_PROMISC; 1915 } 1916 if (ifp->if_ioctl) { 1917 ifnet_serialize_all(ifp); 1918 ifp->if_ioctl(ifp, cmd, data, cred); 1919 ifnet_deserialize_all(ifp); 1920 } 1921 getmicrotime(&ifp->if_lastchange); 1922 
	case SIOCSIFCAP:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifr->ifr_reqcap & ~ifp->if_capabilities) {
			error = EINVAL;
			break;
		}
		ifnet_serialize_all(ifp);
		ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		break;

	case SIOCSIFNAME:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
		if (error)
			break;
		if (new_name[0] == '\0') {
			error = EINVAL;
			break;
		}
		if (ifunit(new_name) != NULL) {
			error = EEXIST;
			break;
		}

		EVENTHANDLER_INVOKE(ifnet_detach_event, ifp);

		/* Announce the departure of the interface. */
		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);

		strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
		ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa;
		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
		namelen = strlen(new_name);
		onamelen = sdl->sdl_nlen;
		/*
		 * Move the address if needed.  This is safe because we
		 * allocate space for a name of length IFNAMSIZ when we
		 * create this in if_attach().
		 */
		if (namelen != onamelen) {
			bcopy(sdl->sdl_data + onamelen,
			    sdl->sdl_data + namelen, sdl->sdl_alen);
		}
		bcopy(new_name, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
		bzero(sdl->sdl_data, onamelen);
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;

		EVENTHANDLER_INVOKE(ifnet_attach_event, ifp);

		/* Announce the return of the interface. */
		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
		break;

	case SIOCSIFMETRIC:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		ifp->if_metric = ifr->ifr_metric;
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFPHYS:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFMTU:
	{
		u_long oldmtu = ifp->if_mtu;

		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;
		if (ifp->if_ioctl == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) {
			error = EINVAL;
			break;
		}
		ifnet_serialize_all(ifp);
		error = ifp->if_ioctl(ifp, cmd, data, cred);
		ifnet_deserialize_all(ifp);
		if (error == 0) {
			getmicrotime(&ifp->if_lastchange);
			rt_ifmsg(ifp);
		}
		/*
		 * If the link MTU changed, do network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
		}
		break;
	}

	case SIOCSIFTSOLEN:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;

		/* XXX need driver supplied upper limit */
		if (ifr->ifr_tsolen <= 0) {
			error = EINVAL;
			break;
		}
		ifp->if_tsolen = ifr->ifr_tsolen;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = priv_check_cred(cred, PRIV_ROOT, 0);
		if (error)
			break;

		/* Don't allow group membership on non-multicast interfaces. */
*/ 2061 if ((ifp->if_flags & IFF_MULTICAST) == 0) { 2062 error = EOPNOTSUPP; 2063 break; 2064 } 2065 2066 /* Don't let users screw up protocols' entries. */ 2067 if (ifr->ifr_addr.sa_family != AF_LINK) { 2068 error = EINVAL; 2069 break; 2070 } 2071 2072 if (cmd == SIOCADDMULTI) { 2073 struct ifmultiaddr *ifma; 2074 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); 2075 } else { 2076 error = if_delmulti(ifp, &ifr->ifr_addr); 2077 } 2078 if (error == 0) 2079 getmicrotime(&ifp->if_lastchange); 2080 break; 2081 2082 case SIOCSIFPHYADDR: 2083 case SIOCDIFPHYADDR: 2084 #ifdef INET6 2085 case SIOCSIFPHYADDR_IN6: 2086 #endif 2087 case SIOCSLIFPHYADDR: 2088 case SIOCSIFMEDIA: 2089 case SIOCSIFGENERIC: 2090 error = priv_check_cred(cred, PRIV_ROOT, 0); 2091 if (error) 2092 break; 2093 if (ifp->if_ioctl == NULL) { 2094 error = EOPNOTSUPP; 2095 break; 2096 } 2097 ifnet_serialize_all(ifp); 2098 error = ifp->if_ioctl(ifp, cmd, data, cred); 2099 ifnet_deserialize_all(ifp); 2100 if (error == 0) 2101 getmicrotime(&ifp->if_lastchange); 2102 break; 2103 2104 case SIOCGIFSTATUS: 2105 ifs = (struct ifstat *)data; 2106 ifs->ascii[0] = '\0'; 2107 /* fall through */ 2108 case SIOCGIFPSRCADDR: 2109 case SIOCGIFPDSTADDR: 2110 case SIOCGLIFPHYADDR: 2111 case SIOCGIFMEDIA: 2112 case SIOCGIFGENERIC: 2113 if (ifp->if_ioctl == NULL) { 2114 error = EOPNOTSUPP; 2115 break; 2116 } 2117 ifnet_serialize_all(ifp); 2118 error = ifp->if_ioctl(ifp, cmd, data, cred); 2119 ifnet_deserialize_all(ifp); 2120 break; 2121 2122 case SIOCSIFLLADDR: 2123 error = priv_check_cred(cred, PRIV_ROOT, 0); 2124 if (error) 2125 break; 2126 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, 2127 ifr->ifr_addr.sa_len); 2128 EVENTHANDLER_INVOKE(iflladdr_event, ifp); 2129 break; 2130 2131 default: 2132 oif_flags = ifp->if_flags; 2133 if (so->so_proto == NULL) { 2134 error = EOPNOTSUPP; 2135 break; 2136 } 2137 #ifndef COMPAT_43 2138 error = so_pru_control_direct(so, cmd, data, ifp); 2139 #else 2140 ocmd = cmd; 2141 2142 switch (cmd) { 2143 case SIOCSIFDSTADDR: 2144 case SIOCSIFADDR: 2145 case SIOCSIFBRDADDR: 2146 case SIOCSIFNETMASK: 2147 #if BYTE_ORDER != BIG_ENDIAN 2148 if (ifr->ifr_addr.sa_family == 0 && 2149 ifr->ifr_addr.sa_len < 16) { 2150 ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len; 2151 ifr->ifr_addr.sa_len = 16; 2152 } 2153 #else 2154 if (ifr->ifr_addr.sa_len == 0) 2155 ifr->ifr_addr.sa_len = 16; 2156 #endif 2157 break; 2158 case OSIOCGIFADDR: 2159 cmd = SIOCGIFADDR; 2160 break; 2161 case OSIOCGIFDSTADDR: 2162 cmd = SIOCGIFDSTADDR; 2163 break; 2164 case OSIOCGIFBRDADDR: 2165 cmd = SIOCGIFBRDADDR; 2166 break; 2167 case OSIOCGIFNETMASK: 2168 cmd = SIOCGIFNETMASK; 2169 break; 2170 default: 2171 break; 2172 } 2173 2174 error = so_pru_control_direct(so, cmd, data, ifp); 2175 2176 switch (ocmd) { 2177 case OSIOCGIFADDR: 2178 case OSIOCGIFDSTADDR: 2179 case OSIOCGIFBRDADDR: 2180 case OSIOCGIFNETMASK: 2181 *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family; 2182 break; 2183 } 2184 #endif /* COMPAT_43 */ 2185 2186 if ((oif_flags ^ ifp->if_flags) & IFF_UP) { 2187 #ifdef INET6 2188 DELAY(100); /* XXX: temporary workaround for fxp issue */ 2189 if (ifp->if_flags & IFF_UP) { 2190 crit_enter(); 2191 in6_if_up(ifp); 2192 crit_exit(); 2193 } 2194 #endif 2195 } 2196 break; 2197 } 2198 2199 ifnet_unlock(); 2200 return (error); 2201 } 2202 2203 /* 2204 * Set/clear promiscuous mode on interface ifp based on the truth value 2205 * of pswitch.
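A packet-capture hook, for example, might call ifpromisc(ifp, 1) when it attaches and ifpromisc(ifp, 0) when it detaches; nested enables simply bump the reference count.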
The calls are reference counted so that only the first 2206 * "on" request actually has an effect, as does the final "off" request. 2207 * Results are undefined if the "off" and "on" requests are not matched. 2208 */ 2209 int 2210 ifpromisc(struct ifnet *ifp, int pswitch) 2211 { 2212 struct ifreq ifr; 2213 int error; 2214 int oldflags; 2215 2216 oldflags = ifp->if_flags; 2217 if (ifp->if_flags & IFF_PPROMISC) { 2218 /* Do nothing if device is in permanently promiscuous mode */ 2219 ifp->if_pcount += pswitch ? 1 : -1; 2220 return (0); 2221 } 2222 if (pswitch) { 2223 /* 2224 * If the device is not configured up, we cannot put it in 2225 * promiscuous mode. 2226 */ 2227 if ((ifp->if_flags & IFF_UP) == 0) 2228 return (ENETDOWN); 2229 if (ifp->if_pcount++ != 0) 2230 return (0); 2231 ifp->if_flags |= IFF_PROMISC; 2232 log(LOG_INFO, "%s: promiscuous mode enabled\n", 2233 ifp->if_xname); 2234 } else { 2235 if (--ifp->if_pcount > 0) 2236 return (0); 2237 ifp->if_flags &= ~IFF_PROMISC; 2238 log(LOG_INFO, "%s: promiscuous mode disabled\n", 2239 ifp->if_xname); 2240 } 2241 ifr.ifr_flags = ifp->if_flags; 2242 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2243 ifnet_serialize_all(ifp); 2244 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL); 2245 ifnet_deserialize_all(ifp); 2246 if (error == 0) 2247 rt_ifmsg(ifp); 2248 else 2249 ifp->if_flags = oldflags; 2250 return error; 2251 } 2252 2253 /* 2254 * Return the interface configuration 2255 * of the system. The list may be used 2256 * in later ioctls (above) to get 2257 * other information. 2258 */ 2259 static int 2260 ifconf(u_long cmd, caddr_t data, struct ucred *cred) 2261 { 2262 struct ifconf *ifc = (struct ifconf *)data; 2263 struct ifnet *ifp; 2264 struct sockaddr *sa; 2265 struct ifreq ifr, *ifrp; 2266 int space = ifc->ifc_len, error = 0; 2267 2268 ifrp = ifc->ifc_req; 2269 2270 ifnet_lock(); 2271 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2272 struct ifaddr_container *ifac, *ifac_mark; 2273 struct ifaddr_marker mark; 2274 struct ifaddrhead *head; 2275 int addrs; 2276 2277 if (space <= sizeof ifr) 2278 break; 2279 2280 /* 2281 * Zero the stack declared structure first to prevent 2282 * memory disclosure. 2283 */ 2284 bzero(&ifr, sizeof(ifr)); 2285 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) 2286 >= sizeof(ifr.ifr_name)) { 2287 error = ENAMETOOLONG; 2288 break; 2289 } 2290 2291 /* 2292 * Add a marker, since copyout() could block and during that 2293 * period the list could be changed. Inserting the marker at 2294 * the head of the list will not cause trouble for code that 2295 * assumes the first element of the list is AF_LINK; the 2296 * marker will be moved to the next position without blocking. 2297 */ 2298 ifa_marker_init(&mark, ifp); 2299 ifac_mark = &mark.ifac; 2300 head = &ifp->if_addrheads[mycpuid]; 2301 2302 addrs = 0; 2303 TAILQ_INSERT_HEAD(head, ifac_mark, ifa_link); 2304 while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) { 2305 struct ifaddr *ifa = ifac->ifa; 2306 2307 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2308 TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link); 2309 2310 /* Ignore marker */ 2311 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 2312 continue; 2313 2314 if (space <= sizeof ifr) 2315 break; 2316 sa = ifa->ifa_addr; 2317 if (cred->cr_prison && 2318 prison_if(cred, sa)) 2319 continue; 2320 addrs++; 2321 /* 2322 * Keep a reference on this ifaddr, so that it will 2323 * not be destroyed when its address is copied to 2324 * the userland, which could block.
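The IFAREF()/IFAFREE() pair below brackets the copyout() for exactly this reason.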
2325 */ 2326 IFAREF(ifa); 2327 #ifdef COMPAT_43 2328 if (cmd == OSIOCGIFCONF) { 2329 struct osockaddr *osa = 2330 (struct osockaddr *)&ifr.ifr_addr; 2331 ifr.ifr_addr = *sa; 2332 osa->sa_family = sa->sa_family; 2333 error = copyout(&ifr, ifrp, sizeof ifr); 2334 ifrp++; 2335 } else 2336 #endif 2337 if (sa->sa_len <= sizeof(*sa)) { 2338 ifr.ifr_addr = *sa; 2339 error = copyout(&ifr, ifrp, sizeof ifr); 2340 ifrp++; 2341 } else { 2342 if (space < (sizeof ifr) + sa->sa_len - 2343 sizeof(*sa)) { 2344 IFAFREE(ifa); 2345 break; 2346 } 2347 space -= sa->sa_len - sizeof(*sa); 2348 error = copyout(&ifr, ifrp, 2349 sizeof ifr.ifr_name); 2350 if (error == 0) 2351 error = copyout(sa, &ifrp->ifr_addr, 2352 sa->sa_len); 2353 ifrp = (struct ifreq *) 2354 (sa->sa_len + (caddr_t)&ifrp->ifr_addr); 2355 } 2356 IFAFREE(ifa); 2357 if (error) 2358 break; 2359 space -= sizeof ifr; 2360 } 2361 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2362 if (error) 2363 break; 2364 if (!addrs) { 2365 bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr); 2366 error = copyout(&ifr, ifrp, sizeof ifr); 2367 if (error) 2368 break; 2369 space -= sizeof ifr; 2370 ifrp++; 2371 } 2372 } 2373 ifnet_unlock(); 2374 2375 ifc->ifc_len -= space; 2376 return (error); 2377 } 2378 2379 /* 2380 * Just like ifpromisc(), but for all-multicast-reception mode. 2381 */ 2382 int 2383 if_allmulti(struct ifnet *ifp, int onswitch) 2384 { 2385 int error = 0; 2386 struct ifreq ifr; 2387 2388 crit_enter(); 2389 2390 if (onswitch) { 2391 if (ifp->if_amcount++ == 0) { 2392 ifp->if_flags |= IFF_ALLMULTI; 2393 ifr.ifr_flags = ifp->if_flags; 2394 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2395 ifnet_serialize_all(ifp); 2396 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2397 NULL); 2398 ifnet_deserialize_all(ifp); 2399 } 2400 } else { 2401 if (ifp->if_amcount > 1) { 2402 ifp->if_amcount--; 2403 } else { 2404 ifp->if_amcount = 0; 2405 ifp->if_flags &= ~IFF_ALLMULTI; 2406 ifr.ifr_flags = ifp->if_flags; 2407 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2408 ifnet_serialize_all(ifp); 2409 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2410 NULL); 2411 ifnet_deserialize_all(ifp); 2412 } 2413 } 2414 2415 crit_exit(); 2416 2417 if (error == 0) 2418 rt_ifmsg(ifp); 2419 return error; 2420 } 2421 2422 /* 2423 * Add a multicast listenership to the interface in question. 2424 * The link layer provides a routine (if_resolvemulti) which converts a network-layer multicast address into the corresponding link-layer address. 2425 */ 2426 int 2427 if_addmulti_serialized(struct ifnet *ifp, struct sockaddr *sa, 2428 struct ifmultiaddr **retifma) 2429 { 2430 struct sockaddr *llsa, *dupsa; 2431 int error; 2432 struct ifmultiaddr *ifma; 2433 2434 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2435 2436 /* 2437 * If the matching multicast address already exists 2438 * then don't add a new one; just add a reference 2439 */ 2440 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2441 if (sa_equal(sa, ifma->ifma_addr)) { 2442 ifma->ifma_refcount++; 2443 if (retifma) 2444 *retifma = ifma; 2445 return 0; 2446 } 2447 } 2448 2449 /* 2450 * Give the link layer a chance to accept/reject it, and also 2451 * find out which AF_LINK address this maps to, if it isn't one 2452 * already.
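For Ethernet, for instance, the resolver maps an IPv4 group address onto a link-layer multicast address of the form 01:00:5e:xx:xx:xx, carrying the low 23 bits of the group number.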
2453 */ 2454 if (ifp->if_resolvemulti) { 2455 error = ifp->if_resolvemulti(ifp, &llsa, sa); 2456 if (error) 2457 return error; 2458 } else { 2459 llsa = NULL; 2460 } 2461 2462 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2463 dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_INTWAIT); 2464 bcopy(sa, dupsa, sa->sa_len); 2465 2466 ifma->ifma_addr = dupsa; 2467 ifma->ifma_lladdr = llsa; 2468 ifma->ifma_ifp = ifp; 2469 ifma->ifma_refcount = 1; 2470 ifma->ifma_protospec = NULL; 2471 rt_newmaddrmsg(RTM_NEWMADDR, ifma); 2472 2473 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2474 if (retifma) 2475 *retifma = ifma; 2476 2477 if (llsa != NULL) { 2478 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2479 if (sa_equal(ifma->ifma_addr, llsa)) 2480 break; 2481 } 2482 if (ifma) { 2483 ifma->ifma_refcount++; 2484 } else { 2485 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2486 dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_INTWAIT); 2487 bcopy(llsa, dupsa, llsa->sa_len); 2488 ifma->ifma_addr = dupsa; 2489 ifma->ifma_ifp = ifp; 2490 ifma->ifma_refcount = 1; 2491 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2492 } 2493 } 2494 /* 2495 * We are certain we have added something, so call down to the 2496 * interface to let it know about it. 2497 */ 2498 if (ifp->if_ioctl) 2499 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL); 2500 2501 return 0; 2502 } 2503 2504 int 2505 if_addmulti(struct ifnet *ifp, struct sockaddr *sa, 2506 struct ifmultiaddr **retifma) 2507 { 2508 int error; 2509 2510 ifnet_serialize_all(ifp); 2511 error = if_addmulti_serialized(ifp, sa, retifma); 2512 ifnet_deserialize_all(ifp); 2513 2514 return error; 2515 } 2516 2517 /* 2518 * Remove a reference to a multicast address on this interface. Yell 2519 * if the request does not match an existing membership. 2520 */ 2521 static int 2522 if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa) 2523 { 2524 struct ifmultiaddr *ifma; 2525 2526 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2527 2528 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2529 if (sa_equal(sa, ifma->ifma_addr)) 2530 break; 2531 if (ifma == NULL) 2532 return ENOENT; 2533 2534 if (ifma->ifma_refcount > 1) { 2535 ifma->ifma_refcount--; 2536 return 0; 2537 } 2538 2539 rt_newmaddrmsg(RTM_DELMADDR, ifma); 2540 sa = ifma->ifma_lladdr; 2541 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2542 /* 2543 * Make sure the interface driver is notified 2544 * in the case of a link layer mcast group being left. 2545 */ 2546 if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) 2547 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2548 kfree(ifma->ifma_addr, M_IFMADDR); 2549 kfree(ifma, M_IFMADDR); 2550 if (sa == NULL) 2551 return 0; 2552 2553 /* 2554 * Now look for the link-layer address which corresponds to 2555 * this network address. It had been squirreled away in 2556 * ifma->ifma_lladdr for this purpose (so we don't have 2557 * to call ifp->if_resolvemulti() again), and we saved that 2558 * value in sa above. If some nasty deleted the 2559 * link-layer address out from underneath us, we can deal because 2560 * the address we stored is not the same as the one which was 2561 * in the record for the link-layer address. (So we don't complain 2562 * in that case.)
2563 */ 2564 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2565 if (sa_equal(sa, ifma->ifma_addr)) 2566 break; 2567 if (ifma == NULL) 2568 return 0; 2569 2570 if (ifma->ifma_refcount > 1) { 2571 ifma->ifma_refcount--; 2572 return 0; 2573 } 2574 2575 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2576 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2577 kfree(ifma->ifma_addr, M_IFMADDR); 2578 kfree(sa, M_IFMADDR); 2579 kfree(ifma, M_IFMADDR); 2580 2581 return 0; 2582 } 2583 2584 int 2585 if_delmulti(struct ifnet *ifp, struct sockaddr *sa) 2586 { 2587 int error; 2588 2589 ifnet_serialize_all(ifp); 2590 error = if_delmulti_serialized(ifp, sa); 2591 ifnet_deserialize_all(ifp); 2592 2593 return error; 2594 } 2595 2596 /* 2597 * Delete all multicast group membership for an interface. 2598 * Should be used to quickly flush all multicast filters. 2599 */ 2600 void 2601 if_delallmulti_serialized(struct ifnet *ifp) 2602 { 2603 struct ifmultiaddr *ifma, mark; 2604 struct sockaddr sa; 2605 2606 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2607 2608 bzero(&sa, sizeof(sa)); 2609 sa.sa_family = AF_UNSPEC; 2610 sa.sa_len = sizeof(sa); 2611 2612 bzero(&mark, sizeof(mark)); 2613 mark.ifma_addr = &sa; 2614 2615 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link); 2616 while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) { 2617 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2618 TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark, 2619 ifma_link); 2620 2621 if (ifma->ifma_addr->sa_family == AF_UNSPEC) 2622 continue; 2623 2624 if_delmulti_serialized(ifp, ifma->ifma_addr); 2625 } 2626 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2627 } 2628 2629 2630 /* 2631 * Set the link layer address on an interface. 2632 * 2633 * At this time we only support certain types of interfaces, 2634 * and we don't allow the length of the address to change. 2635 */ 2636 int 2637 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) 2638 { 2639 struct sockaddr_dl *sdl; 2640 struct ifreq ifr; 2641 2642 sdl = IF_LLSOCKADDR(ifp); 2643 if (sdl == NULL) 2644 return (EINVAL); 2645 if (len != sdl->sdl_alen) /* don't allow length to change */ 2646 return (EINVAL); 2647 switch (ifp->if_type) { 2648 case IFT_ETHER: /* these types use struct arpcom */ 2649 case IFT_XETHER: 2650 case IFT_L2VLAN: 2651 case IFT_IEEE8023ADLAG: 2652 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len); 2653 bcopy(lladdr, LLADDR(sdl), len); 2654 break; 2655 default: 2656 return (ENODEV); 2657 } 2658 /* 2659 * If the interface is already up, we need 2660 * to re-init it in order to reprogram its 2661 * address filter. 2662 */ 2663 ifnet_serialize_all(ifp); 2664 if ((ifp->if_flags & IFF_UP) != 0) { 2665 #ifdef INET 2666 struct ifaddr_container *ifac; 2667 #endif 2668 2669 ifp->if_flags &= ~IFF_UP; 2670 ifr.ifr_flags = ifp->if_flags; 2671 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2672 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2673 NULL); 2674 ifp->if_flags |= IFF_UP; 2675 ifr.ifr_flags = ifp->if_flags; 2676 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2677 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2678 NULL); 2679 #ifdef INET 2680 /* 2681 * Also send gratuitous ARPs to notify other nodes about 2682 * the address change. 
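A gratuitous ARP advertises the sender's own IP-to-MAC binding unsolicited, so neighbors holding stale entries refresh their ARP caches immediately.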
2683 */ 2684 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2685 struct ifaddr *ifa = ifac->ifa; 2686 2687 if (ifa->ifa_addr != NULL && 2688 ifa->ifa_addr->sa_family == AF_INET) 2689 arp_gratuitous(ifp, ifa); 2690 } 2691 #endif 2692 } 2693 ifnet_deserialize_all(ifp); 2694 return (0); 2695 } 2696 2697 struct ifmultiaddr * 2698 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp) 2699 { 2700 struct ifmultiaddr *ifma; 2701 2702 /* TODO: need ifnet_serialize_main */ 2703 ifnet_serialize_all(ifp); 2704 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2705 if (sa_equal(ifma->ifma_addr, sa)) 2706 break; 2707 ifnet_deserialize_all(ifp); 2708 2709 return ifma; 2710 } 2711 2712 /* 2713 * This function locates the first real ethernet MAC from a network 2714 * card and loads it into node, returning 0 on success or ENOENT if 2715 * no suitable interfaces were found. It is used by the uuid code to 2716 * generate a unique 6-byte number. 2717 */ 2718 int 2719 if_getanyethermac(uint16_t *node, int minlen) 2720 { 2721 struct ifnet *ifp; 2722 struct sockaddr_dl *sdl; 2723 2724 ifnet_lock(); 2725 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2726 if (ifp->if_type != IFT_ETHER) 2727 continue; 2728 sdl = IF_LLSOCKADDR(ifp); 2729 if (sdl->sdl_alen < minlen) 2730 continue; 2731 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node, 2732 minlen); 2733 ifnet_unlock(); 2734 return(0); 2735 } 2736 ifnet_unlock(); 2737 return (ENOENT); 2738 } 2739 2740 /* 2741 * The name argument must be a pointer to storage which will last as 2742 * long as the interface does. For physical devices, the result of 2743 * device_get_name(dev) is a good choice and for pseudo-devices a 2744 * static string works well. 2745 */ 2746 void 2747 if_initname(struct ifnet *ifp, const char *name, int unit) 2748 { 2749 ifp->if_dname = name; 2750 ifp->if_dunit = unit; 2751 if (unit != IF_DUNIT_NONE) 2752 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); 2753 else 2754 strlcpy(ifp->if_xname, name, IFNAMSIZ); 2755 } 2756 2757 int 2758 if_printf(struct ifnet *ifp, const char *fmt, ...) 
2759 { 2760 __va_list ap; 2761 int retval; 2762 2763 retval = kprintf("%s: ", ifp->if_xname); 2764 __va_start(ap, fmt); 2765 retval += kvprintf(fmt, ap); 2766 __va_end(ap); 2767 return (retval); 2768 } 2769 2770 struct ifnet * 2771 if_alloc(uint8_t type) 2772 { 2773 struct ifnet *ifp; 2774 size_t size; 2775 2776 /* 2777 * XXX temporary hack until arpcom is setup in if_l2com 2778 */ 2779 if (type == IFT_ETHER) 2780 size = sizeof(struct arpcom); 2781 else 2782 size = sizeof(struct ifnet); 2783 2784 ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO); 2785 2786 ifp->if_type = type; 2787 2788 if (if_com_alloc[type] != NULL) { 2789 ifp->if_l2com = if_com_alloc[type](type, ifp); 2790 if (ifp->if_l2com == NULL) { 2791 kfree(ifp, M_IFNET); 2792 return (NULL); 2793 } 2794 } 2795 return (ifp); 2796 } 2797 2798 void 2799 if_free(struct ifnet *ifp) 2800 { 2801 kfree(ifp, M_IFNET); 2802 } 2803 2804 void 2805 ifq_set_classic(struct ifaltq *ifq) 2806 { 2807 ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq, 2808 ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request); 2809 } 2810 2811 void 2812 ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq, 2813 ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request) 2814 { 2815 int q; 2816 2817 KASSERT(mapsubq != NULL, ("mapsubq is not specified")); 2818 KASSERT(enqueue != NULL, ("enqueue is not specified")); 2819 KASSERT(dequeue != NULL, ("dequeue is not specified")); 2820 KASSERT(request != NULL, ("request is not specified")); 2821 2822 ifq->altq_mapsubq = mapsubq; 2823 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 2824 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 2825 2826 ifsq->ifsq_enqueue = enqueue; 2827 ifsq->ifsq_dequeue = dequeue; 2828 ifsq->ifsq_request = request; 2829 } 2830 } 2831 2832 static void 2833 ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2834 { 2835 m->m_nextpkt = NULL; 2836 if (ifsq->ifsq_norm_tail == NULL) 2837 ifsq->ifsq_norm_head = m; 2838 else 2839 ifsq->ifsq_norm_tail->m_nextpkt = m; 2840 ifsq->ifsq_norm_tail = m; 2841 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2842 } 2843 2844 static void 2845 ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2846 { 2847 m->m_nextpkt = NULL; 2848 if (ifsq->ifsq_prio_tail == NULL) 2849 ifsq->ifsq_prio_head = m; 2850 else 2851 ifsq->ifsq_prio_tail->m_nextpkt = m; 2852 ifsq->ifsq_prio_tail = m; 2853 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2854 ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len); 2855 } 2856 2857 static struct mbuf * 2858 ifsq_norm_dequeue(struct ifaltq_subque *ifsq) 2859 { 2860 struct mbuf *m; 2861 2862 m = ifsq->ifsq_norm_head; 2863 if (m != NULL) { 2864 if ((ifsq->ifsq_norm_head = m->m_nextpkt) == NULL) 2865 ifsq->ifsq_norm_tail = NULL; 2866 m->m_nextpkt = NULL; 2867 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2868 } 2869 return m; 2870 } 2871 2872 static struct mbuf * 2873 ifsq_prio_dequeue(struct ifaltq_subque *ifsq) 2874 { 2875 struct mbuf *m; 2876 2877 m = ifsq->ifsq_prio_head; 2878 if (m != NULL) { 2879 if ((ifsq->ifsq_prio_head = m->m_nextpkt) == NULL) 2880 ifsq->ifsq_prio_tail = NULL; 2881 m->m_nextpkt = NULL; 2882 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2883 ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len); 2884 } 2885 return m; 2886 } 2887 2888 int 2889 ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m, 2890 struct altq_pktattr *pa __unused) 2891 { 2892 M_ASSERTPKTHDR(m); 2893 if (ifsq->ifsq_len >= ifsq->ifsq_maxlen || 2894 ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) { 2895 if ((m->m_flags & M_PRIO) && 2896 ifsq->ifsq_prio_len < 
(ifsq->ifsq_maxlen / 2) && 2897 ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt / 2)) { 2898 struct mbuf *m_drop; 2899 2900 /* 2901 * Perform drop-head on the normal queue 2902 */ 2903 m_drop = ifsq_norm_dequeue(ifsq); 2904 if (m_drop != NULL) { 2905 m_freem(m_drop); 2906 ifsq_prio_enqueue(ifsq, m); 2907 return 0; 2908 } 2909 /* XXX nothing could be dropped? */ 2910 } 2911 m_freem(m); 2912 return ENOBUFS; 2913 } else { 2914 if (m->m_flags & M_PRIO) 2915 ifsq_prio_enqueue(ifsq, m); 2916 else 2917 ifsq_norm_enqueue(ifsq, m); 2918 return 0; 2919 } 2920 } 2921 2922 struct mbuf * 2923 ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op) 2924 { 2925 struct mbuf *m; 2926 2927 switch (op) { 2928 case ALTDQ_POLL: 2929 m = ifsq->ifsq_prio_head; 2930 if (m == NULL) 2931 m = ifsq->ifsq_norm_head; 2932 break; 2933 2934 case ALTDQ_REMOVE: 2935 m = ifsq_prio_dequeue(ifsq); 2936 if (m == NULL) 2937 m = ifsq_norm_dequeue(ifsq); 2938 break; 2939 2940 default: 2941 panic("unsupported ALTQ dequeue op: %d", op); 2942 } 2943 return m; 2944 } 2945 2946 int 2947 ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg) 2948 { 2949 switch (req) { 2950 case ALTRQ_PURGE: 2951 for (;;) { 2952 struct mbuf *m; 2953 2954 m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE); 2955 if (m == NULL) 2956 break; 2957 m_freem(m); 2958 } 2959 break; 2960 2961 default: 2962 panic("unsupported ALTQ request: %d", req); 2963 } 2964 return 0; 2965 } 2966 2967 static void 2968 ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched) 2969 { 2970 struct ifnet *ifp = ifsq_get_ifp(ifsq); 2971 int running = 0, need_sched; 2972 2973 /* 2974 * Try a direct ifnet.if_start on the subqueue first; if there is 2975 * contention on the subqueue hardware serializer, ifnet.if_start on 2976 * the subqueue will be scheduled on the subqueue owner CPU instead. 2977 */ 2978 if (!ifsq_tryserialize_hw(ifsq)) { 2979 /* 2980 * Subqueue hardware serializer contention happened; 2981 * ifnet.if_start on the subqueue is scheduled on 2982 * the subqueue owner CPU, and we keep going. 2983 */ 2984 ifsq_ifstart_schedule(ifsq, 1); 2985 return; 2986 } 2987 2988 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 2989 ifp->if_start(ifp, ifsq); 2990 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 2991 running = 1; 2992 } 2993 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 2994 2995 ifsq_deserialize_hw(ifsq); 2996 2997 if (need_sched) { 2998 /* 2999 * More data needs to be transmitted; ifnet.if_start on the 3000 * subqueue is scheduled on the subqueue owner CPU, and we 3001 * keep going. 3002 * NOTE: the ifnet.if_start subqueue interlock is not released. 3003 */ 3004 ifsq_ifstart_schedule(ifsq, force_sched); 3005 } 3006 } 3007 3008 /* 3009 * Subqueue packet staging mechanism: 3010 * 3011 * Packets enqueued into the subqueue are staged up to a certain amount 3012 * before ifnet.if_start is called on the subqueue. In this way, the 3013 * driver can avoid writing to the hardware registers upon every packet; 3014 * instead, the hardware registers are written once a certain number of 3015 * packets has been put onto the hardware TX ring. Measurement on several modern 3016 * NICs (emx(4), igb(4), bnx(4), bge(4), jme(4)) shows that this hardware 3017 * register write aggregation can save ~20% CPU time when 18-byte UDP 3018 * datagrams are transmitted at 1.48Mpps. The performance improvement from 3019 * hardware register write aggregation is also mentioned in Luigi Rizzo's 3020 * netmap paper (http://info.iet.unipi.it/~luigi/netmap/).
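As a rough worked example (with the default ifsq_stage_cntmax of 4 and a hypothetical max_protohdr of 60 bytes on a 1500-byte MTU), staging on a CPU would end once 4 packets or 1500 - 60 = 1440 bytes have accumulated, whichever comes first; see the conditions listed below.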
3021 * 3022 * Subqueue packet staging is performed at two entry points into the drivers' 3023 * transmission function: 3024 * - Direct ifnet.if_start calling on the subqueue, i.e. ifsq_ifstart_try() 3025 * - ifnet.if_start scheduling on the subqueue, i.e. ifsq_ifstart_schedule() 3026 * 3027 * Subqueue packet staging will be stopped upon any of the following 3028 * conditions: 3029 * - If the count of packets enqueued on the current CPU is greater than or 3030 * equal to ifsq_stage_cntmax. (XXX this should be per-interface) 3031 * - If the total length of packets enqueued on the current CPU is greater 3032 * than or equal to the hardware's MTU - max_protohdr. max_protohdr is 3033 * subtracted from the hardware's MTU mainly because a full TCP segment's 3034 * size is usually less than the hardware's MTU. 3035 * - ifsq_ifstart_schedule() is not pending on the current CPU and the 3036 * ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not 3037 * released. 3038 * - if_start_rollup(), which is registered as a low-priority netisr 3039 * rollup function, is called, probably because no more work is pending 3040 * for the netisr. 3041 * 3042 * NOTE: 3043 * Currently subqueue packet staging is only performed in netisr threads. 3044 */ 3045 int 3046 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa) 3047 { 3048 struct ifaltq *ifq = &ifp->if_snd; 3049 struct ifaltq_subque *ifsq; 3050 int error, start = 0, len, mcast = 0, avoid_start = 0; 3051 struct ifsubq_stage_head *head = NULL; 3052 struct ifsubq_stage *stage = NULL; 3053 struct globaldata *gd = mycpu; 3054 struct thread *td = gd->gd_curthread; 3055 3056 crit_enter_quick(td); 3057 3058 ifsq = ifq_map_subq(ifq, gd->gd_cpuid); 3059 ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq); 3060 3061 len = m->m_pkthdr.len; 3062 if (m->m_flags & M_MCAST) 3063 mcast = 1; 3064 3065 if (td->td_type == TD_TYPE_NETISR) { 3066 head = &ifsubq_stage_heads[mycpuid]; 3067 stage = ifsq_get_stage(ifsq, mycpuid); 3068 3069 stage->stg_cnt++; 3070 stage->stg_len += len; 3071 if (stage->stg_cnt < ifsq_stage_cntmax && 3072 stage->stg_len < (ifp->if_mtu - max_protohdr)) 3073 avoid_start = 1; 3074 } 3075 3076 ALTQ_SQ_LOCK(ifsq); 3077 error = ifsq_enqueue_locked(ifsq, m, pa); 3078 if (error) { 3079 IFNET_STAT_INC(ifp, oqdrops, 1); 3080 if (!ifsq_data_ready(ifsq)) { 3081 ALTQ_SQ_UNLOCK(ifsq); 3082 crit_exit_quick(td); 3083 return error; 3084 } 3085 avoid_start = 0; 3086 } 3087 if (!ifsq_is_started(ifsq)) { 3088 if (avoid_start) { 3089 ALTQ_SQ_UNLOCK(ifsq); 3090 3091 KKASSERT(!error); 3092 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 3093 ifsq_stage_insert(head, stage); 3094 3095 IFNET_STAT_INC(ifp, obytes, len); 3096 if (mcast) 3097 IFNET_STAT_INC(ifp, omcasts, 1); 3098 crit_exit_quick(td); 3099 return error; 3100 } 3101 3102 /* 3103 * Hold the subqueue interlock of ifnet.if_start 3104 */ 3105 ifsq_set_started(ifsq); 3106 start = 1; 3107 } 3108 ALTQ_SQ_UNLOCK(ifsq); 3109 3110 if (!error) { 3111 IFNET_STAT_INC(ifp, obytes, len); 3112 if (mcast) 3113 IFNET_STAT_INC(ifp, omcasts, 1); 3114 } 3115 3116 if (stage != NULL) { 3117 if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) { 3118 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 3119 if (!avoid_start) { 3120 ifsq_stage_remove(head, stage); 3121 ifsq_ifstart_schedule(ifsq, 1); 3122 } 3123 crit_exit_quick(td); 3124 return error; 3125 } 3126 3127 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) { 3128 ifsq_stage_remove(head, stage); 3129 } else { 3130 stage->stg_cnt = 0; 3131 stage->stg_len = 0; 3132 } 3133 } 3134 3135
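/*
 * If we did not win the ifnet.if_start interlock above, whoever
 * holds it is responsible for draining the subqueue; nothing is
 * left to do here beyond the bookkeeping already performed.
 */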
if (!start) { 3136 crit_exit_quick(td); 3137 return error; 3138 } 3139 3140 ifsq_ifstart_try(ifsq, 0); 3141 3142 crit_exit_quick(td); 3143 return error; 3144 } 3145 3146 void * 3147 ifa_create(int size) 3148 { 3149 struct ifaddr *ifa; 3150 int i; 3151 3152 KASSERT(size >= sizeof(*ifa), ("ifaddr size too small")); 3153 3154 ifa = kmalloc(size, M_IFADDR, M_INTWAIT | M_ZERO); 3155 ifa->ifa_containers = 3156 kmalloc_cachealign(ncpus * sizeof(struct ifaddr_container), 3157 M_IFADDR, M_INTWAIT | M_ZERO); 3158 3159 ifa->ifa_ncnt = ncpus; 3160 for (i = 0; i < ncpus; ++i) { 3161 struct ifaddr_container *ifac = &ifa->ifa_containers[i]; 3162 3163 ifac->ifa_magic = IFA_CONTAINER_MAGIC; 3164 ifac->ifa = ifa; 3165 ifac->ifa_refcnt = 1; 3166 } 3167 #ifdef IFADDR_DEBUG 3168 kprintf("alloc ifa %p %d\n", ifa, size); 3169 #endif 3170 return ifa; 3171 } 3172 3173 void 3174 ifac_free(struct ifaddr_container *ifac, int cpu_id) 3175 { 3176 struct ifaddr *ifa = ifac->ifa; 3177 3178 KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC); 3179 KKASSERT(ifac->ifa_refcnt == 0); 3180 KASSERT(ifac->ifa_listmask == 0, 3181 ("ifa is still on %#x lists", ifac->ifa_listmask)); 3182 3183 ifac->ifa_magic = IFA_CONTAINER_DEAD; 3184 3185 #ifdef IFADDR_DEBUG_VERBOSE 3186 kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id); 3187 #endif 3188 3189 KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus, 3190 ("invalid # of ifac, %d", ifa->ifa_ncnt)); 3191 if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) { 3192 #ifdef IFADDR_DEBUG 3193 kprintf("free ifa %p\n", ifa); 3194 #endif 3195 kfree(ifa->ifa_containers, M_IFADDR); 3196 kfree(ifa, M_IFADDR); 3197 } 3198 } 3199 3200 static void 3201 ifa_iflink_dispatch(netmsg_t nmsg) 3202 { 3203 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3204 struct ifaddr *ifa = msg->ifa; 3205 struct ifnet *ifp = msg->ifp; 3206 int cpu = mycpuid; 3207 struct ifaddr_container *ifac; 3208 3209 crit_enter(); 3210 3211 ifac = &ifa->ifa_containers[cpu]; 3212 ASSERT_IFAC_VALID(ifac); 3213 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0, 3214 ("ifaddr is on if_addrheads")); 3215 3216 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD; 3217 if (msg->tail) 3218 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link); 3219 else 3220 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link); 3221 3222 crit_exit(); 3223 3224 ifa_forwardmsg(&nmsg->lmsg, cpu + 1); 3225 } 3226 3227 void 3228 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail) 3229 { 3230 struct netmsg_ifaddr msg; 3231 3232 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3233 0, ifa_iflink_dispatch); 3234 msg.ifa = ifa; 3235 msg.ifp = ifp; 3236 msg.tail = tail; 3237 3238 ifa_domsg(&msg.base.lmsg, 0); 3239 } 3240 3241 static void 3242 ifa_ifunlink_dispatch(netmsg_t nmsg) 3243 { 3244 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3245 struct ifaddr *ifa = msg->ifa; 3246 struct ifnet *ifp = msg->ifp; 3247 int cpu = mycpuid; 3248 struct ifaddr_container *ifac; 3249 3250 crit_enter(); 3251 3252 ifac = &ifa->ifa_containers[cpu]; 3253 ASSERT_IFAC_VALID(ifac); 3254 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD, 3255 ("ifaddr is not on if_addrhead")); 3256 3257 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link); 3258 ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD; 3259 3260 crit_exit(); 3261 3262 ifa_forwardmsg(&nmsg->lmsg, cpu + 1); 3263 } 3264 3265 void 3266 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp) 3267 { 3268 struct netmsg_ifaddr msg; 3269 3270 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3271 0, 
ifa_ifunlink_dispatch); 3272 msg.ifa = ifa; 3273 msg.ifp = ifp; 3274 3275 ifa_domsg(&msg.base.lmsg, 0); 3276 } 3277 3278 static void 3279 ifa_destroy_dispatch(netmsg_t nmsg) 3280 { 3281 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3282 3283 IFAFREE(msg->ifa); 3284 ifa_forwardmsg(&nmsg->lmsg, mycpuid + 1); 3285 } 3286 3287 void 3288 ifa_destroy(struct ifaddr *ifa) 3289 { 3290 struct netmsg_ifaddr msg; 3291 3292 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3293 0, ifa_destroy_dispatch); 3294 msg.ifa = ifa; 3295 3296 ifa_domsg(&msg.base.lmsg, 0); 3297 } 3298 3299 struct lwkt_port * 3300 ifnet_portfn(int cpu) 3301 { 3302 return &ifnet_threads[cpu]->td_msgport; 3303 } 3304 3305 void 3306 ifnet_forwardmsg(struct lwkt_msg *lmsg, int next_cpu) 3307 { 3308 KKASSERT(next_cpu > mycpuid && next_cpu <= ncpus); 3309 3310 if (next_cpu < ncpus) 3311 lwkt_forwardmsg(ifnet_portfn(next_cpu), lmsg); 3312 else 3313 lwkt_replymsg(lmsg, 0); 3314 } 3315 3316 int 3317 ifnet_domsg(struct lwkt_msg *lmsg, int cpu) 3318 { 3319 KKASSERT(cpu < ncpus); 3320 return lwkt_domsg(ifnet_portfn(cpu), lmsg, 0); 3321 } 3322 3323 void 3324 ifnet_sendmsg(struct lwkt_msg *lmsg, int cpu) 3325 { 3326 KKASSERT(cpu < ncpus); 3327 lwkt_sendmsg(ifnet_portfn(cpu), lmsg); 3328 } 3329 3330 /* 3331 * Generic netmsg service loop. Some protocols may roll their own but all 3332 * must do the basic command dispatch function call done here. 3333 */ 3334 static void 3335 ifnet_service_loop(void *arg __unused) 3336 { 3337 netmsg_t msg; 3338 3339 while ((msg = lwkt_waitport(&curthread->td_msgport, 0))) { 3340 KASSERT(msg->base.nm_dispatch, ("ifnet_service: badmsg")); 3341 msg->base.nm_dispatch(msg); 3342 } 3343 } 3344 3345 static void 3346 if_start_rollup(void) 3347 { 3348 struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid]; 3349 struct ifsubq_stage *stage; 3350 3351 crit_enter(); 3352 3353 while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) { 3354 struct ifaltq_subque *ifsq = stage->stg_subq; 3355 int is_sched = 0; 3356 3357 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED) 3358 is_sched = 1; 3359 ifsq_stage_remove(head, stage); 3360 3361 if (is_sched) { 3362 ifsq_ifstart_schedule(ifsq, 1); 3363 } else { 3364 int start = 0; 3365 3366 ALTQ_SQ_LOCK(ifsq); 3367 if (!ifsq_is_started(ifsq)) { 3368 /* 3369 * Hold the subqueue interlock of 3370 * ifnet.if_start 3371 */ 3372 ifsq_set_started(ifsq); 3373 start = 1; 3374 } 3375 ALTQ_SQ_UNLOCK(ifsq); 3376 3377 if (start) 3378 ifsq_ifstart_try(ifsq, 1); 3379 } 3380 KKASSERT((stage->stg_flags & 3381 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 3382 } 3383 3384 crit_exit(); 3385 } 3386 3387 static void 3388 ifnetinit(void *dummy __unused) 3389 { 3390 int i; 3391 3392 for (i = 0; i < ncpus; ++i) { 3393 struct thread **thr = &ifnet_threads[i]; 3394 3395 lwkt_create(ifnet_service_loop, NULL, thr, NULL, 3396 TDF_NOSTART|TDF_FORCE_SPINPORT|TDF_FIXEDCPU, 3397 i, "ifnet %d", i); 3398 netmsg_service_port_init(&(*thr)->td_msgport); 3399 lwkt_schedule(*thr); 3400 } 3401 3402 for (i = 0; i < ncpus; ++i) 3403 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head); 3404 netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART); 3405 } 3406 3407 void 3408 if_register_com_alloc(u_char type, 3409 if_com_alloc_t *a, if_com_free_t *f) 3410 { 3411 3412 KASSERT(if_com_alloc[type] == NULL, 3413 ("if_register_com_alloc: %d already registered", type)); 3414 KASSERT(if_com_free[type] == NULL, 3415 ("if_register_com_alloc: %d free already registered", type)); 3416 3417 if_com_alloc[type] = a; 3418 
if_com_free[type] = f; 3419 } 3420 3421 void 3422 if_deregister_com_alloc(u_char type) 3423 { 3424 3425 KASSERT(if_com_alloc[type] != NULL, 3426 ("if_deregister_com_alloc: %d not registered", type)); 3427 KASSERT(if_com_free[type] != NULL, 3428 ("if_deregister_com_alloc: %d free not registered", type)); 3429 if_com_alloc[type] = NULL; 3430 if_com_free[type] = NULL; 3431 } 3432 3433 int 3434 if_ring_count2(int cnt, int cnt_max) 3435 { 3436 int shift = 0; 3437 3438 KASSERT(cnt_max >= 1 && powerof2(cnt_max), 3439 ("invalid ring count max %d", cnt_max)); 3440 3441 if (cnt <= 0) 3442 cnt = cnt_max; 3443 if (cnt > ncpus2) 3444 cnt = ncpus2; 3445 if (cnt > cnt_max) 3446 cnt = cnt_max; 3447 3448 while ((1 << (shift + 1)) <= cnt) 3449 ++shift; 3450 cnt = 1 << shift; 3451 3452 KASSERT(cnt >= 1 && cnt <= ncpus2 && cnt <= cnt_max, 3453 ("calculate cnt %d, ncpus2 %d, cnt max %d", 3454 cnt, ncpus2, cnt_max)); 3455 return cnt; 3456 } 3457 3458 void 3459 ifq_set_maxlen(struct ifaltq *ifq, int len) 3460 { 3461 ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax); 3462 } 3463 3464 int 3465 ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused) 3466 { 3467 return ALTQ_SUBQ_INDEX_DEFAULT; 3468 } 3469 3470 int 3471 ifq_mapsubq_mask(struct ifaltq *ifq, int cpuid) 3472 { 3473 return (cpuid & ifq->altq_subq_mask); 3474 } 3475 3476 static void 3477 ifsq_watchdog(void *arg) 3478 { 3479 struct ifsubq_watchdog *wd = arg; 3480 struct ifnet *ifp; 3481 3482 if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer)) 3483 goto done; 3484 3485 ifp = ifsq_get_ifp(wd->wd_subq); 3486 if (ifnet_tryserialize_all(ifp)) { 3487 wd->wd_watchdog(wd->wd_subq); 3488 ifnet_deserialize_all(ifp); 3489 } else { 3490 /* try again next timeout */ 3491 wd->wd_timer = 1; 3492 } 3493 done: 3494 ifsq_watchdog_reset(wd); 3495 } 3496 3497 static void 3498 ifsq_watchdog_reset(struct ifsubq_watchdog *wd) 3499 { 3500 callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd, 3501 ifsq_get_cpuid(wd->wd_subq)); 3502 } 3503 3504 void 3505 ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq, 3506 ifsq_watchdog_t watchdog) 3507 { 3508 callout_init_mp(&wd->wd_callout); 3509 wd->wd_timer = 0; 3510 wd->wd_subq = ifsq; 3511 wd->wd_watchdog = watchdog; 3512 } 3513 3514 void 3515 ifsq_watchdog_start(struct ifsubq_watchdog *wd) 3516 { 3517 wd->wd_timer = 0; 3518 ifsq_watchdog_reset(wd); 3519 } 3520 3521 void 3522 ifsq_watchdog_stop(struct ifsubq_watchdog *wd) 3523 { 3524 wd->wd_timer = 0; 3525 callout_stop(&wd->wd_callout); 3526 } 3527 3528 void 3529 ifnet_lock(void) 3530 { 3531 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3532 ("try holding ifnet lock in netisr")); 3533 mtx_lock(&ifnet_mtx); 3534 } 3535 3536 void 3537 ifnet_unlock(void) 3538 { 3539 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3540 ("try holding ifnet lock in netisr")); 3541 mtx_unlock(&ifnet_mtx); 3542 } 3543 3544 static struct ifnet_array * 3545 ifnet_array_alloc(int count) 3546 { 3547 struct ifnet_array *arr; 3548 3549 arr = kmalloc(__offsetof(struct ifnet_array, ifnet_arr[count]), 3550 M_IFNET, M_WAITOK); 3551 arr->ifnet_count = count; 3552 3553 return arr; 3554 } 3555 3556 static void 3557 ifnet_array_free(struct ifnet_array *arr) 3558 { 3559 if (arr == &ifnet_array0) 3560 return; 3561 kfree(arr, M_IFNET); 3562 } 3563 3564 static struct ifnet_array * 3565 ifnet_array_add(struct ifnet *ifp, const struct ifnet_array *old_arr) 3566 { 3567 struct ifnet_array *arr; 3568 int count, i; 3569 3570 KASSERT(old_arr->ifnet_count >= 0, 3571 ("invalid ifnet array 
count %d", old_arr->ifnet_count)); 3572 count = old_arr->ifnet_count + 1; 3573 arr = ifnet_array_alloc(count); 3574 3575 /* 3576 * Save the old ifnet array and append this ifp to the end of 3577 * the new ifnet array. 3578 */ 3579 for (i = 0; i < old_arr->ifnet_count; ++i) { 3580 KASSERT(old_arr->ifnet_arr[i] != ifp, 3581 ("%s is already in ifnet array", ifp->if_xname)); 3582 arr->ifnet_arr[i] = old_arr->ifnet_arr[i]; 3583 } 3584 KASSERT(i == count - 1, 3585 ("add %s, ifnet array index mismatch, should be %d, but got %d", 3586 ifp->if_xname, count - 1, i)); 3587 arr->ifnet_arr[i] = ifp; 3588 3589 return arr; 3590 } 3591 3592 static struct ifnet_array * 3593 ifnet_array_del(struct ifnet *ifp, const struct ifnet_array *old_arr) 3594 { 3595 struct ifnet_array *arr; 3596 int count, i, idx, found = 0; 3597 3598 KASSERT(old_arr->ifnet_count > 0, 3599 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3600 count = old_arr->ifnet_count - 1; 3601 arr = ifnet_array_alloc(count); 3602 3603 /* 3604 * Save the old ifnet array, but skip this ifp. 3605 */ 3606 idx = 0; 3607 for (i = 0; i < old_arr->ifnet_count; ++i) { 3608 if (old_arr->ifnet_arr[i] == ifp) { 3609 KASSERT(!found, 3610 ("dup %s is in ifnet array", ifp->if_xname)); 3611 found = 1; 3612 continue; 3613 } 3614 KASSERT(idx < count, 3615 ("invalid ifnet array index %d, count %d", idx, count)); 3616 arr->ifnet_arr[idx] = old_arr->ifnet_arr[i]; 3617 ++idx; 3618 } 3619 KASSERT(found, ("%s is not in ifnet array", ifp->if_xname)); 3620 KASSERT(idx == count, 3621 ("del %s, ifnet array count mismatch, should be %d, but got %d ", 3622 ifp->if_xname, count, idx)); 3623 3624 return arr; 3625 } 3626 3627 const struct ifnet_array * 3628 ifnet_array_get(void) 3629 { 3630 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3631 return ifnet_array; 3632 } 3633 3634 int 3635 ifnet_array_isempty(void) 3636 { 3637 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3638 if (ifnet_array->ifnet_count == 0) 3639 return 1; 3640 else 3641 return 0; 3642 } 3643 3644 void 3645 ifa_marker_init(struct ifaddr_marker *mark, struct ifnet *ifp) 3646 { 3647 struct ifaddr *ifa; 3648 3649 memset(mark, 0, sizeof(*mark)); 3650 ifa = &mark->ifa; 3651 3652 mark->ifac.ifa = ifa; 3653 3654 ifa->ifa_addr = &mark->addr; 3655 ifa->ifa_dstaddr = &mark->dstaddr; 3656 ifa->ifa_netmask = &mark->netmask; 3657 ifa->ifa_ifp = ifp; 3658 } 3659