1 /* 2 * Copyright (c) 1980, 1986, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)if.c 8.3 (Berkeley) 1/4/94 30 * $FreeBSD: src/sys/net/if.c,v 1.185 2004/03/13 02:35:03 brooks Exp $ 31 */ 32 33 #include "opt_inet6.h" 34 #include "opt_inet.h" 35 #include "opt_ifpoll.h" 36 37 #include <sys/param.h> 38 #include <sys/malloc.h> 39 #include <sys/mbuf.h> 40 #include <sys/systm.h> 41 #include <sys/proc.h> 42 #include <sys/priv.h> 43 #include <sys/protosw.h> 44 #include <sys/socket.h> 45 #include <sys/socketvar.h> 46 #include <sys/socketops.h> 47 #include <sys/kernel.h> 48 #include <sys/ktr.h> 49 #include <sys/mutex.h> 50 #include <sys/sockio.h> 51 #include <sys/syslog.h> 52 #include <sys/sysctl.h> 53 #include <sys/domain.h> 54 #include <sys/thread.h> 55 #include <sys/serialize.h> 56 #include <sys/bus.h> 57 58 #include <sys/thread2.h> 59 #include <sys/msgport2.h> 60 #include <sys/mutex2.h> 61 62 #include <net/if.h> 63 #include <net/if_arp.h> 64 #include <net/if_dl.h> 65 #include <net/if_types.h> 66 #include <net/if_var.h> 67 #include <net/if_ringmap.h> 68 #include <net/ifq_var.h> 69 #include <net/radix.h> 70 #include <net/route.h> 71 #include <net/if_clone.h> 72 #include <net/netisr2.h> 73 #include <net/netmsg2.h> 74 75 #include <machine/atomic.h> 76 #include <machine/stdarg.h> 77 #include <machine/smp.h> 78 79 #if defined(INET) || defined(INET6) 80 /*XXX*/ 81 #include <netinet/in.h> 82 #include <netinet/in_var.h> 83 #include <netinet/if_ether.h> 84 #ifdef INET6 85 #include <netinet6/in6_var.h> 86 #include <netinet6/in6_ifattach.h> 87 #endif 88 #endif 89 90 struct netmsg_ifaddr { 91 struct netmsg_base base; 92 struct ifaddr *ifa; 93 struct ifnet *ifp; 94 int tail; 95 }; 96 97 struct ifsubq_stage_head { 98 TAILQ_HEAD(, ifsubq_stage) stg_head; 99 } __cachealign; 100 101 struct if_ringmap { 102 int rm_cnt; 103 int rm_grid; 104 int rm_cpumap[]; 105 }; 106 107 #define RINGMAP_FLAG_NONE 0x0 108 #define RINGMAP_FLAG_POWEROF2 0x1 109 110 /* 111 * System initialization 112 */ 113 
static void if_attachdomain(void *); 114 static void if_attachdomain1(struct ifnet *); 115 static int ifconf(u_long, caddr_t, struct ucred *); 116 static void ifinit(void *); 117 static void ifnetinit(void *); 118 static void if_slowtimo(void *); 119 static void link_rtrequest(int, struct rtentry *); 120 static int if_rtdel(struct radix_node *, void *); 121 static void if_slowtimo_dispatch(netmsg_t); 122 123 /* Helper functions */ 124 static void ifsq_watchdog_reset(struct ifsubq_watchdog *); 125 static int if_delmulti_serialized(struct ifnet *, struct sockaddr *); 126 static struct ifnet_array *ifnet_array_alloc(int); 127 static void ifnet_array_free(struct ifnet_array *); 128 static struct ifnet_array *ifnet_array_add(struct ifnet *, 129 const struct ifnet_array *); 130 static struct ifnet_array *ifnet_array_del(struct ifnet *, 131 const struct ifnet_array *); 132 133 #ifdef INET6 134 /* 135 * XXX: declare here to avoid to include many inet6 related files.. 136 * should be more generalized? 137 */ 138 extern void nd6_setmtu(struct ifnet *); 139 #endif 140 141 SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); 142 SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); 143 SYSCTL_NODE(_net_link, OID_AUTO, ringmap, CTLFLAG_RW, 0, "link ringmap"); 144 145 static int ifsq_stage_cntmax = 16; 146 TUNABLE_INT("net.link.stage_cntmax", &ifsq_stage_cntmax); 147 SYSCTL_INT(_net_link, OID_AUTO, stage_cntmax, CTLFLAG_RW, 148 &ifsq_stage_cntmax, 0, "ifq staging packet count max"); 149 150 static int if_stats_compat = 0; 151 SYSCTL_INT(_net_link, OID_AUTO, stats_compat, CTLFLAG_RW, 152 &if_stats_compat, 0, "Compat the old ifnet stats"); 153 154 static int if_ringmap_dumprdr = 0; 155 SYSCTL_INT(_net_link_ringmap, OID_AUTO, dump_rdr, CTLFLAG_RW, 156 &if_ringmap_dumprdr, 0, "dump redirect table"); 157 158 SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, ifinit, NULL); 159 SYSINIT(ifnet, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, ifnetinit, NULL); 160 161 static if_com_alloc_t *if_com_alloc[256]; 162 static if_com_free_t *if_com_free[256]; 163 164 MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); 165 MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); 166 MALLOC_DEFINE(M_IFNET, "ifnet", "interface structure"); 167 168 int ifqmaxlen = IFQ_MAXLEN; 169 struct ifnethead ifnet = TAILQ_HEAD_INITIALIZER(ifnet); 170 171 static struct ifnet_array ifnet_array0; 172 static struct ifnet_array *ifnet_array = &ifnet_array0; 173 174 static struct callout if_slowtimo_timer; 175 static struct netmsg_base if_slowtimo_netmsg; 176 177 int if_index = 0; 178 struct ifnet **ifindex2ifnet = NULL; 179 static struct mtx ifnet_mtx = MTX_INITIALIZER("ifnet"); 180 181 static struct ifsubq_stage_head ifsubq_stage_heads[MAXCPU]; 182 183 #ifdef notyet 184 #define IFQ_KTR_STRING "ifq=%p" 185 #define IFQ_KTR_ARGS struct ifaltq *ifq 186 #ifndef KTR_IFQ 187 #define KTR_IFQ KTR_ALL 188 #endif 189 KTR_INFO_MASTER(ifq); 190 KTR_INFO(KTR_IFQ, ifq, enqueue, 0, IFQ_KTR_STRING, IFQ_KTR_ARGS); 191 KTR_INFO(KTR_IFQ, ifq, dequeue, 1, IFQ_KTR_STRING, IFQ_KTR_ARGS); 192 #define logifq(name, arg) KTR_LOG(ifq_ ## name, arg) 193 194 #define IF_START_KTR_STRING "ifp=%p" 195 #define IF_START_KTR_ARGS struct ifnet *ifp 196 #ifndef KTR_IF_START 197 #define KTR_IF_START KTR_ALL 198 #endif 199 KTR_INFO_MASTER(if_start); 200 KTR_INFO(KTR_IF_START, if_start, run, 0, 201 IF_START_KTR_STRING, IF_START_KTR_ARGS); 202 KTR_INFO(KTR_IF_START, if_start, sched, 1, 203 IF_START_KTR_STRING, IF_START_KTR_ARGS); 204 
KTR_INFO(KTR_IF_START, if_start, avoid, 2, 205 IF_START_KTR_STRING, IF_START_KTR_ARGS); 206 KTR_INFO(KTR_IF_START, if_start, contend_sched, 3, 207 IF_START_KTR_STRING, IF_START_KTR_ARGS); 208 KTR_INFO(KTR_IF_START, if_start, chase_sched, 4, 209 IF_START_KTR_STRING, IF_START_KTR_ARGS); 210 #define logifstart(name, arg) KTR_LOG(if_start_ ## name, arg) 211 #endif 212 213 TAILQ_HEAD(, ifg_group) ifg_head = TAILQ_HEAD_INITIALIZER(ifg_head); 214 215 /* 216 * Network interface utility routines. 217 * 218 * Routines with ifa_ifwith* names take sockaddr *'s as 219 * parameters. 220 */ 221 /* ARGSUSED*/ 222 static void 223 ifinit(void *dummy) 224 { 225 226 callout_init_mp(&if_slowtimo_timer); 227 netmsg_init(&if_slowtimo_netmsg, NULL, &netisr_adone_rport, 228 MSGF_PRIORITY, if_slowtimo_dispatch); 229 230 /* Start if_slowtimo */ 231 lwkt_sendmsg(netisr_cpuport(0), &if_slowtimo_netmsg.lmsg); 232 } 233 234 static void 235 ifsq_ifstart_ipifunc(void *arg) 236 { 237 struct ifaltq_subque *ifsq = arg; 238 struct lwkt_msg *lmsg = ifsq_get_ifstart_lmsg(ifsq, mycpuid); 239 240 crit_enter(); 241 if (lmsg->ms_flags & MSGF_DONE) 242 lwkt_sendmsg_oncpu(netisr_cpuport(mycpuid), lmsg); 243 crit_exit(); 244 } 245 246 static __inline void 247 ifsq_stage_remove(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 248 { 249 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED); 250 TAILQ_REMOVE(&head->stg_head, stage, stg_link); 251 stage->stg_flags &= ~(IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED); 252 stage->stg_cnt = 0; 253 stage->stg_len = 0; 254 } 255 256 static __inline void 257 ifsq_stage_insert(struct ifsubq_stage_head *head, struct ifsubq_stage *stage) 258 { 259 KKASSERT((stage->stg_flags & 260 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 261 stage->stg_flags |= IFSQ_STAGE_FLAG_QUED; 262 TAILQ_INSERT_TAIL(&head->stg_head, stage, stg_link); 263 } 264 265 /* 266 * Schedule ifnet.if_start on the subqueue owner CPU 267 */ 268 static void 269 ifsq_ifstart_schedule(struct ifaltq_subque *ifsq, int force) 270 { 271 int cpu; 272 273 if (!force && curthread->td_type == TD_TYPE_NETISR && 274 ifsq_stage_cntmax > 0) { 275 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 276 277 stage->stg_cnt = 0; 278 stage->stg_len = 0; 279 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0) 280 ifsq_stage_insert(&ifsubq_stage_heads[mycpuid], stage); 281 stage->stg_flags |= IFSQ_STAGE_FLAG_SCHED; 282 return; 283 } 284 285 cpu = ifsq_get_cpuid(ifsq); 286 if (cpu != mycpuid) 287 lwkt_send_ipiq(globaldata_find(cpu), ifsq_ifstart_ipifunc, ifsq); 288 else 289 ifsq_ifstart_ipifunc(ifsq); 290 } 291 292 /* 293 * NOTE: 294 * This function will release ifnet.if_start subqueue interlock, 295 * if ifnet.if_start for the subqueue does not need to be scheduled 296 */ 297 static __inline int 298 ifsq_ifstart_need_schedule(struct ifaltq_subque *ifsq, int running) 299 { 300 if (!running || ifsq_is_empty(ifsq) 301 #ifdef ALTQ 302 || ifsq->ifsq_altq->altq_tbr != NULL 303 #endif 304 ) { 305 ALTQ_SQ_LOCK(ifsq); 306 /* 307 * ifnet.if_start subqueue interlock is released, if: 308 * 1) Hardware can not take any packets, due to 309 * o interface is marked down 310 * o hardware queue is full (ifsq_is_oactive) 311 * Under the second situation, hardware interrupt 312 * or polling(4) will call/schedule ifnet.if_start 313 * on the subqueue when hardware queue is ready 314 * 2) There is no packet in the subqueue. 315 * Further ifq_dispatch or ifq_handoff will call/ 316 * schedule ifnet.if_start on the subqueue. 
317 * 3) TBR is used and it does not allow further 318 * dequeueing. 319 * TBR callout will call ifnet.if_start on the 320 * subqueue. 321 */ 322 if (!running || !ifsq_data_ready(ifsq)) { 323 ifsq_clr_started(ifsq); 324 ALTQ_SQ_UNLOCK(ifsq); 325 return 0; 326 } 327 ALTQ_SQ_UNLOCK(ifsq); 328 } 329 return 1; 330 } 331 332 static void 333 ifsq_ifstart_dispatch(netmsg_t msg) 334 { 335 struct lwkt_msg *lmsg = &msg->base.lmsg; 336 struct ifaltq_subque *ifsq = lmsg->u.ms_resultp; 337 struct ifnet *ifp = ifsq_get_ifp(ifsq); 338 struct globaldata *gd = mycpu; 339 int running = 0, need_sched; 340 341 crit_enter_gd(gd); 342 343 lwkt_replymsg(lmsg, 0); /* reply ASAP */ 344 345 if (gd->gd_cpuid != ifsq_get_cpuid(ifsq)) { 346 /* 347 * We need to chase the subqueue owner CPU change. 348 */ 349 ifsq_ifstart_schedule(ifsq, 1); 350 crit_exit_gd(gd); 351 return; 352 } 353 354 ifsq_serialize_hw(ifsq); 355 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 356 ifp->if_start(ifp, ifsq); 357 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 358 running = 1; 359 } 360 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 361 ifsq_deserialize_hw(ifsq); 362 363 if (need_sched) { 364 /* 365 * More data need to be transmitted, ifnet.if_start is 366 * scheduled on the subqueue owner CPU, and we keep going. 367 * NOTE: ifnet.if_start subqueue interlock is not released. 368 */ 369 ifsq_ifstart_schedule(ifsq, 0); 370 } 371 372 crit_exit_gd(gd); 373 } 374 375 /* Device driver ifnet.if_start helper function */ 376 void 377 ifsq_devstart(struct ifaltq_subque *ifsq) 378 { 379 struct ifnet *ifp = ifsq_get_ifp(ifsq); 380 int running = 0; 381 382 ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq); 383 384 ALTQ_SQ_LOCK(ifsq); 385 if (ifsq_is_started(ifsq) || !ifsq_data_ready(ifsq)) { 386 ALTQ_SQ_UNLOCK(ifsq); 387 return; 388 } 389 ifsq_set_started(ifsq); 390 ALTQ_SQ_UNLOCK(ifsq); 391 392 ifp->if_start(ifp, ifsq); 393 394 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 395 running = 1; 396 397 if (ifsq_ifstart_need_schedule(ifsq, running)) { 398 /* 399 * More data need to be transmitted, ifnet.if_start is 400 * scheduled on ifnet's CPU, and we keep going. 401 * NOTE: ifnet.if_start interlock is not released. 402 */ 403 ifsq_ifstart_schedule(ifsq, 0); 404 } 405 } 406 407 void 408 if_devstart(struct ifnet *ifp) 409 { 410 ifsq_devstart(ifq_get_subq_default(&ifp->if_snd)); 411 } 412 413 /* Device driver ifnet.if_start schedule helper function */ 414 void 415 ifsq_devstart_sched(struct ifaltq_subque *ifsq) 416 { 417 ifsq_ifstart_schedule(ifsq, 1); 418 } 419 420 void 421 if_devstart_sched(struct ifnet *ifp) 422 { 423 ifsq_devstart_sched(ifq_get_subq_default(&ifp->if_snd)); 424 } 425 426 static void 427 if_default_serialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 428 { 429 lwkt_serialize_enter(ifp->if_serializer); 430 } 431 432 static void 433 if_default_deserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 434 { 435 lwkt_serialize_exit(ifp->if_serializer); 436 } 437 438 static int 439 if_default_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz __unused) 440 { 441 return lwkt_serialize_try(ifp->if_serializer); 442 } 443 444 #ifdef INVARIANTS 445 static void 446 if_default_serialize_assert(struct ifnet *ifp, 447 enum ifnet_serialize slz __unused, 448 boolean_t serialized) 449 { 450 if (serialized) 451 ASSERT_SERIALIZED(ifp->if_serializer); 452 else 453 ASSERT_NOT_SERIALIZED(ifp->if_serializer); 454 } 455 #endif 456 457 /* 458 * Attach an interface to the list of "active" interfaces. 
459 * 460 * The serializer is optional. 461 */ 462 void 463 if_attach(struct ifnet *ifp, lwkt_serialize_t serializer) 464 { 465 unsigned socksize; 466 int namelen, masklen; 467 struct sockaddr_dl *sdl, *sdl_addr; 468 struct ifaddr *ifa; 469 struct ifaltq *ifq; 470 struct ifnet **old_ifindex2ifnet = NULL; 471 struct ifnet_array *old_ifnet_array; 472 int i, q, qlen; 473 char qlenname[64]; 474 475 static int if_indexlim = 8; 476 477 if (ifp->if_serialize != NULL) { 478 KASSERT(ifp->if_deserialize != NULL && 479 ifp->if_tryserialize != NULL && 480 ifp->if_serialize_assert != NULL, 481 ("serialize functions are partially setup")); 482 483 /* 484 * If the device supplies serialize functions, 485 * then clear if_serializer to catch any invalid 486 * usage of this field. 487 */ 488 KASSERT(serializer == NULL, 489 ("both serialize functions and default serializer " 490 "are supplied")); 491 ifp->if_serializer = NULL; 492 } else { 493 KASSERT(ifp->if_deserialize == NULL && 494 ifp->if_tryserialize == NULL && 495 ifp->if_serialize_assert == NULL, 496 ("serialize functions are partially setup")); 497 ifp->if_serialize = if_default_serialize; 498 ifp->if_deserialize = if_default_deserialize; 499 ifp->if_tryserialize = if_default_tryserialize; 500 #ifdef INVARIANTS 501 ifp->if_serialize_assert = if_default_serialize_assert; 502 #endif 503 504 /* 505 * The serializer can be passed in from the device, 506 * allowing the same serializer to be used for both 507 * the interrupt interlock and the device queue. 508 * If not specified, the netif structure will use an 509 * embedded serializer. 510 */ 511 if (serializer == NULL) { 512 serializer = &ifp->if_default_serializer; 513 lwkt_serialize_init(serializer); 514 } 515 ifp->if_serializer = serializer; 516 } 517 518 /* 519 * Make if_addrhead available on all CPUs, since they 520 * could be accessed by any threads. 521 */ 522 ifp->if_addrheads = kmalloc(ncpus * sizeof(struct ifaddrhead), 523 M_IFADDR, M_WAITOK | M_ZERO); 524 for (i = 0; i < ncpus; ++i) 525 TAILQ_INIT(&ifp->if_addrheads[i]); 526 527 TAILQ_INIT(&ifp->if_multiaddrs); 528 TAILQ_INIT(&ifp->if_groups); 529 getmicrotime(&ifp->if_lastchange); 530 531 /* 532 * create a Link Level name for this device 533 */ 534 namelen = strlen(ifp->if_xname); 535 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + namelen; 536 socksize = masklen + ifp->if_addrlen; 537 if (socksize < sizeof(*sdl)) 538 socksize = sizeof(*sdl); 539 socksize = RT_ROUNDUP(socksize); 540 ifa = ifa_create(sizeof(struct ifaddr) + 2 * socksize); 541 sdl = sdl_addr = (struct sockaddr_dl *)(ifa + 1); 542 sdl->sdl_len = socksize; 543 sdl->sdl_family = AF_LINK; 544 bcopy(ifp->if_xname, sdl->sdl_data, namelen); 545 sdl->sdl_nlen = namelen; 546 sdl->sdl_type = ifp->if_type; 547 ifp->if_lladdr = ifa; 548 ifa->ifa_ifp = ifp; 549 ifa->ifa_rtrequest = link_rtrequest; 550 ifa->ifa_addr = (struct sockaddr *)sdl; 551 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); 552 ifa->ifa_netmask = (struct sockaddr *)sdl; 553 sdl->sdl_len = masklen; 554 while (namelen != 0) 555 sdl->sdl_data[--namelen] = 0xff; 556 ifa_iflink(ifa, ifp, 0 /* Insert head */); 557 558 /* 559 * Make if_data available on all CPUs, since they could 560 * be updated by hardware interrupt routing, which could 561 * be bound to any CPU. 
562 */ 563 ifp->if_data_pcpu = kmalloc_cachealign( 564 ncpus * sizeof(struct ifdata_pcpu), M_DEVBUF, M_WAITOK | M_ZERO); 565 566 if (ifp->if_mapsubq == NULL) 567 ifp->if_mapsubq = ifq_mapsubq_default; 568 569 ifq = &ifp->if_snd; 570 ifq->altq_type = 0; 571 ifq->altq_disc = NULL; 572 ifq->altq_flags &= ALTQF_CANTCHANGE; 573 ifq->altq_tbr = NULL; 574 ifq->altq_ifp = ifp; 575 576 if (ifq->altq_subq_cnt <= 0) 577 ifq->altq_subq_cnt = 1; 578 ifq->altq_subq = kmalloc_cachealign( 579 ifq->altq_subq_cnt * sizeof(struct ifaltq_subque), 580 M_DEVBUF, M_WAITOK | M_ZERO); 581 582 if (ifq->altq_maxlen == 0) { 583 if_printf(ifp, "driver didn't set altq_maxlen\n"); 584 ifq_set_maxlen(ifq, ifqmaxlen); 585 } 586 587 /* Allow user to override driver's setting. */ 588 ksnprintf(qlenname, sizeof(qlenname), "net.%s.qlenmax", ifp->if_xname); 589 qlen = -1; 590 TUNABLE_INT_FETCH(qlenname, &qlen); 591 if (qlen > 0) { 592 if_printf(ifp, "qlenmax -> %d\n", qlen); 593 ifq_set_maxlen(ifq, qlen); 594 } 595 596 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 597 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 598 599 ALTQ_SQ_LOCK_INIT(ifsq); 600 ifsq->ifsq_index = q; 601 602 ifsq->ifsq_altq = ifq; 603 ifsq->ifsq_ifp = ifp; 604 605 ifsq->ifsq_maxlen = ifq->altq_maxlen; 606 ifsq->ifsq_maxbcnt = ifsq->ifsq_maxlen * MCLBYTES; 607 ifsq->ifsq_prepended = NULL; 608 ifsq->ifsq_started = 0; 609 ifsq->ifsq_hw_oactive = 0; 610 ifsq_set_cpuid(ifsq, 0); 611 if (ifp->if_serializer != NULL) 612 ifsq_set_hw_serialize(ifsq, ifp->if_serializer); 613 614 /* XXX: netisr_ncpus */ 615 ifsq->ifsq_stage = 616 kmalloc_cachealign(ncpus * sizeof(struct ifsubq_stage), 617 M_DEVBUF, M_WAITOK | M_ZERO); 618 for (i = 0; i < ncpus; ++i) 619 ifsq->ifsq_stage[i].stg_subq = ifsq; 620 621 /* 622 * Allocate one if_start message for each CPU, since 623 * the hardware TX ring could be assigned to any CPU. 624 * 625 * NOTE: 626 * If the hardware TX ring polling CPU and the hardware 627 * TX ring interrupt CPU are same, one if_start message 628 * should be enough. 629 */ 630 ifsq->ifsq_ifstart_nmsg = 631 kmalloc(ncpus * sizeof(struct netmsg_base), 632 M_LWKTMSG, M_WAITOK); 633 for (i = 0; i < ncpus; ++i) { 634 netmsg_init(&ifsq->ifsq_ifstart_nmsg[i], NULL, 635 &netisr_adone_rport, 0, ifsq_ifstart_dispatch); 636 ifsq->ifsq_ifstart_nmsg[i].lmsg.u.ms_resultp = ifsq; 637 } 638 } 639 ifq_set_classic(ifq); 640 641 /* 642 * Increase mbuf cluster/jcluster limits for the mbufs that 643 * could sit on the device queues for quite some time. 644 */ 645 if (ifp->if_nmbclusters > 0) 646 mcl_inclimit(ifp->if_nmbclusters); 647 if (ifp->if_nmbjclusters > 0) 648 mjcl_inclimit(ifp->if_nmbjclusters); 649 650 /* 651 * Install this ifp into ifindex2inet, ifnet queue and ifnet 652 * array after it is setup. 653 * 654 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 655 * by ifnet lock, so that non-netisr threads could get a 656 * consistent view. 
657 */ 658 ifnet_lock(); 659 660 /* Don't update if_index until ifindex2ifnet is setup */ 661 ifp->if_index = if_index + 1; 662 sdl_addr->sdl_index = ifp->if_index; 663 664 /* 665 * Install this ifp into ifindex2ifnet 666 */ 667 if (ifindex2ifnet == NULL || ifp->if_index >= if_indexlim) { 668 unsigned int n; 669 struct ifnet **q; 670 671 /* 672 * Grow ifindex2ifnet 673 */ 674 if_indexlim <<= 1; 675 n = if_indexlim * sizeof(*q); 676 q = kmalloc(n, M_IFADDR, M_WAITOK | M_ZERO); 677 if (ifindex2ifnet != NULL) { 678 bcopy(ifindex2ifnet, q, n/2); 679 /* Free old ifindex2ifnet after sync all netisrs */ 680 old_ifindex2ifnet = ifindex2ifnet; 681 } 682 ifindex2ifnet = q; 683 } 684 ifindex2ifnet[ifp->if_index] = ifp; 685 /* 686 * Update if_index after this ifp is installed into ifindex2ifnet, 687 * so that netisrs could get a consistent view of ifindex2ifnet. 688 */ 689 cpu_sfence(); 690 if_index = ifp->if_index; 691 692 /* 693 * Install this ifp into ifnet array. 694 */ 695 /* Free old ifnet array after sync all netisrs */ 696 old_ifnet_array = ifnet_array; 697 ifnet_array = ifnet_array_add(ifp, old_ifnet_array); 698 699 /* 700 * Install this ifp into ifnet queue. 701 */ 702 TAILQ_INSERT_TAIL(&ifnetlist, ifp, if_link); 703 704 ifnet_unlock(); 705 706 /* 707 * Sync all netisrs so that the old ifindex2ifnet and ifnet array 708 * are no longer accessed and we can free them safely later on. 709 */ 710 netmsg_service_sync(); 711 if (old_ifindex2ifnet != NULL) 712 kfree(old_ifindex2ifnet, M_IFADDR); 713 ifnet_array_free(old_ifnet_array); 714 715 if (!SLIST_EMPTY(&domains)) 716 if_attachdomain1(ifp); 717 718 /* Announce the interface. */ 719 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 720 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); 721 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 722 } 723 724 static void 725 if_attachdomain(void *dummy) 726 { 727 struct ifnet *ifp; 728 729 ifnet_lock(); 730 TAILQ_FOREACH(ifp, &ifnetlist, if_list) 731 if_attachdomain1(ifp); 732 ifnet_unlock(); 733 } 734 SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST, 735 if_attachdomain, NULL); 736 737 static void 738 if_attachdomain1(struct ifnet *ifp) 739 { 740 struct domain *dp; 741 742 crit_enter(); 743 744 /* address family dependent data region */ 745 bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); 746 SLIST_FOREACH(dp, &domains, dom_next) 747 if (dp->dom_ifattach) 748 ifp->if_afdata[dp->dom_family] = 749 (*dp->dom_ifattach)(ifp); 750 crit_exit(); 751 } 752 753 /* 754 * Purge all addresses whose type is _not_ AF_LINK 755 */ 756 static void 757 if_purgeaddrs_nolink_dispatch(netmsg_t nmsg) 758 { 759 struct ifnet *ifp = nmsg->lmsg.u.ms_resultp; 760 struct ifaddr_container *ifac, *next; 761 762 ASSERT_NETISR0; 763 764 /* 765 * The ifaddr processing in the following loop will block, 766 * however, this function is called in netisr0, in which 767 * ifaddr list changes happen, so we don't care about the 768 * blockness of the ifaddr processing here. 769 */ 770 TAILQ_FOREACH_MUTABLE(ifac, &ifp->if_addrheads[mycpuid], 771 ifa_link, next) { 772 struct ifaddr *ifa = ifac->ifa; 773 774 /* Ignore marker */ 775 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 776 continue; 777 778 /* Leave link ifaddr as it is */ 779 if (ifa->ifa_addr->sa_family == AF_LINK) 780 continue; 781 #ifdef INET 782 /* XXX: Ugly!! 
ad hoc just for INET */ 783 if (ifa->ifa_addr->sa_family == AF_INET) { 784 struct ifaliasreq ifr; 785 struct sockaddr_in saved_addr, saved_dst; 786 #ifdef IFADDR_DEBUG_VERBOSE 787 int i; 788 789 kprintf("purge in4 addr %p: ", ifa); 790 for (i = 0; i < ncpus; ++i) { 791 kprintf("%d ", 792 ifa->ifa_containers[i].ifa_refcnt); 793 } 794 kprintf("\n"); 795 #endif 796 797 /* Save information for panic. */ 798 memcpy(&saved_addr, ifa->ifa_addr, sizeof(saved_addr)); 799 if (ifa->ifa_dstaddr != NULL) { 800 memcpy(&saved_dst, ifa->ifa_dstaddr, 801 sizeof(saved_dst)); 802 } else { 803 memset(&saved_dst, 0, sizeof(saved_dst)); 804 } 805 806 bzero(&ifr, sizeof ifr); 807 ifr.ifra_addr = *ifa->ifa_addr; 808 if (ifa->ifa_dstaddr) 809 ifr.ifra_broadaddr = *ifa->ifa_dstaddr; 810 if (in_control(SIOCDIFADDR, (caddr_t)&ifr, ifp, 811 NULL) == 0) 812 continue; 813 814 /* MUST NOT HAPPEN */ 815 panic("%s: in_control failed %x, dst %x", ifp->if_xname, 816 ntohl(saved_addr.sin_addr.s_addr), 817 ntohl(saved_dst.sin_addr.s_addr)); 818 } 819 #endif /* INET */ 820 #ifdef INET6 821 if (ifa->ifa_addr->sa_family == AF_INET6) { 822 #ifdef IFADDR_DEBUG_VERBOSE 823 int i; 824 825 kprintf("purge in6 addr %p: ", ifa); 826 for (i = 0; i < ncpus; ++i) { 827 kprintf("%d ", 828 ifa->ifa_containers[i].ifa_refcnt); 829 } 830 kprintf("\n"); 831 #endif 832 833 in6_purgeaddr(ifa); 834 /* ifp_addrhead is already updated */ 835 continue; 836 } 837 #endif /* INET6 */ 838 if_printf(ifp, "destroy ifaddr family %d\n", 839 ifa->ifa_addr->sa_family); 840 ifa_ifunlink(ifa, ifp); 841 ifa_destroy(ifa); 842 } 843 844 netisr_replymsg(&nmsg->base, 0); 845 } 846 847 void 848 if_purgeaddrs_nolink(struct ifnet *ifp) 849 { 850 struct netmsg_base nmsg; 851 852 netmsg_init(&nmsg, NULL, &curthread->td_msgport, 0, 853 if_purgeaddrs_nolink_dispatch); 854 nmsg.lmsg.u.ms_resultp = ifp; 855 netisr_domsg(&nmsg, 0); 856 } 857 858 static void 859 ifq_stage_detach_handler(netmsg_t nmsg) 860 { 861 struct ifaltq *ifq = nmsg->lmsg.u.ms_resultp; 862 int q; 863 864 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 865 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 866 struct ifsubq_stage *stage = ifsq_get_stage(ifsq, mycpuid); 867 868 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) 869 ifsq_stage_remove(&ifsubq_stage_heads[mycpuid], stage); 870 } 871 lwkt_replymsg(&nmsg->lmsg, 0); 872 } 873 874 static void 875 ifq_stage_detach(struct ifaltq *ifq) 876 { 877 struct netmsg_base base; 878 int cpu; 879 880 netmsg_init(&base, NULL, &curthread->td_msgport, 0, 881 ifq_stage_detach_handler); 882 base.lmsg.u.ms_resultp = ifq; 883 884 /* XXX netisr_ncpus */ 885 for (cpu = 0; cpu < ncpus; ++cpu) 886 lwkt_domsg(netisr_cpuport(cpu), &base.lmsg, 0); 887 } 888 889 struct netmsg_if_rtdel { 890 struct netmsg_base base; 891 struct ifnet *ifp; 892 }; 893 894 static void 895 if_rtdel_dispatch(netmsg_t msg) 896 { 897 struct netmsg_if_rtdel *rmsg = (void *)msg; 898 int i, cpu; 899 900 cpu = mycpuid; 901 ASSERT_NETISR_NCPUS(cpu); 902 903 for (i = 1; i <= AF_MAX; i++) { 904 struct radix_node_head *rnh; 905 906 if ((rnh = rt_tables[cpu][i]) == NULL) 907 continue; 908 rnh->rnh_walktree(rnh, if_rtdel, rmsg->ifp); 909 } 910 netisr_forwardmsg(&msg->base, cpu + 1); 911 } 912 913 /* 914 * Detach an interface, removing it from the 915 * list of "active" interfaces. 916 */ 917 void 918 if_detach(struct ifnet *ifp) 919 { 920 struct ifnet_array *old_ifnet_array; 921 struct netmsg_if_rtdel msg; 922 struct domain *dp; 923 int q; 924 925 /* Announce that the interface is gone. 
*/ 926 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp); 927 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 928 devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL); 929 930 /* 931 * Remove this ifp from ifindex2inet, ifnet queue and ifnet 932 * array before it is whacked. 933 * 934 * Protect ifindex2ifnet, ifnet queue and ifnet array changes 935 * by ifnet lock, so that non-netisr threads could get a 936 * consistent view. 937 */ 938 ifnet_lock(); 939 940 /* 941 * Remove this ifp from ifindex2ifnet and maybe decrement if_index. 942 */ 943 ifindex2ifnet[ifp->if_index] = NULL; 944 while (if_index > 0 && ifindex2ifnet[if_index] == NULL) 945 if_index--; 946 947 /* 948 * Remove this ifp from ifnet queue. 949 */ 950 TAILQ_REMOVE(&ifnetlist, ifp, if_link); 951 952 /* 953 * Remove this ifp from ifnet array. 954 */ 955 /* Free old ifnet array after sync all netisrs */ 956 old_ifnet_array = ifnet_array; 957 ifnet_array = ifnet_array_del(ifp, old_ifnet_array); 958 959 ifnet_unlock(); 960 961 /* 962 * Sync all netisrs so that the old ifnet array is no longer 963 * accessed and we can free it safely later on. 964 */ 965 netmsg_service_sync(); 966 ifnet_array_free(old_ifnet_array); 967 968 /* 969 * Remove routes and flush queues. 970 */ 971 crit_enter(); 972 #ifdef IFPOLL_ENABLE 973 if (ifp->if_flags & IFF_NPOLLING) 974 ifpoll_deregister(ifp); 975 #endif 976 if_down(ifp); 977 978 /* Decrease the mbuf clusters/jclusters limits increased by us */ 979 if (ifp->if_nmbclusters > 0) 980 mcl_inclimit(-ifp->if_nmbclusters); 981 if (ifp->if_nmbjclusters > 0) 982 mjcl_inclimit(-ifp->if_nmbjclusters); 983 984 #ifdef ALTQ 985 if (ifq_is_enabled(&ifp->if_snd)) 986 altq_disable(&ifp->if_snd); 987 if (ifq_is_attached(&ifp->if_snd)) 988 altq_detach(&ifp->if_snd); 989 #endif 990 991 /* 992 * Clean up all addresses. 993 */ 994 ifp->if_lladdr = NULL; 995 996 if_purgeaddrs_nolink(ifp); 997 if (!TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) { 998 struct ifaddr *ifa; 999 1000 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 1001 KASSERT(ifa->ifa_addr->sa_family == AF_LINK, 1002 ("non-link ifaddr is left on if_addrheads")); 1003 1004 ifa_ifunlink(ifa, ifp); 1005 ifa_destroy(ifa); 1006 KASSERT(TAILQ_EMPTY(&ifp->if_addrheads[mycpuid]), 1007 ("there are still ifaddrs left on if_addrheads")); 1008 } 1009 1010 #ifdef INET 1011 /* 1012 * Remove all IPv4 kernel structures related to ifp. 1013 */ 1014 in_ifdetach(ifp); 1015 #endif 1016 1017 #ifdef INET6 1018 /* 1019 * Remove all IPv6 kernel structs related to ifp. This should be done 1020 * before removing routing entries below, since IPv6 interface direct 1021 * routes are expected to be removed by the IPv6-specific kernel API. 1022 * Otherwise, the kernel will detect some inconsistency and bark it. 
1023 */ 1024 in6_ifdetach(ifp); 1025 #endif 1026 1027 /* 1028 * Delete all remaining routes using this interface 1029 */ 1030 netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY, 1031 if_rtdel_dispatch); 1032 msg.ifp = ifp; 1033 netisr_domsg_global(&msg.base); 1034 1035 SLIST_FOREACH(dp, &domains, dom_next) 1036 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) 1037 (*dp->dom_ifdetach)(ifp, 1038 ifp->if_afdata[dp->dom_family]); 1039 1040 kfree(ifp->if_addrheads, M_IFADDR); 1041 1042 lwkt_synchronize_ipiqs("if_detach"); 1043 ifq_stage_detach(&ifp->if_snd); 1044 1045 for (q = 0; q < ifp->if_snd.altq_subq_cnt; ++q) { 1046 struct ifaltq_subque *ifsq = &ifp->if_snd.altq_subq[q]; 1047 1048 kfree(ifsq->ifsq_ifstart_nmsg, M_LWKTMSG); 1049 kfree(ifsq->ifsq_stage, M_DEVBUF); 1050 } 1051 kfree(ifp->if_snd.altq_subq, M_DEVBUF); 1052 1053 kfree(ifp->if_data_pcpu, M_DEVBUF); 1054 1055 crit_exit(); 1056 } 1057 1058 /* 1059 * Create interface group without members 1060 */ 1061 struct ifg_group * 1062 if_creategroup(const char *groupname) 1063 { 1064 struct ifg_group *ifg = NULL; 1065 1066 if ((ifg = (struct ifg_group *)kmalloc(sizeof(struct ifg_group), 1067 M_TEMP, M_NOWAIT)) == NULL) 1068 return (NULL); 1069 1070 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); 1071 ifg->ifg_refcnt = 0; 1072 ifg->ifg_carp_demoted = 0; 1073 TAILQ_INIT(&ifg->ifg_members); 1074 #if NPF > 0 1075 pfi_attach_ifgroup(ifg); 1076 #endif 1077 TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next); 1078 1079 return (ifg); 1080 } 1081 1082 /* 1083 * Add a group to an interface 1084 */ 1085 int 1086 if_addgroup(struct ifnet *ifp, const char *groupname) 1087 { 1088 struct ifg_list *ifgl; 1089 struct ifg_group *ifg = NULL; 1090 struct ifg_member *ifgm; 1091 1092 if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && 1093 groupname[strlen(groupname) - 1] <= '9') 1094 return (EINVAL); 1095 1096 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1097 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1098 return (EEXIST); 1099 1100 if ((ifgl = kmalloc(sizeof(*ifgl), M_TEMP, M_NOWAIT)) == NULL) 1101 return (ENOMEM); 1102 1103 if ((ifgm = kmalloc(sizeof(*ifgm), M_TEMP, M_NOWAIT)) == NULL) { 1104 kfree(ifgl, M_TEMP); 1105 return (ENOMEM); 1106 } 1107 1108 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) 1109 if (!strcmp(ifg->ifg_group, groupname)) 1110 break; 1111 1112 if (ifg == NULL && (ifg = if_creategroup(groupname)) == NULL) { 1113 kfree(ifgl, M_TEMP); 1114 kfree(ifgm, M_TEMP); 1115 return (ENOMEM); 1116 } 1117 1118 ifg->ifg_refcnt++; 1119 ifgl->ifgl_group = ifg; 1120 ifgm->ifgm_ifp = ifp; 1121 1122 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); 1123 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); 1124 1125 #if NPF > 0 1126 pfi_group_change(groupname); 1127 #endif 1128 1129 return (0); 1130 } 1131 1132 /* 1133 * Remove a group from an interface 1134 */ 1135 int 1136 if_delgroup(struct ifnet *ifp, const char *groupname) 1137 { 1138 struct ifg_list *ifgl; 1139 struct ifg_member *ifgm; 1140 1141 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1142 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1143 break; 1144 if (ifgl == NULL) 1145 return (ENOENT); 1146 1147 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); 1148 1149 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) 1150 if (ifgm->ifgm_ifp == ifp) 1151 break; 1152 1153 if (ifgm != NULL) { 1154 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); 1155 kfree(ifgm, M_TEMP); 1156 } 1157 1158 if (--ifgl->ifgl_group->ifg_refcnt == 0) { 
1159 TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next); 1160 #if NPF > 0 1161 pfi_detach_ifgroup(ifgl->ifgl_group); 1162 #endif 1163 kfree(ifgl->ifgl_group, M_TEMP); 1164 } 1165 1166 kfree(ifgl, M_TEMP); 1167 1168 #if NPF > 0 1169 pfi_group_change(groupname); 1170 #endif 1171 1172 return (0); 1173 } 1174 1175 /* 1176 * Stores all groups from an interface in memory pointed 1177 * to by data 1178 */ 1179 int 1180 if_getgroup(caddr_t data, struct ifnet *ifp) 1181 { 1182 int len, error; 1183 struct ifg_list *ifgl; 1184 struct ifg_req ifgrq, *ifgp; 1185 struct ifgroupreq *ifgr = (struct ifgroupreq *)data; 1186 1187 if (ifgr->ifgr_len == 0) { 1188 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1189 ifgr->ifgr_len += sizeof(struct ifg_req); 1190 return (0); 1191 } 1192 1193 len = ifgr->ifgr_len; 1194 ifgp = ifgr->ifgr_groups; 1195 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) { 1196 if (len < sizeof(ifgrq)) 1197 return (EINVAL); 1198 bzero(&ifgrq, sizeof ifgrq); 1199 strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group, 1200 sizeof(ifgrq.ifgrq_group)); 1201 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, 1202 sizeof(struct ifg_req)))) 1203 return (error); 1204 len -= sizeof(ifgrq); 1205 ifgp++; 1206 } 1207 1208 return (0); 1209 } 1210 1211 /* 1212 * Stores all members of a group in memory pointed to by data 1213 */ 1214 int 1215 if_getgroupmembers(caddr_t data) 1216 { 1217 struct ifgroupreq *ifgr = (struct ifgroupreq *)data; 1218 struct ifg_group *ifg; 1219 struct ifg_member *ifgm; 1220 struct ifg_req ifgrq, *ifgp; 1221 int len, error; 1222 1223 TAILQ_FOREACH(ifg, &ifg_head, ifg_next) 1224 if (!strcmp(ifg->ifg_group, ifgr->ifgr_name)) 1225 break; 1226 if (ifg == NULL) 1227 return (ENOENT); 1228 1229 if (ifgr->ifgr_len == 0) { 1230 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) 1231 ifgr->ifgr_len += sizeof(ifgrq); 1232 return (0); 1233 } 1234 1235 len = ifgr->ifgr_len; 1236 ifgp = ifgr->ifgr_groups; 1237 TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) { 1238 if (len < sizeof(ifgrq)) 1239 return (EINVAL); 1240 bzero(&ifgrq, sizeof ifgrq); 1241 strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname, 1242 sizeof(ifgrq.ifgrq_member)); 1243 if ((error = copyout((caddr_t)&ifgrq, (caddr_t)ifgp, 1244 sizeof(struct ifg_req)))) 1245 return (error); 1246 len -= sizeof(ifgrq); 1247 ifgp++; 1248 } 1249 1250 return (0); 1251 } 1252 1253 /* 1254 * Delete Routes for a Network Interface 1255 * 1256 * Called for each routing entry via the rnh->rnh_walktree() call above 1257 * to delete all route entries referencing a detaching network interface. 
1258 * 1259 * Arguments: 1260 * rn pointer to node in the routing table 1261 * arg argument passed to rnh->rnh_walktree() - detaching interface 1262 * 1263 * Returns: 1264 * 0 successful 1265 * errno failed - reason indicated 1266 * 1267 */ 1268 static int 1269 if_rtdel(struct radix_node *rn, void *arg) 1270 { 1271 struct rtentry *rt = (struct rtentry *)rn; 1272 struct ifnet *ifp = arg; 1273 int err; 1274 1275 if (rt->rt_ifp == ifp) { 1276 1277 /* 1278 * Protect (sorta) against walktree recursion problems 1279 * with cloned routes 1280 */ 1281 if (!(rt->rt_flags & RTF_UP)) 1282 return (0); 1283 1284 err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, 1285 rt_mask(rt), rt->rt_flags, 1286 NULL); 1287 if (err) { 1288 log(LOG_WARNING, "if_rtdel: error %d\n", err); 1289 } 1290 } 1291 1292 return (0); 1293 } 1294 1295 static __inline boolean_t 1296 ifa_prefer(const struct ifaddr *cur_ifa, const struct ifaddr *old_ifa) 1297 { 1298 if (old_ifa == NULL) 1299 return TRUE; 1300 1301 if ((old_ifa->ifa_ifp->if_flags & IFF_UP) == 0 && 1302 (cur_ifa->ifa_ifp->if_flags & IFF_UP)) 1303 return TRUE; 1304 if ((old_ifa->ifa_flags & IFA_ROUTE) == 0 && 1305 (cur_ifa->ifa_flags & IFA_ROUTE)) 1306 return TRUE; 1307 return FALSE; 1308 } 1309 1310 /* 1311 * Locate an interface based on a complete address. 1312 */ 1313 struct ifaddr * 1314 ifa_ifwithaddr(struct sockaddr *addr) 1315 { 1316 const struct ifnet_array *arr; 1317 int i; 1318 1319 arr = ifnet_array_get(); 1320 for (i = 0; i < arr->ifnet_count; ++i) { 1321 struct ifnet *ifp = arr->ifnet_arr[i]; 1322 struct ifaddr_container *ifac; 1323 1324 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1325 struct ifaddr *ifa = ifac->ifa; 1326 1327 if (ifa->ifa_addr->sa_family != addr->sa_family) 1328 continue; 1329 if (sa_equal(addr, ifa->ifa_addr)) 1330 return (ifa); 1331 if ((ifp->if_flags & IFF_BROADCAST) && 1332 ifa->ifa_broadaddr && 1333 /* IPv6 doesn't have broadcast */ 1334 ifa->ifa_broadaddr->sa_len != 0 && 1335 sa_equal(ifa->ifa_broadaddr, addr)) 1336 return (ifa); 1337 } 1338 } 1339 return (NULL); 1340 } 1341 1342 /* 1343 * Locate the point to point interface with a given destination address. 1344 */ 1345 struct ifaddr * 1346 ifa_ifwithdstaddr(struct sockaddr *addr) 1347 { 1348 const struct ifnet_array *arr; 1349 int i; 1350 1351 arr = ifnet_array_get(); 1352 for (i = 0; i < arr->ifnet_count; ++i) { 1353 struct ifnet *ifp = arr->ifnet_arr[i]; 1354 struct ifaddr_container *ifac; 1355 1356 if (!(ifp->if_flags & IFF_POINTOPOINT)) 1357 continue; 1358 1359 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1360 struct ifaddr *ifa = ifac->ifa; 1361 1362 if (ifa->ifa_addr->sa_family != addr->sa_family) 1363 continue; 1364 if (ifa->ifa_dstaddr && 1365 sa_equal(addr, ifa->ifa_dstaddr)) 1366 return (ifa); 1367 } 1368 } 1369 return (NULL); 1370 } 1371 1372 /* 1373 * Find an interface on a specific network. If many, choice 1374 * is most specific found. 1375 */ 1376 struct ifaddr * 1377 ifa_ifwithnet(struct sockaddr *addr) 1378 { 1379 struct ifaddr *ifa_maybe = NULL; 1380 u_int af = addr->sa_family; 1381 char *addr_data = addr->sa_data, *cplim; 1382 const struct ifnet_array *arr; 1383 int i; 1384 1385 /* 1386 * AF_LINK addresses can be looked up directly by their index number, 1387 * so do that if we can. 
1388 */ 1389 if (af == AF_LINK) { 1390 struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr; 1391 1392 if (sdl->sdl_index && sdl->sdl_index <= if_index) 1393 return (ifindex2ifnet[sdl->sdl_index]->if_lladdr); 1394 } 1395 1396 /* 1397 * Scan though each interface, looking for ones that have 1398 * addresses in this address family. 1399 */ 1400 arr = ifnet_array_get(); 1401 for (i = 0; i < arr->ifnet_count; ++i) { 1402 struct ifnet *ifp = arr->ifnet_arr[i]; 1403 struct ifaddr_container *ifac; 1404 1405 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1406 struct ifaddr *ifa = ifac->ifa; 1407 char *cp, *cp2, *cp3; 1408 1409 if (ifa->ifa_addr->sa_family != af) 1410 next: continue; 1411 if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) { 1412 /* 1413 * This is a bit broken as it doesn't 1414 * take into account that the remote end may 1415 * be a single node in the network we are 1416 * looking for. 1417 * The trouble is that we don't know the 1418 * netmask for the remote end. 1419 */ 1420 if (ifa->ifa_dstaddr != NULL && 1421 sa_equal(addr, ifa->ifa_dstaddr)) 1422 return (ifa); 1423 } else { 1424 /* 1425 * if we have a special address handler, 1426 * then use it instead of the generic one. 1427 */ 1428 if (ifa->ifa_claim_addr) { 1429 if ((*ifa->ifa_claim_addr)(ifa, addr)) { 1430 return (ifa); 1431 } else { 1432 continue; 1433 } 1434 } 1435 1436 /* 1437 * Scan all the bits in the ifa's address. 1438 * If a bit dissagrees with what we are 1439 * looking for, mask it with the netmask 1440 * to see if it really matters. 1441 * (A byte at a time) 1442 */ 1443 if (ifa->ifa_netmask == 0) 1444 continue; 1445 cp = addr_data; 1446 cp2 = ifa->ifa_addr->sa_data; 1447 cp3 = ifa->ifa_netmask->sa_data; 1448 cplim = ifa->ifa_netmask->sa_len + 1449 (char *)ifa->ifa_netmask; 1450 while (cp3 < cplim) 1451 if ((*cp++ ^ *cp2++) & *cp3++) 1452 goto next; /* next address! */ 1453 /* 1454 * If the netmask of what we just found 1455 * is more specific than what we had before 1456 * (if we had one) then remember the new one 1457 * before continuing to search for an even 1458 * better one. If the netmasks are equal, 1459 * we prefer the this ifa based on the result 1460 * of ifa_prefer(). 1461 */ 1462 if (ifa_maybe == NULL || 1463 rn_refines((char *)ifa->ifa_netmask, 1464 (char *)ifa_maybe->ifa_netmask) || 1465 (sa_equal(ifa_maybe->ifa_netmask, 1466 ifa->ifa_netmask) && 1467 ifa_prefer(ifa, ifa_maybe))) 1468 ifa_maybe = ifa; 1469 } 1470 } 1471 } 1472 return (ifa_maybe); 1473 } 1474 1475 /* 1476 * Find an interface address specific to an interface best matching 1477 * a given address. 
1478 */ 1479 struct ifaddr * 1480 ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) 1481 { 1482 struct ifaddr_container *ifac; 1483 char *cp, *cp2, *cp3; 1484 char *cplim; 1485 struct ifaddr *ifa_maybe = NULL; 1486 u_int af = addr->sa_family; 1487 1488 if (af >= AF_MAX) 1489 return (0); 1490 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1491 struct ifaddr *ifa = ifac->ifa; 1492 1493 if (ifa->ifa_addr->sa_family != af) 1494 continue; 1495 if (ifa_maybe == NULL) 1496 ifa_maybe = ifa; 1497 if (ifa->ifa_netmask == NULL) { 1498 if (sa_equal(addr, ifa->ifa_addr) || 1499 (ifa->ifa_dstaddr != NULL && 1500 sa_equal(addr, ifa->ifa_dstaddr))) 1501 return (ifa); 1502 continue; 1503 } 1504 if (ifp->if_flags & IFF_POINTOPOINT) { 1505 if (sa_equal(addr, ifa->ifa_dstaddr)) 1506 return (ifa); 1507 } else { 1508 cp = addr->sa_data; 1509 cp2 = ifa->ifa_addr->sa_data; 1510 cp3 = ifa->ifa_netmask->sa_data; 1511 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; 1512 for (; cp3 < cplim; cp3++) 1513 if ((*cp++ ^ *cp2++) & *cp3) 1514 break; 1515 if (cp3 == cplim) 1516 return (ifa); 1517 } 1518 } 1519 return (ifa_maybe); 1520 } 1521 1522 /* 1523 * Default action when installing a route with a Link Level gateway. 1524 * Lookup an appropriate real ifa to point to. 1525 * This should be moved to /sys/net/link.c eventually. 1526 */ 1527 static void 1528 link_rtrequest(int cmd, struct rtentry *rt) 1529 { 1530 struct ifaddr *ifa; 1531 struct sockaddr *dst; 1532 struct ifnet *ifp; 1533 1534 if (cmd != RTM_ADD || (ifa = rt->rt_ifa) == NULL || 1535 (ifp = ifa->ifa_ifp) == NULL || (dst = rt_key(rt)) == NULL) 1536 return; 1537 ifa = ifaof_ifpforaddr(dst, ifp); 1538 if (ifa != NULL) { 1539 IFAFREE(rt->rt_ifa); 1540 IFAREF(ifa); 1541 rt->rt_ifa = ifa; 1542 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) 1543 ifa->ifa_rtrequest(cmd, rt); 1544 } 1545 } 1546 1547 struct netmsg_ifroute { 1548 struct netmsg_base base; 1549 struct ifnet *ifp; 1550 int flag; 1551 int fam; 1552 }; 1553 1554 /* 1555 * Mark an interface down and notify protocols of the transition. 1556 */ 1557 static void 1558 if_unroute_dispatch(netmsg_t nmsg) 1559 { 1560 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1561 struct ifnet *ifp = msg->ifp; 1562 int flag = msg->flag, fam = msg->fam; 1563 struct ifaddr_container *ifac; 1564 1565 ASSERT_NETISR0; 1566 1567 ifp->if_flags &= ~flag; 1568 getmicrotime(&ifp->if_lastchange); 1569 /* 1570 * The ifaddr processing in the following loop will block, 1571 * however, this function is called in netisr0, in which 1572 * ifaddr list changes happen, so we don't care about the 1573 * blockness of the ifaddr processing here. 1574 */ 1575 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1576 struct ifaddr *ifa = ifac->ifa; 1577 1578 /* Ignore marker */ 1579 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1580 continue; 1581 1582 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1583 kpfctlinput(PRC_IFDOWN, ifa->ifa_addr); 1584 } 1585 ifq_purge_all(&ifp->if_snd); 1586 rt_ifmsg(ifp); 1587 1588 netisr_replymsg(&nmsg->base, 0); 1589 } 1590 1591 void 1592 if_unroute(struct ifnet *ifp, int flag, int fam) 1593 { 1594 struct netmsg_ifroute msg; 1595 1596 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1597 if_unroute_dispatch); 1598 msg.ifp = ifp; 1599 msg.flag = flag; 1600 msg.fam = fam; 1601 netisr_domsg(&msg.base, 0); 1602 } 1603 1604 /* 1605 * Mark an interface up and notify protocols of the transition. 
1606 */ 1607 static void 1608 if_route_dispatch(netmsg_t nmsg) 1609 { 1610 struct netmsg_ifroute *msg = (struct netmsg_ifroute *)nmsg; 1611 struct ifnet *ifp = msg->ifp; 1612 int flag = msg->flag, fam = msg->fam; 1613 struct ifaddr_container *ifac; 1614 1615 ASSERT_NETISR0; 1616 1617 ifq_purge_all(&ifp->if_snd); 1618 ifp->if_flags |= flag; 1619 getmicrotime(&ifp->if_lastchange); 1620 /* 1621 * The ifaddr processing in the following loop will block, 1622 * however, this function is called in netisr0, in which 1623 * ifaddr list changes happen, so we don't care about the 1624 * blockness of the ifaddr processing here. 1625 */ 1626 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 1627 struct ifaddr *ifa = ifac->ifa; 1628 1629 /* Ignore marker */ 1630 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 1631 continue; 1632 1633 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1634 kpfctlinput(PRC_IFUP, ifa->ifa_addr); 1635 } 1636 rt_ifmsg(ifp); 1637 #ifdef INET6 1638 in6_if_up(ifp); 1639 #endif 1640 1641 netisr_replymsg(&nmsg->base, 0); 1642 } 1643 1644 void 1645 if_route(struct ifnet *ifp, int flag, int fam) 1646 { 1647 struct netmsg_ifroute msg; 1648 1649 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 0, 1650 if_route_dispatch); 1651 msg.ifp = ifp; 1652 msg.flag = flag; 1653 msg.fam = fam; 1654 netisr_domsg(&msg.base, 0); 1655 } 1656 1657 /* 1658 * Mark an interface down and notify protocols of the transition. An 1659 * interface going down is also considered to be a synchronizing event. 1660 * We must ensure that all packet processing related to the interface 1661 * has completed before we return so e.g. the caller can free the ifnet 1662 * structure that the mbufs may be referencing. 1663 * 1664 * NOTE: must be called at splnet or eqivalent. 1665 */ 1666 void 1667 if_down(struct ifnet *ifp) 1668 { 1669 if_unroute(ifp, IFF_UP, AF_UNSPEC); 1670 netmsg_service_sync(); 1671 } 1672 1673 /* 1674 * Mark an interface up and notify protocols of 1675 * the transition. 1676 * NOTE: must be called at splnet or eqivalent. 1677 */ 1678 void 1679 if_up(struct ifnet *ifp) 1680 { 1681 if_route(ifp, IFF_UP, AF_UNSPEC); 1682 } 1683 1684 /* 1685 * Process a link state change. 1686 * NOTE: must be called at splsoftnet or equivalent. 1687 */ 1688 void 1689 if_link_state_change(struct ifnet *ifp) 1690 { 1691 int link_state = ifp->if_link_state; 1692 1693 rt_ifmsg(ifp); 1694 devctl_notify("IFNET", ifp->if_xname, 1695 (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL); 1696 } 1697 1698 /* 1699 * Handle interface watchdog timer routines. Called 1700 * from softclock, we decrement timers (if set) and 1701 * call the appropriate interface routine on expiration. 
1702 */ 1703 static void 1704 if_slowtimo_dispatch(netmsg_t nmsg) 1705 { 1706 struct globaldata *gd = mycpu; 1707 const struct ifnet_array *arr; 1708 int i; 1709 1710 ASSERT_NETISR0; 1711 1712 crit_enter_gd(gd); 1713 lwkt_replymsg(&nmsg->lmsg, 0); /* reply ASAP */ 1714 crit_exit_gd(gd); 1715 1716 arr = ifnet_array_get(); 1717 for (i = 0; i < arr->ifnet_count; ++i) { 1718 struct ifnet *ifp = arr->ifnet_arr[i]; 1719 1720 crit_enter_gd(gd); 1721 1722 if (if_stats_compat) { 1723 IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets); 1724 IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors); 1725 IFNET_STAT_GET(ifp, opackets, ifp->if_opackets); 1726 IFNET_STAT_GET(ifp, oerrors, ifp->if_oerrors); 1727 IFNET_STAT_GET(ifp, collisions, ifp->if_collisions); 1728 IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes); 1729 IFNET_STAT_GET(ifp, obytes, ifp->if_obytes); 1730 IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts); 1731 IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts); 1732 IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops); 1733 IFNET_STAT_GET(ifp, noproto, ifp->if_noproto); 1734 IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops); 1735 } 1736 1737 if (ifp->if_timer == 0 || --ifp->if_timer) { 1738 crit_exit_gd(gd); 1739 continue; 1740 } 1741 if (ifp->if_watchdog) { 1742 if (ifnet_tryserialize_all(ifp)) { 1743 (*ifp->if_watchdog)(ifp); 1744 ifnet_deserialize_all(ifp); 1745 } else { 1746 /* try again next timeout */ 1747 ++ifp->if_timer; 1748 } 1749 } 1750 1751 crit_exit_gd(gd); 1752 } 1753 1754 callout_reset(&if_slowtimo_timer, hz / IFNET_SLOWHZ, if_slowtimo, NULL); 1755 } 1756 1757 static void 1758 if_slowtimo(void *arg __unused) 1759 { 1760 struct lwkt_msg *lmsg = &if_slowtimo_netmsg.lmsg; 1761 1762 KASSERT(mycpuid == 0, ("not on cpu0")); 1763 crit_enter(); 1764 if (lmsg->ms_flags & MSGF_DONE) 1765 lwkt_sendmsg_oncpu(netisr_cpuport(0), lmsg); 1766 crit_exit(); 1767 } 1768 1769 /* 1770 * Map interface name to 1771 * interface structure pointer. 1772 */ 1773 struct ifnet * 1774 ifunit(const char *name) 1775 { 1776 struct ifnet *ifp; 1777 1778 /* 1779 * Search all the interfaces for this name/number 1780 */ 1781 KASSERT(mtx_owned(&ifnet_mtx), ("ifnet is not locked")); 1782 1783 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 1784 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1785 break; 1786 } 1787 return (ifp); 1788 } 1789 1790 struct ifnet * 1791 ifunit_netisr(const char *name) 1792 { 1793 const struct ifnet_array *arr; 1794 int i; 1795 1796 /* 1797 * Search all the interfaces for this name/number 1798 */ 1799 1800 arr = ifnet_array_get(); 1801 for (i = 0; i < arr->ifnet_count; ++i) { 1802 struct ifnet *ifp = arr->ifnet_arr[i]; 1803 1804 if (strncmp(ifp->if_xname, name, IFNAMSIZ) == 0) 1805 return ifp; 1806 } 1807 return NULL; 1808 } 1809 1810 /* 1811 * Interface ioctls. 
1812 */ 1813 int 1814 ifioctl(struct socket *so, u_long cmd, caddr_t data, struct ucred *cred) 1815 { 1816 struct ifnet *ifp; 1817 struct ifreq *ifr; 1818 struct ifstat *ifs; 1819 int error, do_ifup = 0; 1820 short oif_flags; 1821 int new_flags; 1822 size_t namelen, onamelen; 1823 char new_name[IFNAMSIZ]; 1824 struct ifaddr *ifa; 1825 struct sockaddr_dl *sdl; 1826 1827 switch (cmd) { 1828 case SIOCGIFCONF: 1829 case OSIOCGIFCONF: 1830 return (ifconf(cmd, data, cred)); 1831 default: 1832 break; 1833 } 1834 1835 ifr = (struct ifreq *)data; 1836 1837 switch (cmd) { 1838 case SIOCIFCREATE: 1839 case SIOCIFCREATE2: 1840 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1841 return (error); 1842 return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), 1843 cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL)); 1844 case SIOCIFDESTROY: 1845 if ((error = priv_check_cred(cred, PRIV_ROOT, 0)) != 0) 1846 return (error); 1847 return (if_clone_destroy(ifr->ifr_name)); 1848 case SIOCIFGCLONERS: 1849 return (if_clone_list((struct if_clonereq *)data)); 1850 default: 1851 break; 1852 } 1853 1854 /* 1855 * Nominal ioctl through interface, lookup the ifp and obtain a 1856 * lock to serialize the ifconfig ioctl operation. 1857 */ 1858 ifnet_lock(); 1859 1860 ifp = ifunit(ifr->ifr_name); 1861 if (ifp == NULL) { 1862 ifnet_unlock(); 1863 return (ENXIO); 1864 } 1865 error = 0; 1866 1867 switch (cmd) { 1868 case SIOCGIFINDEX: 1869 ifr->ifr_index = ifp->if_index; 1870 break; 1871 1872 case SIOCGIFFLAGS: 1873 ifr->ifr_flags = ifp->if_flags; 1874 ifr->ifr_flagshigh = ifp->if_flags >> 16; 1875 break; 1876 1877 case SIOCGIFCAP: 1878 ifr->ifr_reqcap = ifp->if_capabilities; 1879 ifr->ifr_curcap = ifp->if_capenable; 1880 break; 1881 1882 case SIOCGIFMETRIC: 1883 ifr->ifr_metric = ifp->if_metric; 1884 break; 1885 1886 case SIOCGIFMTU: 1887 ifr->ifr_mtu = ifp->if_mtu; 1888 break; 1889 1890 case SIOCGIFTSOLEN: 1891 ifr->ifr_tsolen = ifp->if_tsolen; 1892 break; 1893 1894 case SIOCGIFDATA: 1895 error = copyout((caddr_t)&ifp->if_data, ifr->ifr_data, 1896 sizeof(ifp->if_data)); 1897 break; 1898 1899 case SIOCGIFPHYS: 1900 ifr->ifr_phys = ifp->if_physical; 1901 break; 1902 1903 case SIOCGIFPOLLCPU: 1904 ifr->ifr_pollcpu = -1; 1905 break; 1906 1907 case SIOCSIFPOLLCPU: 1908 break; 1909 1910 case SIOCSIFFLAGS: 1911 error = priv_check_cred(cred, PRIV_ROOT, 0); 1912 if (error) 1913 break; 1914 new_flags = (ifr->ifr_flags & 0xffff) | 1915 (ifr->ifr_flagshigh << 16); 1916 if (ifp->if_flags & IFF_SMART) { 1917 /* Smart drivers twiddle their own routes */ 1918 } else if (ifp->if_flags & IFF_UP && 1919 (new_flags & IFF_UP) == 0) { 1920 if_down(ifp); 1921 } else if (new_flags & IFF_UP && 1922 (ifp->if_flags & IFF_UP) == 0) { 1923 do_ifup = 1; 1924 } 1925 1926 #ifdef IFPOLL_ENABLE 1927 if ((new_flags ^ ifp->if_flags) & IFF_NPOLLING) { 1928 if (new_flags & IFF_NPOLLING) 1929 ifpoll_register(ifp); 1930 else 1931 ifpoll_deregister(ifp); 1932 } 1933 #endif 1934 1935 ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | 1936 (new_flags &~ IFF_CANTCHANGE); 1937 if (new_flags & IFF_PPROMISC) { 1938 /* Permanently promiscuous mode requested */ 1939 ifp->if_flags |= IFF_PROMISC; 1940 } else if (ifp->if_pcount == 0) { 1941 ifp->if_flags &= ~IFF_PROMISC; 1942 } 1943 if (ifp->if_ioctl) { 1944 ifnet_serialize_all(ifp); 1945 ifp->if_ioctl(ifp, cmd, data, cred); 1946 ifnet_deserialize_all(ifp); 1947 } 1948 if (do_ifup) 1949 if_up(ifp); 1950 getmicrotime(&ifp->if_lastchange); 1951 break; 1952 1953 case SIOCSIFCAP: 1954 error = priv_check_cred(cred, 
PRIV_ROOT, 0); 1955 if (error) 1956 break; 1957 if (ifr->ifr_reqcap & ~ifp->if_capabilities) { 1958 error = EINVAL; 1959 break; 1960 } 1961 ifnet_serialize_all(ifp); 1962 ifp->if_ioctl(ifp, cmd, data, cred); 1963 ifnet_deserialize_all(ifp); 1964 break; 1965 1966 case SIOCSIFNAME: 1967 error = priv_check_cred(cred, PRIV_ROOT, 0); 1968 if (error) 1969 break; 1970 error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL); 1971 if (error) 1972 break; 1973 if (new_name[0] == '\0') { 1974 error = EINVAL; 1975 break; 1976 } 1977 if (ifunit(new_name) != NULL) { 1978 error = EEXIST; 1979 break; 1980 } 1981 1982 EVENTHANDLER_INVOKE(ifnet_detach_event, ifp); 1983 1984 /* Announce the departure of the interface. */ 1985 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 1986 1987 strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname)); 1988 ifa = TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa; 1989 sdl = (struct sockaddr_dl *)ifa->ifa_addr; 1990 namelen = strlen(new_name); 1991 onamelen = sdl->sdl_nlen; 1992 /* 1993 * Move the address if needed. This is safe because we 1994 * allocate space for a name of length IFNAMSIZ when we 1995 * create this in if_attach(). 1996 */ 1997 if (namelen != onamelen) { 1998 bcopy(sdl->sdl_data + onamelen, 1999 sdl->sdl_data + namelen, sdl->sdl_alen); 2000 } 2001 bcopy(new_name, sdl->sdl_data, namelen); 2002 sdl->sdl_nlen = namelen; 2003 sdl = (struct sockaddr_dl *)ifa->ifa_netmask; 2004 bzero(sdl->sdl_data, onamelen); 2005 while (namelen != 0) 2006 sdl->sdl_data[--namelen] = 0xff; 2007 2008 EVENTHANDLER_INVOKE(ifnet_attach_event, ifp); 2009 2010 /* Announce the return of the interface. */ 2011 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 2012 break; 2013 2014 case SIOCSIFMETRIC: 2015 error = priv_check_cred(cred, PRIV_ROOT, 0); 2016 if (error) 2017 break; 2018 ifp->if_metric = ifr->ifr_metric; 2019 getmicrotime(&ifp->if_lastchange); 2020 break; 2021 2022 case SIOCSIFPHYS: 2023 error = priv_check_cred(cred, PRIV_ROOT, 0); 2024 if (error) 2025 break; 2026 if (ifp->if_ioctl == NULL) { 2027 error = EOPNOTSUPP; 2028 break; 2029 } 2030 ifnet_serialize_all(ifp); 2031 error = ifp->if_ioctl(ifp, cmd, data, cred); 2032 ifnet_deserialize_all(ifp); 2033 if (error == 0) 2034 getmicrotime(&ifp->if_lastchange); 2035 break; 2036 2037 case SIOCSIFMTU: 2038 { 2039 u_long oldmtu = ifp->if_mtu; 2040 2041 error = priv_check_cred(cred, PRIV_ROOT, 0); 2042 if (error) 2043 break; 2044 if (ifp->if_ioctl == NULL) { 2045 error = EOPNOTSUPP; 2046 break; 2047 } 2048 if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU) { 2049 error = EINVAL; 2050 break; 2051 } 2052 ifnet_serialize_all(ifp); 2053 error = ifp->if_ioctl(ifp, cmd, data, cred); 2054 ifnet_deserialize_all(ifp); 2055 if (error == 0) { 2056 getmicrotime(&ifp->if_lastchange); 2057 rt_ifmsg(ifp); 2058 } 2059 /* 2060 * If the link MTU changed, do network layer specific procedure. 2061 */ 2062 if (ifp->if_mtu != oldmtu) { 2063 #ifdef INET6 2064 nd6_setmtu(ifp); 2065 #endif 2066 } 2067 break; 2068 } 2069 2070 case SIOCSIFTSOLEN: 2071 error = priv_check_cred(cred, PRIV_ROOT, 0); 2072 if (error) 2073 break; 2074 2075 /* XXX need driver supplied upper limit */ 2076 if (ifr->ifr_tsolen <= 0) { 2077 error = EINVAL; 2078 break; 2079 } 2080 ifp->if_tsolen = ifr->ifr_tsolen; 2081 break; 2082 2083 case SIOCADDMULTI: 2084 case SIOCDELMULTI: 2085 error = priv_check_cred(cred, PRIV_ROOT, 0); 2086 if (error) 2087 break; 2088 2089 /* Don't allow group membership on non-multicast interfaces. 
*/ 2090 if ((ifp->if_flags & IFF_MULTICAST) == 0) { 2091 error = EOPNOTSUPP; 2092 break; 2093 } 2094 2095 /* Don't let users screw up protocols' entries. */ 2096 if (ifr->ifr_addr.sa_family != AF_LINK) { 2097 error = EINVAL; 2098 break; 2099 } 2100 2101 if (cmd == SIOCADDMULTI) { 2102 struct ifmultiaddr *ifma; 2103 error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); 2104 } else { 2105 error = if_delmulti(ifp, &ifr->ifr_addr); 2106 } 2107 if (error == 0) 2108 getmicrotime(&ifp->if_lastchange); 2109 break; 2110 2111 case SIOCSIFPHYADDR: 2112 case SIOCDIFPHYADDR: 2113 #ifdef INET6 2114 case SIOCSIFPHYADDR_IN6: 2115 #endif 2116 case SIOCSLIFPHYADDR: 2117 case SIOCSIFMEDIA: 2118 case SIOCSIFGENERIC: 2119 error = priv_check_cred(cred, PRIV_ROOT, 0); 2120 if (error) 2121 break; 2122 if (ifp->if_ioctl == 0) { 2123 error = EOPNOTSUPP; 2124 break; 2125 } 2126 ifnet_serialize_all(ifp); 2127 error = ifp->if_ioctl(ifp, cmd, data, cred); 2128 ifnet_deserialize_all(ifp); 2129 if (error == 0) 2130 getmicrotime(&ifp->if_lastchange); 2131 break; 2132 2133 case SIOCGIFSTATUS: 2134 ifs = (struct ifstat *)data; 2135 ifs->ascii[0] = '\0'; 2136 /* fall through */ 2137 case SIOCGIFPSRCADDR: 2138 case SIOCGIFPDSTADDR: 2139 case SIOCGLIFPHYADDR: 2140 case SIOCGIFMEDIA: 2141 case SIOCGIFGENERIC: 2142 if (ifp->if_ioctl == NULL) { 2143 error = EOPNOTSUPP; 2144 break; 2145 } 2146 ifnet_serialize_all(ifp); 2147 error = ifp->if_ioctl(ifp, cmd, data, cred); 2148 ifnet_deserialize_all(ifp); 2149 break; 2150 2151 case SIOCSIFLLADDR: 2152 error = priv_check_cred(cred, PRIV_ROOT, 0); 2153 if (error) 2154 break; 2155 error = if_setlladdr(ifp, ifr->ifr_addr.sa_data, 2156 ifr->ifr_addr.sa_len); 2157 EVENTHANDLER_INVOKE(iflladdr_event, ifp); 2158 break; 2159 2160 default: 2161 oif_flags = ifp->if_flags; 2162 if (so->so_proto == 0) { 2163 error = EOPNOTSUPP; 2164 break; 2165 } 2166 error = so_pru_control_direct(so, cmd, data, ifp); 2167 2168 if ((oif_flags ^ ifp->if_flags) & IFF_UP) { 2169 #ifdef INET6 2170 DELAY(100);/* XXX: temporary workaround for fxp issue*/ 2171 if (ifp->if_flags & IFF_UP) { 2172 crit_enter(); 2173 in6_if_up(ifp); 2174 crit_exit(); 2175 } 2176 #endif 2177 } 2178 break; 2179 } 2180 2181 ifnet_unlock(); 2182 return (error); 2183 } 2184 2185 /* 2186 * Set/clear promiscuous mode on interface ifp based on the truth value 2187 * of pswitch. The calls are reference counted so that only the first 2188 * "on" request actually has an effect, as does the final "off" request. 2189 * Results are undefined if the "off" and "on" requests are not matched. 2190 */ 2191 int 2192 ifpromisc(struct ifnet *ifp, int pswitch) 2193 { 2194 struct ifreq ifr; 2195 int error; 2196 int oldflags; 2197 2198 oldflags = ifp->if_flags; 2199 if (ifp->if_flags & IFF_PPROMISC) { 2200 /* Do nothing if device is in permanently promiscuous mode */ 2201 ifp->if_pcount += pswitch ? 1 : -1; 2202 return (0); 2203 } 2204 if (pswitch) { 2205 /* 2206 * If the device is not configured up, we cannot put it in 2207 * promiscuous mode. 
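 * In that case ENETDOWN is returned and the promiscuous reference
 * count is left untouched.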
2208 */ 2209 if ((ifp->if_flags & IFF_UP) == 0) 2210 return (ENETDOWN); 2211 if (ifp->if_pcount++ != 0) 2212 return (0); 2213 ifp->if_flags |= IFF_PROMISC; 2214 log(LOG_INFO, "%s: promiscuous mode enabled\n", 2215 ifp->if_xname); 2216 } else { 2217 if (--ifp->if_pcount > 0) 2218 return (0); 2219 ifp->if_flags &= ~IFF_PROMISC; 2220 log(LOG_INFO, "%s: promiscuous mode disabled\n", 2221 ifp->if_xname); 2222 } 2223 ifr.ifr_flags = ifp->if_flags; 2224 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2225 ifnet_serialize_all(ifp); 2226 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, NULL); 2227 ifnet_deserialize_all(ifp); 2228 if (error == 0) 2229 rt_ifmsg(ifp); 2230 else 2231 ifp->if_flags = oldflags; 2232 return error; 2233 } 2234 2235 /* 2236 * Return interface configuration 2237 * of system. List may be used 2238 * in later ioctl's (above) to get 2239 * other information. 2240 */ 2241 static int 2242 ifconf(u_long cmd, caddr_t data, struct ucred *cred) 2243 { 2244 struct ifconf *ifc = (struct ifconf *)data; 2245 struct ifnet *ifp; 2246 struct sockaddr *sa; 2247 struct ifreq ifr, *ifrp; 2248 int space = ifc->ifc_len, error = 0; 2249 2250 ifrp = ifc->ifc_req; 2251 2252 ifnet_lock(); 2253 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2254 struct ifaddr_container *ifac, *ifac_mark; 2255 struct ifaddr_marker mark; 2256 struct ifaddrhead *head; 2257 int addrs; 2258 2259 if (space <= sizeof ifr) 2260 break; 2261 2262 /* 2263 * Zero the stack declared structure first to prevent 2264 * memory disclosure. 2265 */ 2266 bzero(&ifr, sizeof(ifr)); 2267 if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name)) 2268 >= sizeof(ifr.ifr_name)) { 2269 error = ENAMETOOLONG; 2270 break; 2271 } 2272 2273 /* 2274 * Add a marker, since copyout() could block and during that 2275 * period the list could be changed. Inserting the marker to 2276 * the header of the list will not cause trouble for the code 2277 * assuming that the first element of the list is AF_LINK; the 2278 * marker will be moved to the next position w/o blocking. 2279 */ 2280 ifa_marker_init(&mark, ifp); 2281 ifac_mark = &mark.ifac; 2282 head = &ifp->if_addrheads[mycpuid]; 2283 2284 addrs = 0; 2285 TAILQ_INSERT_HEAD(head, ifac_mark, ifa_link); 2286 while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) { 2287 struct ifaddr *ifa = ifac->ifa; 2288 2289 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2290 TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link); 2291 2292 /* Ignore marker */ 2293 if (ifa->ifa_addr->sa_family == AF_UNSPEC) 2294 continue; 2295 2296 if (space <= sizeof ifr) 2297 break; 2298 sa = ifa->ifa_addr; 2299 if (cred->cr_prison && 2300 prison_if(cred, sa)) 2301 continue; 2302 addrs++; 2303 /* 2304 * Keep a reference on this ifaddr, so that it will 2305 * not be destroyed when its address is copied to 2306 * the userland, which could block. 
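 * The reference is dropped again with IFAFREE() once the copyout
 * has completed (or failed).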
2307 */ 2308 IFAREF(ifa); 2309 if (sa->sa_len <= sizeof(*sa)) { 2310 ifr.ifr_addr = *sa; 2311 error = copyout(&ifr, ifrp, sizeof ifr); 2312 ifrp++; 2313 } else { 2314 if (space < (sizeof ifr) + sa->sa_len - 2315 sizeof(*sa)) { 2316 IFAFREE(ifa); 2317 break; 2318 } 2319 space -= sa->sa_len - sizeof(*sa); 2320 error = copyout(&ifr, ifrp, 2321 sizeof ifr.ifr_name); 2322 if (error == 0) 2323 error = copyout(sa, &ifrp->ifr_addr, 2324 sa->sa_len); 2325 ifrp = (struct ifreq *) 2326 (sa->sa_len + (caddr_t)&ifrp->ifr_addr); 2327 } 2328 IFAFREE(ifa); 2329 if (error) 2330 break; 2331 space -= sizeof ifr; 2332 } 2333 TAILQ_REMOVE(head, ifac_mark, ifa_link); 2334 if (error) 2335 break; 2336 if (!addrs) { 2337 bzero(&ifr.ifr_addr, sizeof ifr.ifr_addr); 2338 error = copyout(&ifr, ifrp, sizeof ifr); 2339 if (error) 2340 break; 2341 space -= sizeof ifr; 2342 ifrp++; 2343 } 2344 } 2345 ifnet_unlock(); 2346 2347 ifc->ifc_len -= space; 2348 return (error); 2349 } 2350 2351 /* 2352 * Just like if_promisc(), but for all-multicast-reception mode. 2353 */ 2354 int 2355 if_allmulti(struct ifnet *ifp, int onswitch) 2356 { 2357 int error = 0; 2358 struct ifreq ifr; 2359 2360 crit_enter(); 2361 2362 if (onswitch) { 2363 if (ifp->if_amcount++ == 0) { 2364 ifp->if_flags |= IFF_ALLMULTI; 2365 ifr.ifr_flags = ifp->if_flags; 2366 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2367 ifnet_serialize_all(ifp); 2368 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2369 NULL); 2370 ifnet_deserialize_all(ifp); 2371 } 2372 } else { 2373 if (ifp->if_amcount > 1) { 2374 ifp->if_amcount--; 2375 } else { 2376 ifp->if_amcount = 0; 2377 ifp->if_flags &= ~IFF_ALLMULTI; 2378 ifr.ifr_flags = ifp->if_flags; 2379 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2380 ifnet_serialize_all(ifp); 2381 error = ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2382 NULL); 2383 ifnet_deserialize_all(ifp); 2384 } 2385 } 2386 2387 crit_exit(); 2388 2389 if (error == 0) 2390 rt_ifmsg(ifp); 2391 return error; 2392 } 2393 2394 /* 2395 * Add a multicast listenership to the interface in question. 2396 * The link layer provides a routine which converts 2397 */ 2398 int 2399 if_addmulti_serialized(struct ifnet *ifp, struct sockaddr *sa, 2400 struct ifmultiaddr **retifma) 2401 { 2402 struct sockaddr *llsa, *dupsa; 2403 int error; 2404 struct ifmultiaddr *ifma; 2405 2406 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2407 2408 /* 2409 * If the matching multicast address already exists 2410 * then don't add a new one, just add a reference 2411 */ 2412 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2413 if (sa_equal(sa, ifma->ifma_addr)) { 2414 ifma->ifma_refcount++; 2415 if (retifma) 2416 *retifma = ifma; 2417 return 0; 2418 } 2419 } 2420 2421 /* 2422 * Give the link layer a chance to accept/reject it, and also 2423 * find out which AF_LINK address this maps to, if it isn't one 2424 * already. 
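 * (For Ethernet interfaces this mapping is normally provided by
 * ether_resolvemulti().)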
2425 */ 2426 if (ifp->if_resolvemulti) { 2427 error = ifp->if_resolvemulti(ifp, &llsa, sa); 2428 if (error) 2429 return error; 2430 } else { 2431 llsa = NULL; 2432 } 2433 2434 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2435 dupsa = kmalloc(sa->sa_len, M_IFMADDR, M_INTWAIT); 2436 bcopy(sa, dupsa, sa->sa_len); 2437 2438 ifma->ifma_addr = dupsa; 2439 ifma->ifma_lladdr = llsa; 2440 ifma->ifma_ifp = ifp; 2441 ifma->ifma_refcount = 1; 2442 ifma->ifma_protospec = NULL; 2443 rt_newmaddrmsg(RTM_NEWMADDR, ifma); 2444 2445 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2446 if (retifma) 2447 *retifma = ifma; 2448 2449 if (llsa != NULL) { 2450 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2451 if (sa_equal(ifma->ifma_addr, llsa)) 2452 break; 2453 } 2454 if (ifma) { 2455 ifma->ifma_refcount++; 2456 } else { 2457 ifma = kmalloc(sizeof *ifma, M_IFMADDR, M_INTWAIT); 2458 dupsa = kmalloc(llsa->sa_len, M_IFMADDR, M_INTWAIT); 2459 bcopy(llsa, dupsa, llsa->sa_len); 2460 ifma->ifma_addr = dupsa; 2461 ifma->ifma_ifp = ifp; 2462 ifma->ifma_refcount = 1; 2463 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); 2464 } 2465 } 2466 /* 2467 * We are certain we have added something, so call down to the 2468 * interface to let them know about it. 2469 */ 2470 if (ifp->if_ioctl) 2471 ifp->if_ioctl(ifp, SIOCADDMULTI, 0, NULL); 2472 2473 return 0; 2474 } 2475 2476 int 2477 if_addmulti(struct ifnet *ifp, struct sockaddr *sa, 2478 struct ifmultiaddr **retifma) 2479 { 2480 int error; 2481 2482 ifnet_serialize_all(ifp); 2483 error = if_addmulti_serialized(ifp, sa, retifma); 2484 ifnet_deserialize_all(ifp); 2485 2486 return error; 2487 } 2488 2489 /* 2490 * Remove a reference to a multicast address on this interface. Yell 2491 * if the request does not match an existing membership. 2492 */ 2493 static int 2494 if_delmulti_serialized(struct ifnet *ifp, struct sockaddr *sa) 2495 { 2496 struct ifmultiaddr *ifma; 2497 2498 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2499 2500 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2501 if (sa_equal(sa, ifma->ifma_addr)) 2502 break; 2503 if (ifma == NULL) 2504 return ENOENT; 2505 2506 if (ifma->ifma_refcount > 1) { 2507 ifma->ifma_refcount--; 2508 return 0; 2509 } 2510 2511 rt_newmaddrmsg(RTM_DELMADDR, ifma); 2512 sa = ifma->ifma_lladdr; 2513 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2514 /* 2515 * Make sure the interface driver is notified 2516 * in the case of a link layer mcast group being left. 2517 */ 2518 if (ifma->ifma_addr->sa_family == AF_LINK && sa == NULL) 2519 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2520 kfree(ifma->ifma_addr, M_IFMADDR); 2521 kfree(ifma, M_IFMADDR); 2522 if (sa == NULL) 2523 return 0; 2524 2525 /* 2526 * Now look for the link-layer address which corresponds to 2527 * this network address. It had been squirreled away in 2528 * ifma->ifma_lladdr for this purpose (so we don't have 2529 * to call ifp->if_resolvemulti() again), and we saved that 2530 * value in sa above. If some nasty deleted the 2531 * link-layer address out from underneath us, we can deal because 2532 * the address we stored was is not the same as the one which was 2533 * in the record for the link-layer address. (So we don't complain 2534 * in that case.) 
2535 */ 2536 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2537 if (sa_equal(sa, ifma->ifma_addr)) 2538 break; 2539 if (ifma == NULL) 2540 return 0; 2541 2542 if (ifma->ifma_refcount > 1) { 2543 ifma->ifma_refcount--; 2544 return 0; 2545 } 2546 2547 TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link); 2548 ifp->if_ioctl(ifp, SIOCDELMULTI, 0, NULL); 2549 kfree(ifma->ifma_addr, M_IFMADDR); 2550 kfree(sa, M_IFMADDR); 2551 kfree(ifma, M_IFMADDR); 2552 2553 return 0; 2554 } 2555 2556 int 2557 if_delmulti(struct ifnet *ifp, struct sockaddr *sa) 2558 { 2559 int error; 2560 2561 ifnet_serialize_all(ifp); 2562 error = if_delmulti_serialized(ifp, sa); 2563 ifnet_deserialize_all(ifp); 2564 2565 return error; 2566 } 2567 2568 /* 2569 * Delete all multicast group membership for an interface. 2570 * Should be used to quickly flush all multicast filters. 2571 */ 2572 void 2573 if_delallmulti_serialized(struct ifnet *ifp) 2574 { 2575 struct ifmultiaddr *ifma, mark; 2576 struct sockaddr sa; 2577 2578 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2579 2580 bzero(&sa, sizeof(sa)); 2581 sa.sa_family = AF_UNSPEC; 2582 sa.sa_len = sizeof(sa); 2583 2584 bzero(&mark, sizeof(mark)); 2585 mark.ifma_addr = &sa; 2586 2587 TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, &mark, ifma_link); 2588 while ((ifma = TAILQ_NEXT(&mark, ifma_link)) != NULL) { 2589 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2590 TAILQ_INSERT_AFTER(&ifp->if_multiaddrs, ifma, &mark, 2591 ifma_link); 2592 2593 if (ifma->ifma_addr->sa_family == AF_UNSPEC) 2594 continue; 2595 2596 if_delmulti_serialized(ifp, ifma->ifma_addr); 2597 } 2598 TAILQ_REMOVE(&ifp->if_multiaddrs, &mark, ifma_link); 2599 } 2600 2601 2602 /* 2603 * Set the link layer address on an interface. 2604 * 2605 * At this time we only support certain types of interfaces, 2606 * and we don't allow the length of the address to change. 2607 */ 2608 int 2609 if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len) 2610 { 2611 struct sockaddr_dl *sdl; 2612 struct ifreq ifr; 2613 2614 sdl = IF_LLSOCKADDR(ifp); 2615 if (sdl == NULL) 2616 return (EINVAL); 2617 if (len != sdl->sdl_alen) /* don't allow length to change */ 2618 return (EINVAL); 2619 switch (ifp->if_type) { 2620 case IFT_ETHER: /* these types use struct arpcom */ 2621 case IFT_XETHER: 2622 case IFT_L2VLAN: 2623 case IFT_IEEE8023ADLAG: 2624 bcopy(lladdr, ((struct arpcom *)ifp->if_softc)->ac_enaddr, len); 2625 bcopy(lladdr, LLADDR(sdl), len); 2626 break; 2627 default: 2628 return (ENODEV); 2629 } 2630 /* 2631 * If the interface is already up, we need 2632 * to re-init it in order to reprogram its 2633 * address filter. 2634 */ 2635 ifnet_serialize_all(ifp); 2636 if ((ifp->if_flags & IFF_UP) != 0) { 2637 #ifdef INET 2638 struct ifaddr_container *ifac; 2639 #endif 2640 2641 ifp->if_flags &= ~IFF_UP; 2642 ifr.ifr_flags = ifp->if_flags; 2643 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2644 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2645 NULL); 2646 ifp->if_flags |= IFF_UP; 2647 ifr.ifr_flags = ifp->if_flags; 2648 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2649 ifp->if_ioctl(ifp, SIOCSIFFLAGS, (caddr_t)&ifr, 2650 NULL); 2651 #ifdef INET 2652 /* 2653 * Also send gratuitous ARPs to notify other nodes about 2654 * the address change. 
2655 */ 2656 TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) { 2657 struct ifaddr *ifa = ifac->ifa; 2658 2659 if (ifa->ifa_addr != NULL && 2660 ifa->ifa_addr->sa_family == AF_INET) 2661 arp_gratuitous(ifp, ifa); 2662 } 2663 #endif 2664 } 2665 ifnet_deserialize_all(ifp); 2666 return (0); 2667 } 2668 2669 struct ifmultiaddr * 2670 ifmaof_ifpforaddr(struct sockaddr *sa, struct ifnet *ifp) 2671 { 2672 struct ifmultiaddr *ifma; 2673 2674 /* TODO: need ifnet_serialize_main */ 2675 ifnet_serialize_all(ifp); 2676 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) 2677 if (sa_equal(ifma->ifma_addr, sa)) 2678 break; 2679 ifnet_deserialize_all(ifp); 2680 2681 return ifma; 2682 } 2683 2684 /* 2685 * This function locates the first real ethernet MAC from a network 2686 * card and loads it into node, returning 0 on success or ENOENT if 2687 * no suitable interfaces were found. It is used by the uuid code to 2688 * generate a unique 6-byte number. 2689 */ 2690 int 2691 if_getanyethermac(uint16_t *node, int minlen) 2692 { 2693 struct ifnet *ifp; 2694 struct sockaddr_dl *sdl; 2695 2696 ifnet_lock(); 2697 TAILQ_FOREACH(ifp, &ifnetlist, if_link) { 2698 if (ifp->if_type != IFT_ETHER) 2699 continue; 2700 sdl = IF_LLSOCKADDR(ifp); 2701 if (sdl->sdl_alen < minlen) 2702 continue; 2703 bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, node, 2704 minlen); 2705 ifnet_unlock(); 2706 return(0); 2707 } 2708 ifnet_unlock(); 2709 return (ENOENT); 2710 } 2711 2712 /* 2713 * The name argument must be a pointer to storage which will last as 2714 * long as the interface does. For physical devices, the result of 2715 * device_get_name(dev) is a good choice and for pseudo-devices a 2716 * static string works well. 2717 */ 2718 void 2719 if_initname(struct ifnet *ifp, const char *name, int unit) 2720 { 2721 ifp->if_dname = name; 2722 ifp->if_dunit = unit; 2723 if (unit != IF_DUNIT_NONE) 2724 ksnprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit); 2725 else 2726 strlcpy(ifp->if_xname, name, IFNAMSIZ); 2727 } 2728 2729 int 2730 if_printf(struct ifnet *ifp, const char *fmt, ...) 
2731 { 2732 __va_list ap; 2733 int retval; 2734 2735 retval = kprintf("%s: ", ifp->if_xname); 2736 __va_start(ap, fmt); 2737 retval += kvprintf(fmt, ap); 2738 __va_end(ap); 2739 return (retval); 2740 } 2741 2742 struct ifnet * 2743 if_alloc(uint8_t type) 2744 { 2745 struct ifnet *ifp; 2746 size_t size; 2747 2748 /* 2749 * XXX temporary hack until arpcom is setup in if_l2com 2750 */ 2751 if (type == IFT_ETHER) 2752 size = sizeof(struct arpcom); 2753 else 2754 size = sizeof(struct ifnet); 2755 2756 ifp = kmalloc(size, M_IFNET, M_WAITOK|M_ZERO); 2757 2758 ifp->if_type = type; 2759 2760 if (if_com_alloc[type] != NULL) { 2761 ifp->if_l2com = if_com_alloc[type](type, ifp); 2762 if (ifp->if_l2com == NULL) { 2763 kfree(ifp, M_IFNET); 2764 return (NULL); 2765 } 2766 } 2767 return (ifp); 2768 } 2769 2770 void 2771 if_free(struct ifnet *ifp) 2772 { 2773 kfree(ifp, M_IFNET); 2774 } 2775 2776 void 2777 ifq_set_classic(struct ifaltq *ifq) 2778 { 2779 ifq_set_methods(ifq, ifq->altq_ifp->if_mapsubq, 2780 ifsq_classic_enqueue, ifsq_classic_dequeue, ifsq_classic_request); 2781 } 2782 2783 void 2784 ifq_set_methods(struct ifaltq *ifq, altq_mapsubq_t mapsubq, 2785 ifsq_enqueue_t enqueue, ifsq_dequeue_t dequeue, ifsq_request_t request) 2786 { 2787 int q; 2788 2789 KASSERT(mapsubq != NULL, ("mapsubq is not specified")); 2790 KASSERT(enqueue != NULL, ("enqueue is not specified")); 2791 KASSERT(dequeue != NULL, ("dequeue is not specified")); 2792 KASSERT(request != NULL, ("request is not specified")); 2793 2794 ifq->altq_mapsubq = mapsubq; 2795 for (q = 0; q < ifq->altq_subq_cnt; ++q) { 2796 struct ifaltq_subque *ifsq = &ifq->altq_subq[q]; 2797 2798 ifsq->ifsq_enqueue = enqueue; 2799 ifsq->ifsq_dequeue = dequeue; 2800 ifsq->ifsq_request = request; 2801 } 2802 } 2803 2804 static void 2805 ifsq_norm_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2806 { 2807 2808 classq_add(&ifsq->ifsq_norm, m); 2809 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2810 } 2811 2812 static void 2813 ifsq_prio_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m) 2814 { 2815 2816 classq_add(&ifsq->ifsq_prio, m); 2817 ALTQ_SQ_CNTR_INC(ifsq, m->m_pkthdr.len); 2818 ALTQ_SQ_PRIO_CNTR_INC(ifsq, m->m_pkthdr.len); 2819 } 2820 2821 static struct mbuf * 2822 ifsq_norm_dequeue(struct ifaltq_subque *ifsq) 2823 { 2824 struct mbuf *m; 2825 2826 m = classq_get(&ifsq->ifsq_norm); 2827 if (m != NULL) 2828 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2829 return (m); 2830 } 2831 2832 static struct mbuf * 2833 ifsq_prio_dequeue(struct ifaltq_subque *ifsq) 2834 { 2835 struct mbuf *m; 2836 2837 m = classq_get(&ifsq->ifsq_prio); 2838 if (m != NULL) { 2839 ALTQ_SQ_CNTR_DEC(ifsq, m->m_pkthdr.len); 2840 ALTQ_SQ_PRIO_CNTR_DEC(ifsq, m->m_pkthdr.len); 2841 } 2842 return (m); 2843 } 2844 2845 int 2846 ifsq_classic_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m, 2847 struct altq_pktattr *pa __unused) 2848 { 2849 2850 M_ASSERTPKTHDR(m); 2851 again: 2852 if (ifsq->ifsq_len >= ifsq->ifsq_maxlen || 2853 ifsq->ifsq_bcnt >= ifsq->ifsq_maxbcnt) { 2854 struct mbuf *m_drop; 2855 2856 if (m->m_flags & M_PRIO) { 2857 m_drop = NULL; 2858 if (ifsq->ifsq_prio_len < (ifsq->ifsq_maxlen >> 1) && 2859 ifsq->ifsq_prio_bcnt < (ifsq->ifsq_maxbcnt >> 1)) { 2860 /* Try dropping some from normal queue. 
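 * A priority packet is only sacrificed when the priority queue
 * itself already consumes at least half of the subqueue's packet
 * or byte limit, or when the normal queue is empty.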
*/ 2861 m_drop = ifsq_norm_dequeue(ifsq); 2862 } 2863 if (m_drop == NULL) 2864 m_drop = ifsq_prio_dequeue(ifsq); 2865 } else { 2866 m_drop = ifsq_norm_dequeue(ifsq); 2867 } 2868 if (m_drop != NULL) { 2869 IFNET_STAT_INC(ifsq->ifsq_ifp, oqdrops, 1); 2870 m_freem(m_drop); 2871 goto again; 2872 } 2873 /* 2874 * No old packets could be dropped! 2875 * NOTE: Caller increases oqdrops. 2876 */ 2877 m_freem(m); 2878 return (ENOBUFS); 2879 } else { 2880 if (m->m_flags & M_PRIO) 2881 ifsq_prio_enqueue(ifsq, m); 2882 else 2883 ifsq_norm_enqueue(ifsq, m); 2884 return (0); 2885 } 2886 } 2887 2888 struct mbuf * 2889 ifsq_classic_dequeue(struct ifaltq_subque *ifsq, int op) 2890 { 2891 struct mbuf *m; 2892 2893 switch (op) { 2894 case ALTDQ_POLL: 2895 m = classq_head(&ifsq->ifsq_prio); 2896 if (m == NULL) 2897 m = classq_head(&ifsq->ifsq_norm); 2898 break; 2899 2900 case ALTDQ_REMOVE: 2901 m = ifsq_prio_dequeue(ifsq); 2902 if (m == NULL) 2903 m = ifsq_norm_dequeue(ifsq); 2904 break; 2905 2906 default: 2907 panic("unsupported ALTQ dequeue op: %d", op); 2908 } 2909 return m; 2910 } 2911 2912 int 2913 ifsq_classic_request(struct ifaltq_subque *ifsq, int req, void *arg) 2914 { 2915 switch (req) { 2916 case ALTRQ_PURGE: 2917 for (;;) { 2918 struct mbuf *m; 2919 2920 m = ifsq_classic_dequeue(ifsq, ALTDQ_REMOVE); 2921 if (m == NULL) 2922 break; 2923 m_freem(m); 2924 } 2925 break; 2926 2927 default: 2928 panic("unsupported ALTQ request: %d", req); 2929 } 2930 return 0; 2931 } 2932 2933 static void 2934 ifsq_ifstart_try(struct ifaltq_subque *ifsq, int force_sched) 2935 { 2936 struct ifnet *ifp = ifsq_get_ifp(ifsq); 2937 int running = 0, need_sched; 2938 2939 /* 2940 * Try to do direct ifnet.if_start on the subqueue first, if there is 2941 * contention on the subqueue hardware serializer, ifnet.if_start on 2942 * the subqueue will be scheduled on the subqueue owner CPU. 2943 */ 2944 if (!ifsq_tryserialize_hw(ifsq)) { 2945 /* 2946 * Subqueue hardware serializer contention happened, 2947 * ifnet.if_start on the subqueue is scheduled on 2948 * the subqueue owner CPU, and we keep going. 2949 */ 2950 ifsq_ifstart_schedule(ifsq, 1); 2951 return; 2952 } 2953 2954 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) { 2955 ifp->if_start(ifp, ifsq); 2956 if ((ifp->if_flags & IFF_RUNNING) && !ifsq_is_oactive(ifsq)) 2957 running = 1; 2958 } 2959 need_sched = ifsq_ifstart_need_schedule(ifsq, running); 2960 2961 ifsq_deserialize_hw(ifsq); 2962 2963 if (need_sched) { 2964 /* 2965 * More data need to be transmitted, ifnet.if_start on the 2966 * subqueue is scheduled on the subqueue owner CPU, and we 2967 * keep going. 2968 * NOTE: ifnet.if_start subqueue interlock is not released. 2969 */ 2970 ifsq_ifstart_schedule(ifsq, force_sched); 2971 } 2972 } 2973 2974 /* 2975 * Subqeue packets staging mechanism: 2976 * 2977 * The packets enqueued into the subqueue are staged to a certain amount 2978 * before the ifnet.if_start on the subqueue is called. In this way, the 2979 * driver could avoid writing to hardware registers upon every packet, 2980 * instead, hardware registers could be written when certain amount of 2981 * packets are put onto hardware TX ring. The measurement on several modern 2982 * NICs (emx(4), igb(4), bnx(4), bge(4), jme(4)) shows that the hardware 2983 * registers writing aggregation could save ~20% CPU time when 18bytes UDP 2984 * datagrams are transmitted at 1.48Mpps. 
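 * (That is roughly minimum-size Ethernet frames at 1GbE line rate.)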
The performance improvement by
2985 * hardware registers writing aggregation is also mentioned by Luigi Rizzo's
2986 * netmap paper (http://info.iet.unipi.it/~luigi/netmap/).
2987 *
2988 * Subqueue packet staging is performed for two entry points into drivers'
2989 * transmission function:
2990 * - Direct ifnet.if_start calling on the subqueue, i.e. ifsq_ifstart_try()
2991 * - ifnet.if_start scheduling on the subqueue, i.e. ifsq_ifstart_schedule()
2992 *
2993 * Subqueue packet staging will be stopped upon any of the following
2994 * conditions:
2995 * - If the count of packets enqueued on the current CPU is greater than or
2996 * equal to ifsq_stage_cntmax. (XXX this should be per-interface)
2997 * - If the total length of packets enqueued on the current CPU is greater
2998 * than or equal to the hardware's MTU - max_protohdr. max_protohdr is
2999 * cut from the hardware's MTU mainly because a full TCP segment's size
3000 * is usually less than hardware's MTU.
3001 * - ifsq_ifstart_schedule() is not pending on the current CPU and
3002 * ifnet.if_start subqueue interlock (ifaltq_subq.ifsq_started) is not
3003 * released.
3004 * - The if_start_rollup(), which is registered as a low priority netisr
3005 * rollup function, is called; probably because no more work is pending
3006 * for netisr.
3007 *
3008 * NOTE:
3009 * Currently subqueue packet staging is only performed in netisr threads.
3010 */
3011 int
3012 ifq_dispatch(struct ifnet *ifp, struct mbuf *m, struct altq_pktattr *pa)
3013 {
3014 struct ifaltq *ifq = &ifp->if_snd;
3015 struct ifaltq_subque *ifsq;
3016 int error, start = 0, len, mcast = 0, avoid_start = 0;
3017 struct ifsubq_stage_head *head = NULL;
3018 struct ifsubq_stage *stage = NULL;
3019 struct globaldata *gd = mycpu;
3020 struct thread *td = gd->gd_curthread;
3021
3022 crit_enter_quick(td);
3023
3024 ifsq = ifq_map_subq(ifq, gd->gd_cpuid);
3025 ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq);
3026
3027 len = m->m_pkthdr.len;
3028 if (m->m_flags & M_MCAST)
3029 mcast = 1;
3030
3031 if (td->td_type == TD_TYPE_NETISR) {
3032 head = &ifsubq_stage_heads[mycpuid];
3033 stage = ifsq_get_stage(ifsq, mycpuid);
3034
3035 stage->stg_cnt++;
3036 stage->stg_len += len;
3037 if (stage->stg_cnt < ifsq_stage_cntmax &&
3038 stage->stg_len < (ifp->if_mtu - max_protohdr))
3039 avoid_start = 1;
3040 }
3041
3042 ALTQ_SQ_LOCK(ifsq);
3043 error = ifsq_enqueue_locked(ifsq, m, pa);
3044 if (error) {
3045 IFNET_STAT_INC(ifp, oqdrops, 1);
3046 if (!ifsq_data_ready(ifsq)) {
3047 ALTQ_SQ_UNLOCK(ifsq);
3048 crit_exit_quick(td);
3049 return error;
3050 }
3051 avoid_start = 0;
3052 }
3053 if (!ifsq_is_started(ifsq)) {
3054 if (avoid_start) {
3055 ALTQ_SQ_UNLOCK(ifsq);
3056
3057 KKASSERT(!error);
3058 if ((stage->stg_flags & IFSQ_STAGE_FLAG_QUED) == 0)
3059 ifsq_stage_insert(head, stage);
3060
3061 IFNET_STAT_INC(ifp, obytes, len);
3062 if (mcast)
3063 IFNET_STAT_INC(ifp, omcasts, 1);
3064 crit_exit_quick(td);
3065 return error;
3066 }
3067
3068 /*
3069 * Hold the subqueue interlock of ifnet.if_start
3070 */
3071 ifsq_set_started(ifsq);
3072 start = 1;
3073 }
3074 ALTQ_SQ_UNLOCK(ifsq);
3075
3076 if (!error) {
3077 IFNET_STAT_INC(ifp, obytes, len);
3078 if (mcast)
3079 IFNET_STAT_INC(ifp, omcasts, 1);
3080 }
3081
3082 if (stage != NULL) {
3083 if (!start && (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED)) {
3084 KKASSERT(stage->stg_flags & IFSQ_STAGE_FLAG_QUED);
3085 if (!avoid_start) {
3086 ifsq_stage_remove(head, stage);
3087 ifsq_ifstart_schedule(ifsq, 1);
3088 }
3089 crit_exit_quick(td);
3090 return error;
3091 }
3092
3093 if (stage->stg_flags & IFSQ_STAGE_FLAG_QUED) { 3094 ifsq_stage_remove(head, stage); 3095 } else { 3096 stage->stg_cnt = 0; 3097 stage->stg_len = 0; 3098 } 3099 } 3100 3101 if (!start) { 3102 crit_exit_quick(td); 3103 return error; 3104 } 3105 3106 ifsq_ifstart_try(ifsq, 0); 3107 3108 crit_exit_quick(td); 3109 return error; 3110 } 3111 3112 void * 3113 ifa_create(int size) 3114 { 3115 struct ifaddr *ifa; 3116 int i; 3117 3118 KASSERT(size >= sizeof(*ifa), ("ifaddr size too small")); 3119 3120 ifa = kmalloc(size, M_IFADDR, M_INTWAIT | M_ZERO); 3121 3122 /* 3123 * Make ifa_container availabel on all CPUs, since they 3124 * could be accessed by any threads. 3125 */ 3126 ifa->ifa_containers = 3127 kmalloc_cachealign(ncpus * sizeof(struct ifaddr_container), 3128 M_IFADDR, M_INTWAIT | M_ZERO); 3129 3130 ifa->ifa_ncnt = ncpus; 3131 for (i = 0; i < ncpus; ++i) { 3132 struct ifaddr_container *ifac = &ifa->ifa_containers[i]; 3133 3134 ifac->ifa_magic = IFA_CONTAINER_MAGIC; 3135 ifac->ifa = ifa; 3136 ifac->ifa_refcnt = 1; 3137 } 3138 #ifdef IFADDR_DEBUG 3139 kprintf("alloc ifa %p %d\n", ifa, size); 3140 #endif 3141 return ifa; 3142 } 3143 3144 void 3145 ifac_free(struct ifaddr_container *ifac, int cpu_id) 3146 { 3147 struct ifaddr *ifa = ifac->ifa; 3148 3149 KKASSERT(ifac->ifa_magic == IFA_CONTAINER_MAGIC); 3150 KKASSERT(ifac->ifa_refcnt == 0); 3151 KASSERT(ifac->ifa_listmask == 0, 3152 ("ifa is still on %#x lists", ifac->ifa_listmask)); 3153 3154 ifac->ifa_magic = IFA_CONTAINER_DEAD; 3155 3156 #ifdef IFADDR_DEBUG_VERBOSE 3157 kprintf("try free ifa %p cpu_id %d\n", ifac->ifa, cpu_id); 3158 #endif 3159 3160 KASSERT(ifa->ifa_ncnt > 0 && ifa->ifa_ncnt <= ncpus, 3161 ("invalid # of ifac, %d", ifa->ifa_ncnt)); 3162 if (atomic_fetchadd_int(&ifa->ifa_ncnt, -1) == 1) { 3163 #ifdef IFADDR_DEBUG 3164 kprintf("free ifa %p\n", ifa); 3165 #endif 3166 kfree(ifa->ifa_containers, M_IFADDR); 3167 kfree(ifa, M_IFADDR); 3168 } 3169 } 3170 3171 static void 3172 ifa_iflink_dispatch(netmsg_t nmsg) 3173 { 3174 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3175 struct ifaddr *ifa = msg->ifa; 3176 struct ifnet *ifp = msg->ifp; 3177 int cpu = mycpuid; 3178 struct ifaddr_container *ifac; 3179 3180 crit_enter(); 3181 3182 ifac = &ifa->ifa_containers[cpu]; 3183 ASSERT_IFAC_VALID(ifac); 3184 KASSERT((ifac->ifa_listmask & IFA_LIST_IFADDRHEAD) == 0, 3185 ("ifaddr is on if_addrheads")); 3186 3187 ifac->ifa_listmask |= IFA_LIST_IFADDRHEAD; 3188 if (msg->tail) 3189 TAILQ_INSERT_TAIL(&ifp->if_addrheads[cpu], ifac, ifa_link); 3190 else 3191 TAILQ_INSERT_HEAD(&ifp->if_addrheads[cpu], ifac, ifa_link); 3192 3193 crit_exit(); 3194 3195 netisr_forwardmsg_all(&nmsg->base, cpu + 1); 3196 } 3197 3198 void 3199 ifa_iflink(struct ifaddr *ifa, struct ifnet *ifp, int tail) 3200 { 3201 struct netmsg_ifaddr msg; 3202 3203 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3204 0, ifa_iflink_dispatch); 3205 msg.ifa = ifa; 3206 msg.ifp = ifp; 3207 msg.tail = tail; 3208 3209 netisr_domsg(&msg.base, 0); 3210 } 3211 3212 static void 3213 ifa_ifunlink_dispatch(netmsg_t nmsg) 3214 { 3215 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3216 struct ifaddr *ifa = msg->ifa; 3217 struct ifnet *ifp = msg->ifp; 3218 int cpu = mycpuid; 3219 struct ifaddr_container *ifac; 3220 3221 crit_enter(); 3222 3223 ifac = &ifa->ifa_containers[cpu]; 3224 ASSERT_IFAC_VALID(ifac); 3225 KASSERT(ifac->ifa_listmask & IFA_LIST_IFADDRHEAD, 3226 ("ifaddr is not on if_addrhead")); 3227 3228 TAILQ_REMOVE(&ifp->if_addrheads[cpu], ifac, ifa_link); 3229 
ifac->ifa_listmask &= ~IFA_LIST_IFADDRHEAD; 3230 3231 crit_exit(); 3232 3233 netisr_forwardmsg_all(&nmsg->base, cpu + 1); 3234 } 3235 3236 void 3237 ifa_ifunlink(struct ifaddr *ifa, struct ifnet *ifp) 3238 { 3239 struct netmsg_ifaddr msg; 3240 3241 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3242 0, ifa_ifunlink_dispatch); 3243 msg.ifa = ifa; 3244 msg.ifp = ifp; 3245 3246 netisr_domsg(&msg.base, 0); 3247 } 3248 3249 static void 3250 ifa_destroy_dispatch(netmsg_t nmsg) 3251 { 3252 struct netmsg_ifaddr *msg = (struct netmsg_ifaddr *)nmsg; 3253 3254 IFAFREE(msg->ifa); 3255 netisr_forwardmsg_all(&nmsg->base, mycpuid + 1); 3256 } 3257 3258 void 3259 ifa_destroy(struct ifaddr *ifa) 3260 { 3261 struct netmsg_ifaddr msg; 3262 3263 netmsg_init(&msg.base, NULL, &curthread->td_msgport, 3264 0, ifa_destroy_dispatch); 3265 msg.ifa = ifa; 3266 3267 netisr_domsg(&msg.base, 0); 3268 } 3269 3270 static void 3271 if_start_rollup(void) 3272 { 3273 struct ifsubq_stage_head *head = &ifsubq_stage_heads[mycpuid]; 3274 struct ifsubq_stage *stage; 3275 3276 crit_enter(); 3277 3278 while ((stage = TAILQ_FIRST(&head->stg_head)) != NULL) { 3279 struct ifaltq_subque *ifsq = stage->stg_subq; 3280 int is_sched = 0; 3281 3282 if (stage->stg_flags & IFSQ_STAGE_FLAG_SCHED) 3283 is_sched = 1; 3284 ifsq_stage_remove(head, stage); 3285 3286 if (is_sched) { 3287 ifsq_ifstart_schedule(ifsq, 1); 3288 } else { 3289 int start = 0; 3290 3291 ALTQ_SQ_LOCK(ifsq); 3292 if (!ifsq_is_started(ifsq)) { 3293 /* 3294 * Hold the subqueue interlock of 3295 * ifnet.if_start 3296 */ 3297 ifsq_set_started(ifsq); 3298 start = 1; 3299 } 3300 ALTQ_SQ_UNLOCK(ifsq); 3301 3302 if (start) 3303 ifsq_ifstart_try(ifsq, 1); 3304 } 3305 KKASSERT((stage->stg_flags & 3306 (IFSQ_STAGE_FLAG_QUED | IFSQ_STAGE_FLAG_SCHED)) == 0); 3307 } 3308 3309 crit_exit(); 3310 } 3311 3312 static void 3313 ifnetinit(void *dummy __unused) 3314 { 3315 int i; 3316 3317 /* XXX netisr_ncpus */ 3318 for (i = 0; i < ncpus; ++i) 3319 TAILQ_INIT(&ifsubq_stage_heads[i].stg_head); 3320 netisr_register_rollup(if_start_rollup, NETISR_ROLLUP_PRIO_IFSTART); 3321 } 3322 3323 void 3324 if_register_com_alloc(u_char type, 3325 if_com_alloc_t *a, if_com_free_t *f) 3326 { 3327 3328 KASSERT(if_com_alloc[type] == NULL, 3329 ("if_register_com_alloc: %d already registered", type)); 3330 KASSERT(if_com_free[type] == NULL, 3331 ("if_register_com_alloc: %d free already registered", type)); 3332 3333 if_com_alloc[type] = a; 3334 if_com_free[type] = f; 3335 } 3336 3337 void 3338 if_deregister_com_alloc(u_char type) 3339 { 3340 3341 KASSERT(if_com_alloc[type] != NULL, 3342 ("if_deregister_com_alloc: %d not registered", type)); 3343 KASSERT(if_com_free[type] != NULL, 3344 ("if_deregister_com_alloc: %d free not registered", type)); 3345 if_com_alloc[type] = NULL; 3346 if_com_free[type] = NULL; 3347 } 3348 3349 void 3350 ifq_set_maxlen(struct ifaltq *ifq, int len) 3351 { 3352 ifq->altq_maxlen = len + (ncpus * ifsq_stage_cntmax); 3353 } 3354 3355 int 3356 ifq_mapsubq_default(struct ifaltq *ifq __unused, int cpuid __unused) 3357 { 3358 return ALTQ_SUBQ_INDEX_DEFAULT; 3359 } 3360 3361 int 3362 ifq_mapsubq_modulo(struct ifaltq *ifq, int cpuid) 3363 { 3364 3365 return (cpuid % ifq->altq_subq_mappriv); 3366 } 3367 3368 static void 3369 ifsq_watchdog(void *arg) 3370 { 3371 struct ifsubq_watchdog *wd = arg; 3372 struct ifnet *ifp; 3373 3374 if (__predict_true(wd->wd_timer == 0 || --wd->wd_timer)) 3375 goto done; 3376 3377 ifp = ifsq_get_ifp(wd->wd_subq); 3378 if (ifnet_tryserialize_all(ifp)) { 3379 
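		/*
		 * All interface serializers were acquired without blocking,
		 * so it is safe to let the driver's watchdog callback inspect
		 * and reset its TX state here.
		 */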
wd->wd_watchdog(wd->wd_subq); 3380 ifnet_deserialize_all(ifp); 3381 } else { 3382 /* try again next timeout */ 3383 wd->wd_timer = 1; 3384 } 3385 done: 3386 ifsq_watchdog_reset(wd); 3387 } 3388 3389 static void 3390 ifsq_watchdog_reset(struct ifsubq_watchdog *wd) 3391 { 3392 callout_reset_bycpu(&wd->wd_callout, hz, ifsq_watchdog, wd, 3393 ifsq_get_cpuid(wd->wd_subq)); 3394 } 3395 3396 void 3397 ifsq_watchdog_init(struct ifsubq_watchdog *wd, struct ifaltq_subque *ifsq, 3398 ifsq_watchdog_t watchdog) 3399 { 3400 callout_init_mp(&wd->wd_callout); 3401 wd->wd_timer = 0; 3402 wd->wd_subq = ifsq; 3403 wd->wd_watchdog = watchdog; 3404 } 3405 3406 void 3407 ifsq_watchdog_start(struct ifsubq_watchdog *wd) 3408 { 3409 wd->wd_timer = 0; 3410 ifsq_watchdog_reset(wd); 3411 } 3412 3413 void 3414 ifsq_watchdog_stop(struct ifsubq_watchdog *wd) 3415 { 3416 wd->wd_timer = 0; 3417 callout_stop(&wd->wd_callout); 3418 } 3419 3420 void 3421 ifnet_lock(void) 3422 { 3423 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3424 ("try holding ifnet lock in netisr")); 3425 mtx_lock(&ifnet_mtx); 3426 } 3427 3428 void 3429 ifnet_unlock(void) 3430 { 3431 KASSERT(curthread->td_type != TD_TYPE_NETISR, 3432 ("try holding ifnet lock in netisr")); 3433 mtx_unlock(&ifnet_mtx); 3434 } 3435 3436 static struct ifnet_array * 3437 ifnet_array_alloc(int count) 3438 { 3439 struct ifnet_array *arr; 3440 3441 arr = kmalloc(__offsetof(struct ifnet_array, ifnet_arr[count]), 3442 M_IFNET, M_WAITOK); 3443 arr->ifnet_count = count; 3444 3445 return arr; 3446 } 3447 3448 static void 3449 ifnet_array_free(struct ifnet_array *arr) 3450 { 3451 if (arr == &ifnet_array0) 3452 return; 3453 kfree(arr, M_IFNET); 3454 } 3455 3456 static struct ifnet_array * 3457 ifnet_array_add(struct ifnet *ifp, const struct ifnet_array *old_arr) 3458 { 3459 struct ifnet_array *arr; 3460 int count, i; 3461 3462 KASSERT(old_arr->ifnet_count >= 0, 3463 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3464 count = old_arr->ifnet_count + 1; 3465 arr = ifnet_array_alloc(count); 3466 3467 /* 3468 * Save the old ifnet array and append this ifp to the end of 3469 * the new ifnet array. 3470 */ 3471 for (i = 0; i < old_arr->ifnet_count; ++i) { 3472 KASSERT(old_arr->ifnet_arr[i] != ifp, 3473 ("%s is already in ifnet array", ifp->if_xname)); 3474 arr->ifnet_arr[i] = old_arr->ifnet_arr[i]; 3475 } 3476 KASSERT(i == count - 1, 3477 ("add %s, ifnet array index mismatch, should be %d, but got %d", 3478 ifp->if_xname, count - 1, i)); 3479 arr->ifnet_arr[i] = ifp; 3480 3481 return arr; 3482 } 3483 3484 static struct ifnet_array * 3485 ifnet_array_del(struct ifnet *ifp, const struct ifnet_array *old_arr) 3486 { 3487 struct ifnet_array *arr; 3488 int count, i, idx, found = 0; 3489 3490 KASSERT(old_arr->ifnet_count > 0, 3491 ("invalid ifnet array count %d", old_arr->ifnet_count)); 3492 count = old_arr->ifnet_count - 1; 3493 arr = ifnet_array_alloc(count); 3494 3495 /* 3496 * Save the old ifnet array, but skip this ifp. 
3497 */ 3498 idx = 0; 3499 for (i = 0; i < old_arr->ifnet_count; ++i) { 3500 if (old_arr->ifnet_arr[i] == ifp) { 3501 KASSERT(!found, 3502 ("dup %s is in ifnet array", ifp->if_xname)); 3503 found = 1; 3504 continue; 3505 } 3506 KASSERT(idx < count, 3507 ("invalid ifnet array index %d, count %d", idx, count)); 3508 arr->ifnet_arr[idx] = old_arr->ifnet_arr[i]; 3509 ++idx; 3510 } 3511 KASSERT(found, ("%s is not in ifnet array", ifp->if_xname)); 3512 KASSERT(idx == count, 3513 ("del %s, ifnet array count mismatch, should be %d, but got %d ", 3514 ifp->if_xname, count, idx)); 3515 3516 return arr; 3517 } 3518 3519 const struct ifnet_array * 3520 ifnet_array_get(void) 3521 { 3522 const struct ifnet_array *ret; 3523 3524 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3525 ret = ifnet_array; 3526 /* Make sure 'ret' is really used. */ 3527 cpu_ccfence(); 3528 return (ret); 3529 } 3530 3531 int 3532 ifnet_array_isempty(void) 3533 { 3534 KASSERT(curthread->td_type == TD_TYPE_NETISR, ("not in netisr")); 3535 if (ifnet_array->ifnet_count == 0) 3536 return 1; 3537 else 3538 return 0; 3539 } 3540 3541 void 3542 ifa_marker_init(struct ifaddr_marker *mark, struct ifnet *ifp) 3543 { 3544 struct ifaddr *ifa; 3545 3546 memset(mark, 0, sizeof(*mark)); 3547 ifa = &mark->ifa; 3548 3549 mark->ifac.ifa = ifa; 3550 3551 ifa->ifa_addr = &mark->addr; 3552 ifa->ifa_dstaddr = &mark->dstaddr; 3553 ifa->ifa_netmask = &mark->netmask; 3554 ifa->ifa_ifp = ifp; 3555 } 3556 3557 static int 3558 if_ringcnt_fixup(int ring_cnt, int ring_cntmax) 3559 { 3560 3561 KASSERT(ring_cntmax > 0, ("invalid ring count max %d", ring_cntmax)); 3562 3563 if (ring_cnt <= 0 || ring_cnt > ring_cntmax) 3564 ring_cnt = ring_cntmax; 3565 if (ring_cnt > netisr_ncpus) 3566 ring_cnt = netisr_ncpus; 3567 return (ring_cnt); 3568 } 3569 3570 static void 3571 if_ringmap_set_grid(device_t dev, struct if_ringmap *rm, int grid) 3572 { 3573 int i, offset; 3574 3575 KASSERT(grid > 0, ("invalid if_ringmap grid %d", grid)); 3576 KASSERT(grid >= rm->rm_cnt, ("invalid if_ringmap grid %d, count %d", 3577 grid, rm->rm_cnt)); 3578 rm->rm_grid = grid; 3579 3580 offset = (rm->rm_grid * device_get_unit(dev)) % netisr_ncpus; 3581 for (i = 0; i < rm->rm_cnt; ++i) { 3582 rm->rm_cpumap[i] = offset + i; 3583 KASSERT(rm->rm_cpumap[i] < netisr_ncpus, 3584 ("invalid cpumap[%d] = %d, offset %d", i, 3585 rm->rm_cpumap[i], offset)); 3586 } 3587 } 3588 3589 static struct if_ringmap * 3590 if_ringmap_alloc_flags(device_t dev, int ring_cnt, int ring_cntmax, 3591 uint32_t flags) 3592 { 3593 struct if_ringmap *rm; 3594 int i, grid = 0, prev_grid; 3595 3596 ring_cnt = if_ringcnt_fixup(ring_cnt, ring_cntmax); 3597 rm = kmalloc(__offsetof(struct if_ringmap, rm_cpumap[ring_cnt]), 3598 M_DEVBUF, M_WAITOK | M_ZERO); 3599 3600 rm->rm_cnt = ring_cnt; 3601 if (flags & RINGMAP_FLAG_POWEROF2) 3602 rm->rm_cnt = 1 << (fls(rm->rm_cnt) - 1); 3603 3604 prev_grid = netisr_ncpus; 3605 for (i = 0; i < netisr_ncpus; ++i) { 3606 if (netisr_ncpus % (i + 1) != 0) 3607 continue; 3608 3609 grid = netisr_ncpus / (i + 1); 3610 if (rm->rm_cnt > grid) { 3611 grid = prev_grid; 3612 break; 3613 } 3614 3615 if (rm->rm_cnt > netisr_ncpus / (i + 2)) 3616 break; 3617 prev_grid = grid; 3618 } 3619 if_ringmap_set_grid(dev, rm, grid); 3620 3621 return (rm); 3622 } 3623 3624 struct if_ringmap * 3625 if_ringmap_alloc(device_t dev, int ring_cnt, int ring_cntmax) 3626 { 3627 3628 return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax, 3629 RINGMAP_FLAG_NONE)); 3630 } 3631 3632 struct if_ringmap * 3633 
if_ringmap_alloc2(device_t dev, int ring_cnt, int ring_cntmax) 3634 { 3635 3636 return (if_ringmap_alloc_flags(dev, ring_cnt, ring_cntmax, 3637 RINGMAP_FLAG_POWEROF2)); 3638 } 3639 3640 void 3641 if_ringmap_free(struct if_ringmap *rm) 3642 { 3643 3644 kfree(rm, M_DEVBUF); 3645 } 3646 3647 /* 3648 * Align the two ringmaps. 3649 * 3650 * e.g. 8 netisrs, rm0 contains 4 rings, rm1 contains 2 rings. 3651 * 3652 * Before: 3653 * 3654 * CPU 0 1 2 3 4 5 6 7 3655 * NIC_RX n0 n1 n2 n3 3656 * NIC_TX N0 N1 3657 * 3658 * After: 3659 * 3660 * CPU 0 1 2 3 4 5 6 7 3661 * NIC_RX n0 n1 n2 n3 3662 * NIC_TX N0 N1 3663 */ 3664 void 3665 if_ringmap_align(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1) 3666 { 3667 3668 if (rm0->rm_grid > rm1->rm_grid) 3669 if_ringmap_set_grid(dev, rm1, rm0->rm_grid); 3670 else if (rm0->rm_grid < rm1->rm_grid) 3671 if_ringmap_set_grid(dev, rm0, rm1->rm_grid); 3672 } 3673 3674 void 3675 if_ringmap_match(device_t dev, struct if_ringmap *rm0, struct if_ringmap *rm1) 3676 { 3677 int subset_grid, cnt, divisor, mod, offset, i; 3678 struct if_ringmap *subset_rm, *rm; 3679 int old_rm0_grid, old_rm1_grid; 3680 3681 if (rm0->rm_grid == rm1->rm_grid) 3682 return; 3683 3684 /* Save grid for later use */ 3685 old_rm0_grid = rm0->rm_grid; 3686 old_rm1_grid = rm1->rm_grid; 3687 3688 if_ringmap_align(dev, rm0, rm1); 3689 3690 /* 3691 * Re-shuffle rings to get more even distribution. 3692 * 3693 * e.g. 12 netisrs, rm0 contains 4 rings, rm1 contains 2 rings. 3694 * 3695 * CPU 0 1 2 3 4 5 6 7 8 9 10 11 3696 * 3697 * NIC_RX a0 a1 a2 a3 b0 b1 b2 b3 c0 c1 c2 c3 3698 * NIC_TX A0 A1 B0 B1 C0 C1 3699 * 3700 * NIC_RX d0 d1 d2 d3 e0 e1 e2 e3 f0 f1 f2 f3 3701 * NIC_TX D0 D1 E0 E1 F0 F1 3702 */ 3703 3704 if (rm0->rm_cnt >= (2 * old_rm1_grid)) { 3705 cnt = rm0->rm_cnt; 3706 subset_grid = old_rm1_grid; 3707 subset_rm = rm1; 3708 rm = rm0; 3709 } else if (rm1->rm_cnt > (2 * old_rm0_grid)) { 3710 cnt = rm1->rm_cnt; 3711 subset_grid = old_rm0_grid; 3712 subset_rm = rm0; 3713 rm = rm1; 3714 } else { 3715 /* No space to shuffle. 
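		 * i.e. neither ringmap's ring count spans two of the
		 * other's original grids.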
*/ 3716 return; 3717 } 3718 3719 mod = cnt / subset_grid; 3720 KKASSERT(mod >= 2); 3721 divisor = netisr_ncpus / rm->rm_grid; 3722 offset = ((device_get_unit(dev) / divisor) % mod) * subset_grid; 3723 3724 for (i = 0; i < subset_rm->rm_cnt; ++i) { 3725 subset_rm->rm_cpumap[i] += offset; 3726 KASSERT(subset_rm->rm_cpumap[i] < netisr_ncpus, 3727 ("match: invalid cpumap[%d] = %d, offset %d", 3728 i, subset_rm->rm_cpumap[i], offset)); 3729 } 3730 #ifdef INVARIANTS 3731 for (i = 0; i < subset_rm->rm_cnt; ++i) { 3732 int j; 3733 3734 for (j = 0; j < rm->rm_cnt; ++j) { 3735 if (rm->rm_cpumap[j] == subset_rm->rm_cpumap[i]) 3736 break; 3737 } 3738 KASSERT(j < rm->rm_cnt, 3739 ("subset cpumap[%d] = %d not found in superset", 3740 i, subset_rm->rm_cpumap[i])); 3741 } 3742 #endif 3743 } 3744 3745 int 3746 if_ringmap_count(const struct if_ringmap *rm) 3747 { 3748 3749 return (rm->rm_cnt); 3750 } 3751 3752 int 3753 if_ringmap_cpumap(const struct if_ringmap *rm, int ring) 3754 { 3755 3756 KASSERT(ring >= 0 && ring < rm->rm_cnt, ("invalid ring %d", ring)); 3757 return (rm->rm_cpumap[ring]); 3758 } 3759 3760 void 3761 if_ringmap_rdrtable(const struct if_ringmap *rm, int table[], int table_nent) 3762 { 3763 int i, grid_idx, grid_cnt, patch_off, patch_cnt, ncopy; 3764 3765 KASSERT(table_nent > 0 && (table_nent & NETISR_CPUMASK) == 0, 3766 ("invalid redirect table entries %d", table_nent)); 3767 3768 grid_idx = 0; 3769 for (i = 0; i < NETISR_CPUMAX; ++i) { 3770 table[i] = grid_idx++ % rm->rm_cnt; 3771 3772 if (grid_idx == rm->rm_grid) 3773 grid_idx = 0; 3774 } 3775 3776 /* 3777 * Make the ring distributed more evenly for the remainder 3778 * of each grid. 3779 * 3780 * e.g. 12 netisrs, rm contains 8 rings. 3781 * 3782 * Redirect table before: 3783 * 3784 * 0 1 2 3 4 5 6 7 0 1 2 3 0 1 2 3 3785 * 4 5 6 7 0 1 2 3 0 1 2 3 4 5 6 7 3786 * 0 1 2 3 0 1 2 3 4 5 6 7 0 1 2 3 3787 * .... 3788 * 3789 * Redirect table after being patched (pX, patched entries): 3790 * 3791 * 0 1 2 3 4 5 6 7 p0 p1 p2 p3 0 1 2 3 3792 * 4 5 6 7 p4 p5 p6 p7 0 1 2 3 4 5 6 7 3793 * p0 p1 p2 p3 0 1 2 3 4 5 6 7 p4 p5 p6 p7 3794 * .... 
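 *
 * The patched entries are assigned their own continuous round-robin
 * sequence across grids, so the remainder slots of successive grids
 * do not always land on the first few rings.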
3795 */ 3796 patch_cnt = rm->rm_grid % rm->rm_cnt; 3797 if (patch_cnt == 0) 3798 goto done; 3799 patch_off = rm->rm_grid - (rm->rm_grid % rm->rm_cnt); 3800 3801 grid_cnt = roundup(NETISR_CPUMAX, rm->rm_grid) / rm->rm_grid; 3802 grid_idx = 0; 3803 for (i = 0; i < grid_cnt; ++i) { 3804 int j; 3805 3806 for (j = 0; j < patch_cnt; ++j) { 3807 int fix_idx; 3808 3809 fix_idx = (i * rm->rm_grid) + patch_off + j; 3810 if (fix_idx >= NETISR_CPUMAX) 3811 goto done; 3812 table[fix_idx] = grid_idx++ % rm->rm_cnt; 3813 } 3814 } 3815 done: 3816 /* 3817 * If the device supports larger redirect table, duplicate 3818 * the first NETISR_CPUMAX entries to the rest of the table, 3819 * so that it matches upper layer's expectation: 3820 * (hash & NETISR_CPUMASK) % netisr_ncpus 3821 */ 3822 ncopy = table_nent / NETISR_CPUMAX; 3823 for (i = 1; i < ncopy; ++i) { 3824 memcpy(&table[i * NETISR_CPUMAX], table, 3825 NETISR_CPUMAX * sizeof(table[0])); 3826 } 3827 if (if_ringmap_dumprdr) { 3828 for (i = 0; i < table_nent; ++i) { 3829 if (i != 0 && i % 16 == 0) 3830 kprintf("\n"); 3831 kprintf("%03d ", table[i]); 3832 } 3833 kprintf("\n"); 3834 } 3835 } 3836 3837 int 3838 if_ringmap_cpumap_sysctl(SYSCTL_HANDLER_ARGS) 3839 { 3840 struct if_ringmap *rm = arg1; 3841 int i, error = 0; 3842 3843 for (i = 0; i < rm->rm_cnt; ++i) { 3844 int cpu = rm->rm_cpumap[i]; 3845 3846 error = SYSCTL_OUT(req, &cpu, sizeof(cpu)); 3847 if (error) 3848 break; 3849 } 3850 return (error); 3851 } 3852
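
/*
 * Example (not compiled): a minimal sketch of how a hypothetical
 * multi-ring driver might use the if_ringmap API above to map its
 * RX rings onto netisr CPUs and to build an RSS redirect table.
 * The driver name, softc layout and ring counts are illustrative
 * assumptions, not part of this file's API.
 */
#if 0
struct exm_softc {
	struct if_ringmap *exm_rx_rmap;
	int		exm_rdr_table[NETISR_CPUMAX];
};

static void
exm_setup_rings(device_t dev, struct exm_softc *sc)
{
	int i, nrings;

	/* Request up to 4 RX rings, capped by the hardware maximum of 8. */
	sc->exm_rx_rmap = if_ringmap_alloc(dev, 4, 8);
	nrings = if_ringmap_count(sc->exm_rx_rmap);

	/* Bind each ring's interrupt to the CPU chosen by the ringmap. */
	for (i = 0; i < nrings; ++i) {
		int cpu = if_ringmap_cpumap(sc->exm_rx_rmap, i);

		/* ... program MSI-X vector i to target 'cpu' ... */
	}

	/* Fill the RSS redirect table expected by the upper layers. */
	if_ringmap_rdrtable(sc->exm_rx_rmap, sc->exm_rdr_table,
	    NETISR_CPUMAX);
}

static void
exm_teardown_rings(struct exm_softc *sc)
{
	if_ringmap_free(sc->exm_rx_rmap);
}
#endif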