/* $NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $
 * $FreeBSD: head/sys/net/ieee8023ad_lacp.c 253687 2013-07-26 19:41:13Z adrian $
 */

/*-
 * Copyright (c)2005 YAMAMOTO Takashi,
 * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * IEEE 802.3ad (LACP) protocol engine for lagg(4).
 *
 * Implements the LACP receive, mux, periodic-transmit and transmit state
 * machines per port, plus aggregator selection and the active port map
 * used by lacp_select_tx_port() to pick an egress port for each mbuf.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h> /* hz */
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <machine/stdarg.h>
#include <sys/lock.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/lagg/if_lagg.h>
#include <net/lagg/ieee8023ad_lacp.h>

/*
 * actor system priority and port priority.
 * XXX should be configurable.
 */

#define	LACP_SYSTEM_PRIO	0x8000
#define	LACP_PORT_PRIO		0x8000

/* 01:80:c2:00:00:02 - the Slow Protocols multicast group address */
const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
    { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };

/* Expected TLV layout of a received LACPDU, used by tlv_check(). */
static const struct tlv_template lacp_info_tlv_template[] = {
	{ LACP_TYPE_ACTORINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
	{ LACP_TYPE_PARTNERINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
	{ LACP_TYPE_COLLECTORINFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
	{ 0, 0 },
};

static const struct tlv_template marker_info_tlv_template[] = {
	{ MARKER_TYPE_INFO,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
	{ 0, 0 },
};

static const struct tlv_template marker_response_tlv_template[] = {
	{ MARKER_TYPE_RESPONSE,
	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
	{ 0, 0 },
};

typedef void (*lacp_timer_func_t)(struct lacp_port *);

static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
static void	lacp_fill_markerinfo(struct lacp_port *,
		    struct lacp_markerinfo *);

static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
static void	lacp_suppress_distributing(struct lacp_softc *,
		    struct lacp_aggregator *);
static void	lacp_transit_expire(void *);
static void	lacp_update_portmap(struct lacp_softc *);
static void	lacp_select_active_aggregator(struct lacp_softc *);
static uint16_t	lacp_compose_key(struct lacp_port *);
static int	tlv_check(const void *, size_t, const struct tlvhdr *,
		    const struct tlv_template *, boolean_t);
static void	lacp_tick(void *);

static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
		    const struct lacp_port *);
static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);
static int	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
		    const struct lacp_port *);
static int	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);

static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
		    struct lacp_port *);
static void	lacp_aggregator_addref(struct lacp_softc *,
		    struct lacp_aggregator *);
static void	lacp_aggregator_delref(struct lacp_softc *,
		    struct lacp_aggregator *);

/* receive machine */

static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
static void	lacp_sm_rx_timer(struct lacp_port *);
static void	lacp_sm_rx_set_expired(struct lacp_port *);
static void	lacp_sm_rx_update_ntt(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_record_pdu(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_update_selected(struct lacp_port *,
		    const struct lacpdu *);
static void	lacp_sm_rx_record_default(struct lacp_port *);
static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
		    const struct lacp_peerinfo *);

/* mux machine */

static void	lacp_sm_mux(struct lacp_port *);
static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
static void	lacp_sm_mux_timer(struct lacp_port *);

/* periodic transmit machine */

static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
static void	lacp_sm_ptx_timer(struct lacp_port *);

/* transmit machine */

static void	lacp_sm_tx(struct lacp_port *);
static void	lacp_sm_assert_ntt(struct lacp_port *);

static void	lacp_run_timers(struct lacp_port *);
static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *);
static int	lacp_compare_systemid(const struct lacp_systemid *,
		    const struct lacp_systemid *);
static void	lacp_port_enable(struct lacp_port *);
static void	lacp_port_disable(struct lacp_port *);
static void	lacp_select(struct lacp_port *);
static void	lacp_unselect(struct lacp_port *);
static void	lacp_disable_collecting(struct lacp_port *);
static void	lacp_enable_collecting(struct lacp_port *);
static void	lacp_disable_distributing(struct lacp_port *);
static void	lacp_enable_distributing(struct lacp_port *);
static int	lacp_xmit_lacpdu(struct lacp_port *);
static int	lacp_xmit_marker(struct lacp_port *);

/* Debugging */

static void	lacp_dump_lacpdu(const struct lacpdu *);
static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
		    size_t);
static const char *lacp_format_lagid(const struct lacp_peerinfo *,
		    const struct lacp_peerinfo *, char *, size_t);
static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
		    char *, size_t);
static const char *lacp_format_state(uint8_t, char *, size_t);
static const char *lacp_format_mac(const uint8_t *, char *, size_t);
static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
		    size_t);
static const char *lacp_format_portid(const struct lacp_portid *, char *,
		    size_t);
static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
		    __printflike(2, 3);

static int lacp_debug = 0;
SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad");
SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RW,
    &lacp_debug, 0, "Enable LACP debug logging (1=debug, 2=trace)");
TUNABLE_INT("net.link.lagg.lacp.debug", &lacp_debug);

/* Debug print gates: bit 0 = debug, bit 1 = trace, bit 2 = TX/RX test */
#define	LACP_DPRINTF(a) if (lacp_debug & 0x01) { lacp_dprintf a ; }
#define	LACP_TRACE(a) if (lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
#define	LACP_TPRINTF(a) if (lacp_debug & 0x04) { lacp_dprintf a ; }

/*
 * partner administration variables.
 * XXX should be configurable.
 */

/* Assumed partner when none is heard from and strict mode is off. */
static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
};

/* Assumed partner in strict mode: no state bits, so no aggregation. */
static const struct lacp_peerinfo lacp_partner_admin_strict = {
	.lip_systemid = { .lsi_prio = 0xffff },
	.lip_portid = { .lpi_prio = 0xffff },
	.lip_state = 0,
};

/* Per-timer expiry handlers, indexed by LACP_TIMER_* (see lacp_run_timers). */
static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
};

/*
 * lacp_input: demultiplex an incoming Slow Protocols frame.
 *
 * Consumes LACP and Marker PDUs (returns NULL); any other subtype is
 * returned to the caller for normal input processing.
 */
struct mbuf *
lacp_input(struct lagg_port *lgp, struct mbuf *m)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	uint8_t subtype;

	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
		m_freem(m);
		return (NULL);
	}

	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
	switch (subtype) {
	case SLOWPROTOCOLS_SUBTYPE_LACP:
		lacp_pdu_input(lp, m);
		return (NULL);

	case SLOWPROTOCOLS_SUBTYPE_MARKER:
		lacp_marker_input(lp, m);
		return (NULL);
	}

	/* Not a subtype we are interested in */
	return (m);
}

/*
 * lacp_pdu_input: process lacpdu
 *
 * Validates the frame (length, multicast destination, TLV layout) and
 * feeds it to the receive state machine.  Always frees the mbuf.
 */
static int
lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
{
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lacpdu *du;
	int error = 0;

	if (m->m_pkthdr.len != sizeof(*du)) {
		goto bad;
	}

	if ((m->m_flags & M_MCAST) == 0) {
		goto bad;
	}

	if (m->m_len < sizeof(*du)) {
		m = m_pullup(m, sizeof(*du));
		if (m == NULL) {
			return (ENOMEM);
		}
	}

	du = mtod(m, struct lacpdu *);

	if (memcmp(&du->ldu_eh.ether_dhost,
	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
		goto bad;
	}

	/*
	 * ignore the version for compatibility with
	 * the future protocol revisions.
	 */
#if 0
	if (du->ldu_sph.sph_version != 1) {
		goto bad;
	}
#endif

	/*
	 * ignore tlv types for compatibility with
	 * the future protocol revisions.
	 */
	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
	    lacp_info_tlv_template, FALSE)) {
		goto bad;
	}

	if (lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu receive\n");
		lacp_dump_lacpdu(du);
	}

	/* Debug hook: sysctl-selected ports silently drop their RX PDUs. */
	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
		goto bad;
	}

	LACP_LOCK(lsc);
	lacp_sm_rx(lp, du);
	LACP_UNLOCK(lsc);

	m_freem(m);
	return (error);

bad:
	m_freem(m);
	return (EINVAL);
}

/*
 * lacp_fill_actorinfo: populate our (actor) peerinfo for a port,
 * using the lagg interface's MAC as the system id and the member
 * port's if_index as the port number.
 */
static void
lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;

	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
	memcpy(&info->lip_systemid.lsi_mac,
	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
	info->lip_state = lp->lp_state;
}

static void
lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
{
	struct ifnet *ifp = lp->lp_ifp;

	/* Fill in the port index and system id (encoded as the MAC) */
	info->mi_rq_port = htons(ifp->if_index);
	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
	info->mi_rq_xid = htonl(0);
}

/*
 * lacp_xmit_lacpdu: build and enqueue a LACPDU carrying the current
 * actor and partner info on the given port.
 */
static int
lacp_xmit_lacpdu(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct lacpdu *du;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*du);

	du = mtod(m, struct lacpdu *);
	memset(du, 0, sizeof(*du));

	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);

	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
	du->ldu_sph.sph_version = 1;

	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
	du->ldu_actor = lp->lp_actor;

	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
	    sizeof(du->ldu_partner));
	du->ldu_partner = lp->lp_partner;

	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
	    sizeof(du->ldu_collector));
	du->ldu_collector.lci_maxdelay = 0;

	if (lacp_debug > 0) {
		lacp_dprintf(lp, "lacpdu transmit\n");
		lacp_dump_lacpdu(du);
	}

	m->m_flags |= M_MCAST;

	/*
	 * XXX should use higher priority queue.
	 * otherwise network congestion can break aggregation.
	 */

	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}

/*
 * lacp_xmit_marker: build and enqueue a Marker PDU on the given port,
 * bumping the transaction id each time.  Used by
 * lacp_suppress_distributing() to flush in-flight traffic.
 */
static int
lacp_xmit_marker(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct mbuf *m;
	struct markerdu *mdu;
	int error;

	LACP_LOCK_ASSERT(lp->lp_lsc);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	m->m_len = m->m_pkthdr.len = sizeof(*mdu);

	mdu = mtod(m, struct markerdu *);
	memset(mdu, 0, sizeof(*mdu));

	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
	    ETHER_ADDR_LEN);
	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);

	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
	mdu->mdu_sph.sph_version = 1;

	/* Bump the transaction id and copy over the marker info */
	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
	mdu->mdu_info = lp->lp_marker;

	/*
	 * NOTE(review): "%6d" with *mi_rq_system prints only the first
	 * byte of the system MAC as an int — presumably a leftover from
	 * FreeBSD's "%6D" extension; confirm before relying on the output.
	 */
	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6d, id=%u\n",
	    ntohs(mdu->mdu_info.mi_rq_port), *mdu->mdu_info.mi_rq_system,
	    ntohl(mdu->mdu_info.mi_rq_xid)));

	m->m_flags |= M_MCAST;
	error = lagg_enqueue(lp->lp_ifp, m);
	return (error);
}

/*
 * lacp_linkstate: react to a media/link change on a member port.
 *
 * Queries the current media, enables or disables the port for
 * aggregation, recomputes the LACP key, and marks the port UNSELECTED
 * if either changed so that selection logic runs again.
 */
void
lacp_linkstate(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	struct ifnet *ifp = lgp->lp_ifp;
	struct ifmediareq ifmr;
	int error = 0;
	u_int media;
	uint8_t old_state;
	uint16_t old_key;

	bzero((char *)&ifmr, sizeof(ifmr));
	/* ifnet_deserialize_all(ifp); */
	ifnet_serialize_all(ifp);
	error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr, NULL);
	ifnet_deserialize_all(ifp);
	/* ifnet_serialize_all(ifp); */

	if (error != 0)
		return;

	LACP_LOCK(lsc);
	media = ifmr.ifm_active;
	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
	old_state = lp->lp_state;
	old_key = lp->lp_key;


	lp->lp_media = media;
	/*
	 * If the port is not an active full duplex Ethernet link then it can
	 * not be aggregated.
	 */
	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
	    ifp->if_link_state != LINK_STATE_UP) {
		lacp_port_disable(lp);
	} else {
		lacp_port_enable(lp);
	}
	lp->lp_key = lacp_compose_key(lp);

	if (old_state != lp->lp_state || old_key != lp->lp_key) {
		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
		lp->lp_selected = LACP_UNSELECTED;
	}
	LACP_UNLOCK(lsc);
}

/*
 * lacp_tick: once-a-second driver for all per-port LACP state machines.
 * Reschedules itself via lsc_callout.
 */
static void
lacp_tick(void *arg)
{
	struct lacp_softc *lsc = arg;
	struct lacp_port *lp;

	LACP_LOCK(lsc);
	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
			continue;

		lacp_run_timers(lp);

		lacp_select(lp);
		lacp_sm_mux(lp);
		lacp_sm_tx(lp);
		lacp_sm_ptx_tx_schedule(lp);
	}
	LACP_UNLOCK(lsc);
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
}

/*
 * lacp_port_create: attach LACP state to a new lagg member port.
 *
 * Joins the Slow Protocols multicast group, allocates and links the
 * lacp_port, initializes actor/marker info, and primes the receive
 * machine in the EXPIRED state.
 */
int
lacp_port_create(struct lagg_port *lgp)
{
	struct lagg_softc *sc = lgp->lp_softc;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_port *lp;
	struct ifnet *ifp = lgp->lp_ifp;
	struct sockaddr_dl sdl;
	struct ifmultiaddr *rifma = NULL;
	int error;

	boolean_t active = TRUE; /* XXX should be configurable */
	boolean_t fast = FALSE; /* XXX should be configurable */

	bzero((char *)&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_index = ifp->if_index;
	sdl.sdl_type = IFT_ETHER;
	sdl.sdl_alen = ETHER_ADDR_LEN;

	bcopy(&ethermulticastaddr_slowprotocols,
	    LLADDR(&sdl), ETHER_ADDR_LEN);

	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
	if (error) {
		kprintf("%s: ADDMULTI failed on %s\n", __func__, lgp->lp_ifname);
		return (error);
	}
	/*
	 * NOTE(review): if this allocation fails the multicast membership
	 * acquired above is never released — confirm whether a
	 * corresponding if_delmulti is needed on this error path.
	 */
	lp = kmalloc(sizeof(struct lacp_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lp == NULL)
		return (ENOMEM);

	LACP_LOCK(lsc);
	lgp->lp_psc = (caddr_t)lp;
	lp->lp_ifp = ifp;
	lp->lp_lagg = lgp;
	lp->lp_lsc = lsc;
	lp->lp_ifma = rifma;

	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);

	lacp_fill_actorinfo(lp, &lp->lp_actor);
	lacp_fill_markerinfo(lp, &lp->lp_marker);
	lp->lp_state =
	    (active ? LACP_STATE_ACTIVITY : 0) |
	    (fast ? LACP_STATE_TIMEOUT : 0);
	lp->lp_aggregator = NULL;
	lacp_sm_rx_set_expired(lp);
	LACP_UNLOCK(lsc);
	lacp_linkstate(lgp);

	return (0);
}

/*
 * lacp_port_destroy: tear down LACP state for a departing member port.
 */
void
lacp_port_destroy(struct lagg_port *lgp)
{
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;
	int i;

	LACP_LOCK(lsc);
	for (i = 0; i < LACP_NTIMER; i++) {
		LACP_TIMER_DISARM(lp, i);
	}

	lacp_disable_collecting(lp);
	lacp_disable_distributing(lp);
	lacp_unselect(lp);

	/* The address may have already been removed by if_purgemaddrs() */
#if 0 /* XXX */
	if (!lgp->lp_detaching)
		if_delmulti_ifma(lp->lp_ifma);
#endif
	LIST_REMOVE(lp, lp_next);
	LACP_UNLOCK(lsc);
	kfree(lp, M_DEVBUF);
}

/*
 * lacp_req: export the active aggregator's actor/partner info (host
 * byte order) for the lagg ioctl interface.  Zeroed if there is no
 * active aggregator.
 */
void
lacp_req(struct lagg_softc *sc, caddr_t data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	/*
	 * NOTE(review): lsc_active_aggregator is read before LACP_LOCK is
	 * taken below — verify this snapshot is safe against a concurrent
	 * aggregator change.
	 */
	struct lacp_aggregator *la = lsc->lsc_active_aggregator;

	LACP_LOCK(lsc);
	bzero(req, sizeof(struct lacp_opreq));
	if (la != NULL) {
		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->actor_key = ntohs(la->la_actor.lip_key);
		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
		req->actor_state = la->la_actor.lip_state;

		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
		    ETHER_ADDR_LEN);
		req->partner_key = ntohs(la->la_partner.lip_key);
		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
		req->partner_state = la->la_partner.lip_state;
	}
	LACP_UNLOCK(lsc);
}

/*
 * lacp_portreq: export a single port's actor/partner info (host byte
 * order) for the lagg ioctl interface.
 */
void
lacp_portreq(struct lagg_port *lgp, caddr_t data)
{
	struct lacp_opreq *req = (struct lacp_opreq *)data;
	struct lacp_port *lp = LACP_PORT(lgp);
	struct lacp_softc *lsc = lp->lp_lsc;

	LACP_LOCK(lsc);
	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->actor_key = ntohs(lp->lp_actor.lip_key);
	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
	req->actor_state = lp->lp_actor.lip_state;

	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
	    ETHER_ADDR_LEN);
	req->partner_key = ntohs(lp->lp_partner.lip_key);
	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
	req->partner_state = lp->lp_partner.lip_state;
	LACP_UNLOCK(lsc);
}

static void
lacp_disable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting disabled\n"));
	lp->lp_state &= ~LACP_STATE_COLLECTING;
}

static void
lacp_enable_collecting(struct lacp_port *lp)
{
	LACP_DPRINTF((lp, "collecting enabled\n"));
	lp->lp_state |= LACP_STATE_COLLECTING;
}

/*
 * lacp_disable_distributing: remove a port from its aggregator's
 * distribution list and, if that aggregator is active, re-run active
 * aggregator selection and rebuild the TX port map.
 */
static void
lacp_disable_distributing(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_softc *sc = lsc->lsc_softc;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_LOCK_ASSERT(lsc);

	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
		return;
	}

	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));

	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
	    "nports %d -> %d\n",
	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
	    la->la_nports, la->la_nports - 1));

	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
	la->la_nports--;
	sc->sc_active = la->la_nports;

	if (lsc->lsc_active_aggregator == la) {
		lacp_suppress_distributing(lsc, la);
		lacp_select_active_aggregator(lsc);
		/* regenerate the port map, the active aggregator has changed */
		lacp_update_portmap(lsc);
	}

	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
}

/*
 * lacp_enable_distributing: add a port to its aggregator's
 * distribution list and update or contend for the active aggregator.
 */
static void
lacp_enable_distributing(struct lacp_port *lp)
{
	struct lacp_aggregator *la = lp->lp_aggregator;
	struct lacp_softc *lsc = lp->lp_lsc;
	struct lagg_softc *sc = lsc->lsc_softc;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_LOCK_ASSERT(lsc);

	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
		return;
	}

	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
	    "nports %d -> %d\n",
	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
	    la->la_nports, la->la_nports + 1));

	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
	la->la_nports++;
	sc->sc_active = la->la_nports;

	lp->lp_state |= LACP_STATE_DISTRIBUTING;

	if (lsc->lsc_active_aggregator == la) {
		lacp_suppress_distributing(lsc, la);
		lacp_update_portmap(lsc);
	} else
		/* try to become the active aggregator */
		lacp_select_active_aggregator(lsc);
}

/*
 * lacp_transit_expire: callout fired LACP_TRANSIT_DELAY ms after a
 * distribution change; re-enables transmit (see
 * lacp_suppress_distributing).
 */
static void
lacp_transit_expire(void *vp)
{
	struct lacp_softc *lsc = vp;

	LACP_LOCK(lsc);
	LACP_LOCK_ASSERT(lsc);

	LACP_TRACE(NULL);

	lsc->lsc_suppress_distributing = FALSE;

	LACP_UNLOCK(lsc);
}

/* Attach the lacp_strict_mode sysctl under the per-lagg "lacp" node. */
static void
lacp_attach_sysctl(struct lacp_softc *lsc, struct sysctl_oid *p_oid)
{
	struct lagg_softc *sc = lsc->lsc_softc;

	SYSCTL_ADD_UINT(&sc->ctx, SYSCTL_CHILDREN(p_oid), OID_AUTO,
	    "lacp_strict_mode",
	    CTLFLAG_RW,
	    &lsc->lsc_strict_mode,
	    lsc->lsc_strict_mode,
	    "Enable LACP strict mode");
}

/* Attach the RX/TX test-drop sysctls under a "debug" child node. */
static void
lacp_attach_sysctl_debug(struct lacp_softc *lsc, struct sysctl_oid *p_oid)
{
	struct lagg_softc *sc = lsc->lsc_softc;
	struct sysctl_oid *oid;

	/* Create a child of the parent lagg interface */
	oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(p_oid),
	    OID_AUTO, "debug", CTLFLAG_RD, NULL, "DEBUG");

	SYSCTL_ADD_UINT(&sc->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "rx_test",
	    CTLFLAG_RW,
	    &lsc->lsc_debug.lsc_rx_test,
	    lsc->lsc_debug.lsc_rx_test,
	    "Bitmap of if_dunit entries to drop RX frames for");
	SYSCTL_ADD_UINT(&sc->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
	    "tx_test",
	    CTLFLAG_RW,
	    &lsc->lsc_debug.lsc_tx_test,
	    lsc->lsc_debug.lsc_tx_test,
	    "Bitmap of if_dunit entries to drop TX frames for");
}

/*
 * lacp_attach: allocate and initialize per-lagg LACP state (softc,
 * lock, callouts, sysctl tree).  Starts the tick if the lagg is
 * already running.
 */
int
lacp_attach(struct lagg_softc *sc)
{
	struct lacp_softc *lsc;
	struct sysctl_oid *oid;

	lsc = kmalloc(sizeof(struct lacp_softc),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (lsc == NULL)
		return (ENOMEM);

	sc->sc_psc = (caddr_t)lsc;
	lsc->lsc_softc = sc;

	lsc->lsc_hashkey = karc4random();
	lsc->lsc_active_aggregator = NULL;
	lsc->lsc_strict_mode = 1;
	LACP_LOCK_INIT(lsc);
	TAILQ_INIT(&lsc->lsc_aggregators);
	LIST_INIT(&lsc->lsc_ports);

	/* Create a child of the parent lagg interface */
	oid = SYSCTL_ADD_NODE(&sc->ctx, SYSCTL_CHILDREN(sc->sc_oid),
	    OID_AUTO, "lacp", CTLFLAG_RD, NULL, "LACP");

	/* Attach sysctl nodes */
	lacp_attach_sysctl(lsc, oid);
	lacp_attach_sysctl_debug(lsc, oid);

#if 0 /* XXX */
	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_lock, 0);
	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_lock, 0);
#endif

	callout_init(&lsc->lsc_transit_callout);
	callout_init(&lsc->lsc_callout);

	/* if the lagg is already up then do the same */
	if (sc->sc_ifp->if_flags & IFF_RUNNING)
		lacp_init(sc);

	return (0);
}

/*
 * lacp_detach: free per-lagg LACP state.  All ports and aggregators
 * must already be gone.
 */
int
lacp_detach(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
	    ("aggregators still active"));
	KASSERT(lsc->lsc_active_aggregator == NULL,
	    ("aggregator still attached"));

	sc->sc_psc = NULL;
	callout_drain(&lsc->lsc_transit_callout);
	callout_drain(&lsc->lsc_callout);

	LACP_LOCK_DESTROY(lsc);
	kfree(lsc, M_DEVBUF);
	return (0);
}

/* lacp_init: start the once-a-second protocol tick. */
void
lacp_init(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	LACP_LOCK(lsc);
	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
	LACP_UNLOCK(lsc);
}

/* lacp_stop: stop the protocol tick and any pending transit timer. */
void
lacp_stop(struct lagg_softc *sc)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);

	LACP_LOCK(lsc);
	callout_stop(&lsc->lsc_transit_callout);
	callout_stop(&lsc->lsc_callout);
	LACP_UNLOCK(lsc);
}

/*
 * lacp_select_tx_port: pick an egress lagg port for an outgoing mbuf by
 * hashing it into the active port map.  Returns NULL (caller drops)
 * while distribution is suppressed or no aggregator is active.
 */
struct lagg_port *
lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
{
	struct lacp_softc *lsc = LACP_SOFTC(sc);
	struct lacp_portmap *pm;
	struct lacp_port *lp;
	uint32_t hash;

	if (__predict_false(lsc->lsc_suppress_distributing)) {
		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
		return (NULL);
	}

	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
	if (pm->pm_count == 0) {
		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
		return (NULL);
	}

#if 0 /* XXX */
	if (sc->use_flowid && (m->m_flags & M_FLOWID))
		hash = m->m_pkthdr.flowid;
	else
#endif
		hash = lagg_hashmbuf(sc, m, lsc->lsc_hashkey);
	hash %= pm->pm_count;
	lp = pm->pm_map[hash];

	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
	    ("aggregated port is not distributing"));

	return (lp->lp_lagg);
}
/*
 * lacp_suppress_distributing: drop transmit packets for a while
 * to preserve packet ordering.
 */

static void
lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	struct lacp_port *lp;

	if (lsc->lsc_active_aggregator != la) {
		return;
	}

	LACP_TRACE(NULL);

	lsc->lsc_suppress_distributing = TRUE;

	/* send a marker frame down each port to verify the queues are empty */
	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
		lp->lp_flags |= LACP_PORT_MARK;
		lacp_xmit_marker(lp);
	}

	/* set a timeout for the marker frames */
	callout_reset(&lsc->lsc_transit_callout,
	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
}

/* Compare two peerinfos, ignoring the trailing state byte. */
static int
lacp_compare_peerinfo(const struct lacp_peerinfo *a,
    const struct lacp_peerinfo *b)
{
	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
}

static int
lacp_compare_systemid(const struct lacp_systemid *a,
    const struct lacp_systemid *b)
{
	return (memcmp(a, b, sizeof(*a)));
}

#if 0 /* unused */
static int
lacp_compare_portid(const struct lacp_portid *a,
    const struct lacp_portid *b)
{
	return (memcmp(a, b, sizeof(*a)));
}
#endif

/*
 * lacp_aggregator_bandwidth: aggregate bandwidth = media baudrate of
 * the first distributing port times the number of ports.  Zero if the
 * aggregator has no ports.
 */
static uint64_t
lacp_aggregator_bandwidth(struct lacp_aggregator *la)
{
	struct lacp_port *lp;
	uint64_t speed;

	lp = TAILQ_FIRST(&la->la_ports);
	if (lp == NULL) {
		return (0);
	}

	speed = ifmedia_baudrate(lp->lp_media);
	speed *= la->la_nports;
	if (speed == 0) {
		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
		    lp->lp_media, la->la_nports));
	}

	return (speed);
}

/*
 * lacp_select_active_aggregator: select an aggregator to be used to transmit
 * packets from lagg(4) interface.
 */

static void
lacp_select_active_aggregator(struct lacp_softc *lsc)
{
	struct lacp_aggregator *la;
	struct lacp_aggregator *best_la = NULL;
	uint64_t best_speed = 0;
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_TRACE(NULL);

	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
		uint64_t speed;

		if (la->la_nports == 0) {
			continue;
		}

		speed = lacp_aggregator_bandwidth(la);
		/*
		 * NOTE(review): "%jd" with a uint64_t argument assumes
		 * intmax_t is 64-bit here; "%ju" would match the type.
		 */
		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
		    speed, la->la_nports));

		/* This aggregator is chosen if
		 *	the partner has a better system priority
		 *  or, the total aggregated speed is higher
		 *  or, it is already the chosen aggregator
		 */
		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
		     LACP_SYS_PRI(best_la->la_partner)) ||
		    speed > best_speed ||
		    (speed == best_speed &&
		     la == lsc->lsc_active_aggregator)) {
			best_la = la;
			best_speed = speed;
		}
	}

	KASSERT(best_la == NULL || best_la->la_nports > 0,
	    ("invalid aggregator refcnt"));
	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
	    ("invalid aggregator list"));

	if (lsc->lsc_active_aggregator != best_la) {
		LACP_DPRINTF((NULL, "active aggregator changed\n"));
		LACP_DPRINTF((NULL, "old %s\n",
		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
		    buf, sizeof(buf))));
	} else {
		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
	}
	LACP_DPRINTF((NULL, "new %s\n",
	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));

	if (lsc->lsc_active_aggregator != best_la) {
		lsc->lsc_active_aggregator = best_la;
		lacp_update_portmap(lsc);
		if (best_la) {
			lacp_suppress_distributing(lsc, best_la);
		}
	}
}

/*
 * Updated the inactive portmap array with the new list of ports and
 * make it live.
 */
static void
lacp_update_portmap(struct lacp_softc *lsc)
{
	struct lagg_softc *sc = lsc->lsc_softc;
	struct lacp_aggregator *la;
	struct lacp_portmap *p;
	struct lacp_port *lp;
	uint64_t speed;
	u_int newmap;
	int i;

	/* Rebuild into the map that is NOT currently live (double buffer). */
	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
	p = &lsc->lsc_pmap[newmap];
	la = lsc->lsc_active_aggregator;
	speed = 0;
	bzero(p, sizeof(struct lacp_portmap));

	if (la != NULL && la->la_nports > 0) {
		p->pm_count = la->la_nports;
		i = 0;
		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q)
			p->pm_map[i++] = lp;
		KASSERT(i == p->pm_count, ("Invalid port count"));
		speed = lacp_aggregator_bandwidth(la);
	}
	sc->sc_ifp->if_baudrate = speed;

	/* switch the active portmap over */
	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
	    lsc->lsc_activemap,
	    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
}

/*
 * lacp_compose_key: derive the (network byte order) LACP key for a
 * port from its media speed class and the lagg's if_index, so that
 * only like-speed ports on the same lagg share a key.
 */
static uint16_t
lacp_compose_key(struct lacp_port *lp)
{
	struct lagg_port *lgp = lp->lp_lagg;
	struct lagg_softc *sc = lgp->lp_softc;
	u_int media = lp->lp_media;
	uint16_t key;

	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {

		/*
		 * non-aggregatable links should have unique keys.
		 *
		 * XXX this isn't really unique as if_index is 16 bit.
		 */

		/* bit 0..14:	(some bits of) if_index of this port */
		key = lp->lp_ifp->if_index;
		/* bit 15:	1 */
		key |= 0x8000;
	} else {
		u_int subtype = IFM_SUBTYPE(media);

		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));

		/* bit 0..4:	IFM_SUBTYPE modulo speed */
		switch (subtype) {
		case IFM_10_T:
		case IFM_10_2:
		case IFM_10_5:
		case IFM_10_STP:
		case IFM_10_FL:
			key = IFM_10_T;
			break;
		case IFM_100_TX:
		case IFM_100_FX:
		case IFM_100_T4:
		case IFM_100_VG:
		case IFM_100_T2:
			key = IFM_100_TX;
			break;
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			key = IFM_1000_SX;
			break;
		case IFM_10G_LR:
		case IFM_10G_SR:
		case IFM_10G_CX4:
		case IFM_10G_TWINAX:
		case IFM_10G_TWINAX_LONG:
		case IFM_10G_LRM:
		case IFM_10G_T:
			key = IFM_10G_LR;
			break;
		case IFM_40G_CR4:
		case IFM_40G_SR4:
		case IFM_40G_LR4:
			key = IFM_40G_CR4;
			break;
		default:
			key = subtype;
		}
		/* bit 5..14:	(some bits of) if_index of lagg device */
		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
		/* bit 15:	0 */
	}
	return (htons(key));
}

/*
 * lacp_aggregator_addref: take a reference on an aggregator.
 * Invariant: la_refcnt > la_nports (one ref per selected port plus
 * the allocation ref).
 */
static void
lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
	    __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_refcnt, la->la_refcnt + 1));

	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
	la->la_refcnt++;
	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
}

/*
 * lacp_aggregator_delref: drop a reference; unlinks and frees the
 * aggregator when the last reference goes away.
 */
static void
lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
{
	char buf[LACP_LAGIDSTR_MAX+1];

	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
	    __func__,
	    lacp_format_lagid(&la->la_actor, &la->la_partner,
	    buf, sizeof(buf)),
	    la->la_refcnt, la->la_refcnt - 1));

	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
	la->la_refcnt--;
	if (la->la_refcnt > 0) {
		return;
	}

	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));

	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);

	kfree(la, M_DEVBUF);
}

/*
 * lacp_aggregator_get: allocate an aggregator.
 */

static struct lacp_aggregator *
lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
{
	struct lacp_aggregator *la;

	la = kmalloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
	if (la) {
		la->la_refcnt = 1;
		la->la_nports = 0;
		TAILQ_INIT(&la->la_ports);
		la->la_pending = 0;
		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
	}

	return (la);
}

/*
 * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
 */

static void
lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
{
	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);

	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
}

/* Copy only the identity fields (systemid + key) of a peerinfo. */
static void
lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
    const struct lacp_peerinfo *lpi_port)
{
	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
	lpi_aggr->lip_key = lpi_port->lip_key;
}

/*
 * lacp_aggregator_is_compatible: check if a port can join to an aggregator.
1247 */ 1248 1249 static int 1250 lacp_aggregator_is_compatible(const struct lacp_aggregator *la, 1251 const struct lacp_port *lp) 1252 { 1253 if (!(lp->lp_state & LACP_STATE_AGGREGATION) || 1254 !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) { 1255 return (0); 1256 } 1257 1258 if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) { 1259 return (0); 1260 } 1261 1262 if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) { 1263 return (0); 1264 } 1265 1266 if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) { 1267 return (0); 1268 } 1269 1270 return (1); 1271 } 1272 1273 static int 1274 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a, 1275 const struct lacp_peerinfo *b) 1276 { 1277 if (memcmp(&a->lip_systemid, &b->lip_systemid, 1278 sizeof(a->lip_systemid))) { 1279 return (0); 1280 } 1281 1282 if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) { 1283 return (0); 1284 } 1285 1286 return (1); 1287 } 1288 1289 static void 1290 lacp_port_enable(struct lacp_port *lp) 1291 { 1292 lp->lp_state |= LACP_STATE_AGGREGATION; 1293 } 1294 1295 static void 1296 lacp_port_disable(struct lacp_port *lp) 1297 { 1298 lacp_set_mux(lp, LACP_MUX_DETACHED); 1299 1300 lp->lp_state &= ~LACP_STATE_AGGREGATION; 1301 lp->lp_selected = LACP_UNSELECTED; 1302 lacp_sm_rx_record_default(lp); 1303 lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION; 1304 lp->lp_state &= ~LACP_STATE_EXPIRED; 1305 } 1306 1307 /* 1308 * lacp_select: select an aggregator. create one if necessary. 
1309 */ 1310 static void 1311 lacp_select(struct lacp_port *lp) 1312 { 1313 struct lacp_softc *lsc = lp->lp_lsc; 1314 struct lacp_aggregator *la; 1315 char buf[LACP_LAGIDSTR_MAX+1]; 1316 1317 if (lp->lp_aggregator) { 1318 return; 1319 } 1320 1321 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1322 ("timer_wait_while still active")); 1323 1324 LACP_DPRINTF((lp, "port lagid=%s\n", 1325 lacp_format_lagid(&lp->lp_actor, &lp->lp_partner, 1326 buf, sizeof(buf)))); 1327 1328 TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) { 1329 if (lacp_aggregator_is_compatible(la, lp)) { 1330 break; 1331 } 1332 } 1333 1334 if (la == NULL) { 1335 la = lacp_aggregator_get(lsc, lp); 1336 if (la == NULL) { 1337 LACP_DPRINTF((lp, "aggregator creation failed\n")); 1338 1339 /* 1340 * will retry on the next tick. 1341 */ 1342 1343 return; 1344 } 1345 lacp_fill_aggregator_id(la, lp); 1346 LACP_DPRINTF((lp, "aggregator created\n")); 1347 } else { 1348 LACP_DPRINTF((lp, "compatible aggregator found\n")); 1349 if (la->la_refcnt == LACP_MAX_PORTS) 1350 return; 1351 lacp_aggregator_addref(lsc, la); 1352 } 1353 1354 LACP_DPRINTF((lp, "aggregator lagid=%s\n", 1355 lacp_format_lagid(&la->la_actor, &la->la_partner, 1356 buf, sizeof(buf)))); 1357 1358 lp->lp_aggregator = la; 1359 lp->lp_selected = LACP_SELECTED; 1360 } 1361 1362 /* 1363 * lacp_unselect: finish unselect/detach process. 
1364 */ 1365 1366 static void 1367 lacp_unselect(struct lacp_port *lp) 1368 { 1369 struct lacp_softc *lsc = lp->lp_lsc; 1370 struct lacp_aggregator *la = lp->lp_aggregator; 1371 1372 KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1373 ("timer_wait_while still active")); 1374 1375 if (la == NULL) { 1376 return; 1377 } 1378 1379 lp->lp_aggregator = NULL; 1380 lacp_aggregator_delref(lsc, la); 1381 } 1382 1383 /* mux machine */ 1384 1385 static void 1386 lacp_sm_mux(struct lacp_port *lp) 1387 { 1388 struct lagg_port *lgp = lp->lp_lagg; 1389 struct lagg_softc *sc = lgp->lp_softc; 1390 enum lacp_mux_state new_state; 1391 boolean_t p_sync = 1392 (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0; 1393 boolean_t p_collecting = 1394 (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0; 1395 enum lacp_selected selected = lp->lp_selected; 1396 struct lacp_aggregator *la; 1397 1398 if (lacp_debug > 1) 1399 lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, " 1400 "p_sync= 0x%x, p_collecting= 0x%x\n", __func__, 1401 lp->lp_mux_state, selected, p_sync, p_collecting); 1402 1403 re_eval: 1404 la = lp->lp_aggregator; 1405 KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL, 1406 ("MUX not detached")); 1407 new_state = lp->lp_mux_state; 1408 switch (lp->lp_mux_state) { 1409 case LACP_MUX_DETACHED: 1410 if (selected != LACP_UNSELECTED) { 1411 new_state = LACP_MUX_WAITING; 1412 } 1413 break; 1414 case LACP_MUX_WAITING: 1415 KASSERT(la->la_pending > 0 || 1416 !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE), 1417 ("timer_wait_while still active")); 1418 if (selected == LACP_SELECTED && la->la_pending == 0) { 1419 new_state = LACP_MUX_ATTACHED; 1420 } else if (selected == LACP_UNSELECTED) { 1421 new_state = LACP_MUX_DETACHED; 1422 } 1423 break; 1424 case LACP_MUX_ATTACHED: 1425 if (selected == LACP_SELECTED && p_sync) { 1426 new_state = LACP_MUX_COLLECTING; 1427 } else if (selected != LACP_SELECTED) { 1428 new_state = LACP_MUX_DETACHED; 1429 } 1430 break; 1431 case 
LACP_MUX_COLLECTING: 1432 if (selected == LACP_SELECTED && p_sync && p_collecting) { 1433 new_state = LACP_MUX_DISTRIBUTING; 1434 } else if (selected != LACP_SELECTED || !p_sync) { 1435 new_state = LACP_MUX_ATTACHED; 1436 } 1437 break; 1438 case LACP_MUX_DISTRIBUTING: 1439 if (selected != LACP_SELECTED || !p_sync || !p_collecting) { 1440 new_state = LACP_MUX_COLLECTING; 1441 lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n"); 1442 sc->sc_flapping++; 1443 } 1444 break; 1445 default: 1446 panic("%s: unknown state", __func__); 1447 } 1448 1449 if (lp->lp_mux_state == new_state) { 1450 return; 1451 } 1452 1453 lacp_set_mux(lp, new_state); 1454 goto re_eval; 1455 } 1456 1457 static void 1458 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state) 1459 { 1460 struct lacp_aggregator *la = lp->lp_aggregator; 1461 1462 if (lp->lp_mux_state == new_state) { 1463 return; 1464 } 1465 1466 switch (new_state) { 1467 case LACP_MUX_DETACHED: 1468 lp->lp_state &= ~LACP_STATE_SYNC; 1469 lacp_disable_distributing(lp); 1470 lacp_disable_collecting(lp); 1471 lacp_sm_assert_ntt(lp); 1472 /* cancel timer */ 1473 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) { 1474 KASSERT(la->la_pending > 0, 1475 ("timer_wait_while not active")); 1476 la->la_pending--; 1477 } 1478 LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE); 1479 lacp_unselect(lp); 1480 break; 1481 case LACP_MUX_WAITING: 1482 LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE, 1483 LACP_AGGREGATE_WAIT_TIME); 1484 la->la_pending++; 1485 break; 1486 case LACP_MUX_ATTACHED: 1487 lp->lp_state |= LACP_STATE_SYNC; 1488 lacp_disable_collecting(lp); 1489 lacp_sm_assert_ntt(lp); 1490 break; 1491 case LACP_MUX_COLLECTING: 1492 lacp_enable_collecting(lp); 1493 lacp_disable_distributing(lp); 1494 lacp_sm_assert_ntt(lp); 1495 break; 1496 case LACP_MUX_DISTRIBUTING: 1497 lacp_enable_distributing(lp); 1498 break; 1499 default: 1500 panic("%s: unknown state", __func__); 1501 } 1502 1503 LACP_DPRINTF((lp, "mux_state %d -> %d\n", 
lp->lp_mux_state, new_state)); 1504 1505 lp->lp_mux_state = new_state; 1506 } 1507 1508 static void 1509 lacp_sm_mux_timer(struct lacp_port *lp) 1510 { 1511 struct lacp_aggregator *la = lp->lp_aggregator; 1512 char buf[LACP_LAGIDSTR_MAX+1]; 1513 1514 KASSERT(la->la_pending > 0, ("no pending event")); 1515 1516 LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__, 1517 lacp_format_lagid(&la->la_actor, &la->la_partner, 1518 buf, sizeof(buf)), 1519 la->la_pending, la->la_pending - 1)); 1520 1521 la->la_pending--; 1522 } 1523 1524 /* periodic transmit machine */ 1525 1526 static void 1527 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate) 1528 { 1529 if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state, 1530 LACP_STATE_TIMEOUT)) { 1531 return; 1532 } 1533 1534 LACP_DPRINTF((lp, "partner timeout changed\n")); 1535 1536 /* 1537 * FAST_PERIODIC -> SLOW_PERIODIC 1538 * or 1539 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC 1540 * 1541 * let lacp_sm_ptx_tx_schedule to update timeout. 1542 */ 1543 1544 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); 1545 1546 /* 1547 * if timeout has been shortened, assert NTT. 1548 */ 1549 1550 if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) { 1551 lacp_sm_assert_ntt(lp); 1552 } 1553 } 1554 1555 static void 1556 lacp_sm_ptx_tx_schedule(struct lacp_port *lp) 1557 { 1558 int timeout; 1559 1560 if (!(lp->lp_state & LACP_STATE_ACTIVITY) && 1561 !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) { 1562 1563 /* 1564 * NO_PERIODIC 1565 */ 1566 1567 LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC); 1568 return; 1569 } 1570 1571 if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) { 1572 return; 1573 } 1574 1575 timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ? 
1576 LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME; 1577 1578 LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout); 1579 } 1580 1581 static void 1582 lacp_sm_ptx_timer(struct lacp_port *lp) 1583 { 1584 lacp_sm_assert_ntt(lp); 1585 } 1586 1587 static void 1588 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du) 1589 { 1590 int timeout; 1591 1592 /* 1593 * check LACP_DISABLED first 1594 */ 1595 1596 if (!(lp->lp_state & LACP_STATE_AGGREGATION)) { 1597 return; 1598 } 1599 1600 /* 1601 * check loopback condition. 1602 */ 1603 1604 if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid, 1605 &lp->lp_actor.lip_systemid)) { 1606 return; 1607 } 1608 1609 /* 1610 * EXPIRED, DEFAULTED, CURRENT -> CURRENT 1611 */ 1612 1613 lacp_sm_rx_update_selected(lp, du); 1614 lacp_sm_rx_update_ntt(lp, du); 1615 lacp_sm_rx_record_pdu(lp, du); 1616 1617 timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ? 1618 LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME; 1619 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout); 1620 1621 lp->lp_state &= ~LACP_STATE_EXPIRED; 1622 1623 /* 1624 * kick transmit machine without waiting the next tick. 
1625 */ 1626 1627 lacp_sm_tx(lp); 1628 } 1629 1630 static void 1631 lacp_sm_rx_set_expired(struct lacp_port *lp) 1632 { 1633 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; 1634 lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT; 1635 LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME); 1636 lp->lp_state |= LACP_STATE_EXPIRED; 1637 } 1638 1639 static void 1640 lacp_sm_rx_timer(struct lacp_port *lp) 1641 { 1642 if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) { 1643 /* CURRENT -> EXPIRED */ 1644 LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__)); 1645 lacp_sm_rx_set_expired(lp); 1646 } else { 1647 /* EXPIRED -> DEFAULTED */ 1648 LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__)); 1649 lacp_sm_rx_update_default_selected(lp); 1650 lacp_sm_rx_record_default(lp); 1651 lp->lp_state &= ~LACP_STATE_EXPIRED; 1652 } 1653 } 1654 1655 static void 1656 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du) 1657 { 1658 boolean_t active; 1659 uint8_t oldpstate; 1660 char buf[LACP_STATESTR_MAX+1]; 1661 1662 LACP_TRACE(lp); 1663 1664 oldpstate = lp->lp_partner.lip_state; 1665 1666 active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY) 1667 || ((lp->lp_state & LACP_STATE_ACTIVITY) && 1668 (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY)); 1669 1670 lp->lp_partner = du->ldu_actor; 1671 if (active && 1672 ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, 1673 LACP_STATE_AGGREGATION) && 1674 !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner)) 1675 || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) { 1676 /* XXX nothing? 
*/ 1677 } else { 1678 lp->lp_partner.lip_state &= ~LACP_STATE_SYNC; 1679 } 1680 1681 lp->lp_state &= ~LACP_STATE_DEFAULTED; 1682 1683 if (oldpstate != lp->lp_partner.lip_state) { 1684 LACP_DPRINTF((lp, "old pstate %s\n", 1685 lacp_format_state(oldpstate, buf, sizeof(buf)))); 1686 LACP_DPRINTF((lp, "new pstate %s\n", 1687 lacp_format_state(lp->lp_partner.lip_state, buf, 1688 sizeof(buf)))); 1689 } 1690 1691 /* XXX Hack, still need to implement 5.4.9 para 2,3,4 */ 1692 if (lp->lp_lsc->lsc_strict_mode) 1693 lp->lp_partner.lip_state |= LACP_STATE_SYNC; 1694 1695 lacp_sm_ptx_update_timeout(lp, oldpstate); 1696 } 1697 1698 static void 1699 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du) 1700 { 1701 1702 LACP_TRACE(lp); 1703 1704 if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) || 1705 !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state, 1706 LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) { 1707 LACP_DPRINTF((lp, "%s: assert ntt\n", __func__)); 1708 lacp_sm_assert_ntt(lp); 1709 } 1710 } 1711 1712 static void 1713 lacp_sm_rx_record_default(struct lacp_port *lp) 1714 { 1715 uint8_t oldpstate; 1716 1717 LACP_TRACE(lp); 1718 1719 oldpstate = lp->lp_partner.lip_state; 1720 if (lp->lp_lsc->lsc_strict_mode) 1721 lp->lp_partner = lacp_partner_admin_strict; 1722 else 1723 lp->lp_partner = lacp_partner_admin_optimistic;; 1724 lp->lp_state |= LACP_STATE_DEFAULTED; 1725 lacp_sm_ptx_update_timeout(lp, oldpstate); 1726 } 1727 1728 static void 1729 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp, 1730 const struct lacp_peerinfo *info) 1731 { 1732 1733 LACP_TRACE(lp); 1734 1735 if (lacp_compare_peerinfo(&lp->lp_partner, info) || 1736 !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state, 1737 LACP_STATE_AGGREGATION)) { 1738 lp->lp_selected = LACP_UNSELECTED; 1739 /* mux machine will clean up lp->lp_aggregator */ 1740 } 1741 } 1742 1743 static void 1744 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu 
*du) 1745 { 1746 1747 LACP_TRACE(lp); 1748 1749 lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor); 1750 } 1751 1752 static void 1753 lacp_sm_rx_update_default_selected(struct lacp_port *lp) 1754 { 1755 1756 LACP_TRACE(lp); 1757 1758 if (lp->lp_lsc->lsc_strict_mode) 1759 lacp_sm_rx_update_selected_from_peerinfo(lp, 1760 &lacp_partner_admin_strict); 1761 else 1762 lacp_sm_rx_update_selected_from_peerinfo(lp, 1763 &lacp_partner_admin_optimistic); 1764 } 1765 1766 /* transmit machine */ 1767 1768 static void 1769 lacp_sm_tx(struct lacp_port *lp) 1770 { 1771 int error = 0; 1772 1773 if (!(lp->lp_state & LACP_STATE_AGGREGATION) 1774 #if 1 1775 || (!(lp->lp_state & LACP_STATE_ACTIVITY) 1776 && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) 1777 #endif 1778 ) { 1779 lp->lp_flags &= ~LACP_PORT_NTT; 1780 } 1781 1782 if (!(lp->lp_flags & LACP_PORT_NTT)) { 1783 return; 1784 } 1785 1786 /* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */ 1787 if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent, 1788 (3 / LACP_FAST_PERIODIC_TIME)) == 0) { 1789 LACP_DPRINTF((lp, "rate limited pdu\n")); 1790 return; 1791 } 1792 1793 if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) { 1794 error = lacp_xmit_lacpdu(lp); 1795 } else { 1796 LACP_TPRINTF((lp, "Dropping TX PDU\n")); 1797 } 1798 1799 if (error == 0) { 1800 lp->lp_flags &= ~LACP_PORT_NTT; 1801 } else { 1802 LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n", 1803 error)); 1804 } 1805 } 1806 1807 static void 1808 lacp_sm_assert_ntt(struct lacp_port *lp) 1809 { 1810 1811 lp->lp_flags |= LACP_PORT_NTT; 1812 } 1813 1814 static void 1815 lacp_run_timers(struct lacp_port *lp) 1816 { 1817 int i; 1818 1819 for (i = 0; i < LACP_NTIMER; i++) { 1820 KASSERT(lp->lp_timer[i] >= 0, 1821 ("invalid timer value %d", lp->lp_timer[i])); 1822 if (lp->lp_timer[i] == 0) { 1823 continue; 1824 } else if (--lp->lp_timer[i] <= 0) { 1825 if (lacp_timer_funcs[i]) { 1826 (*lacp_timer_funcs[i])(lp); 1827 } 1828 } 
1829 } 1830 } 1831 1832 int 1833 lacp_marker_input(struct lacp_port *lp, struct mbuf *m) 1834 { 1835 struct lacp_softc *lsc = lp->lp_lsc; 1836 struct lagg_port *lgp = lp->lp_lagg; 1837 struct lacp_port *lp2; 1838 struct markerdu *mdu; 1839 int error = 0; 1840 int pending = 0; 1841 1842 if (m->m_pkthdr.len != sizeof(*mdu)) { 1843 goto bad; 1844 } 1845 1846 if ((m->m_flags & M_MCAST) == 0) { 1847 goto bad; 1848 } 1849 1850 if (m->m_len < sizeof(*mdu)) { 1851 m = m_pullup(m, sizeof(*mdu)); 1852 if (m == NULL) { 1853 return (ENOMEM); 1854 } 1855 } 1856 1857 mdu = mtod(m, struct markerdu *); 1858 1859 if (memcmp(&mdu->mdu_eh.ether_dhost, 1860 ðermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) { 1861 goto bad; 1862 } 1863 1864 if (mdu->mdu_sph.sph_version != 1) { 1865 goto bad; 1866 } 1867 1868 switch (mdu->mdu_tlv.tlv_type) { 1869 case MARKER_TYPE_INFO: 1870 if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv, 1871 marker_info_tlv_template, TRUE)) { 1872 goto bad; 1873 } 1874 mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE; 1875 memcpy(&mdu->mdu_eh.ether_dhost, 1876 ðermulticastaddr_slowprotocols, ETHER_ADDR_LEN); 1877 memcpy(&mdu->mdu_eh.ether_shost, 1878 lgp->lp_lladdr, ETHER_ADDR_LEN); 1879 error = lagg_enqueue(lp->lp_ifp, m); 1880 break; 1881 1882 case MARKER_TYPE_RESPONSE: 1883 if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv, 1884 marker_response_tlv_template, TRUE)) { 1885 goto bad; 1886 } 1887 LACP_DPRINTF((lp, "marker response, port=%u, sys=%6d, id=%u\n", 1888 ntohs(mdu->mdu_info.mi_rq_port), *mdu->mdu_info.mi_rq_system, 1889 ntohl(mdu->mdu_info.mi_rq_xid))); 1890 1891 /* Verify that it is the last marker we sent out */ 1892 if (memcmp(&mdu->mdu_info, &lp->lp_marker, 1893 sizeof(struct lacp_markerinfo))) 1894 goto bad; 1895 1896 LACP_LOCK(lsc); 1897 lp->lp_flags &= ~LACP_PORT_MARK; 1898 1899 if (lsc->lsc_suppress_distributing) { 1900 /* Check if any ports are waiting for a response */ 1901 LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) { 1902 if (lp2->lp_flags & 
LACP_PORT_MARK) { 1903 pending = 1; 1904 break; 1905 } 1906 } 1907 1908 if (pending == 0) { 1909 /* All interface queues are clear */ 1910 LACP_DPRINTF((NULL, "queue flush complete\n")); 1911 lsc->lsc_suppress_distributing = FALSE; 1912 } 1913 } 1914 LACP_UNLOCK(lsc); 1915 m_freem(m); 1916 break; 1917 1918 default: 1919 goto bad; 1920 } 1921 1922 return (error); 1923 1924 bad: 1925 LACP_DPRINTF((lp, "bad marker frame\n")); 1926 m_freem(m); 1927 return (EINVAL); 1928 } 1929 1930 static int 1931 tlv_check(const void *p, size_t size, const struct tlvhdr *tlv, 1932 const struct tlv_template *tmpl, boolean_t check_type) 1933 { 1934 while (/* CONSTCOND */ 1) { 1935 if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) { 1936 return (EINVAL); 1937 } 1938 if ((check_type && tlv->tlv_type != tmpl->tmpl_type) || 1939 tlv->tlv_length != tmpl->tmpl_length) { 1940 return (EINVAL); 1941 } 1942 if (tmpl->tmpl_type == 0) { 1943 break; 1944 } 1945 tlv = (const struct tlvhdr *) 1946 ((const char *)tlv + tlv->tlv_length); 1947 tmpl++; 1948 } 1949 1950 return (0); 1951 } 1952 1953 /* Debugging */ 1954 const char * 1955 lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen) 1956 { 1957 ksnprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X", 1958 (int)mac[0], 1959 (int)mac[1], 1960 (int)mac[2], 1961 (int)mac[3], 1962 (int)mac[4], 1963 (int)mac[5]); 1964 1965 return (buf); 1966 } 1967 1968 const char * 1969 lacp_format_systemid(const struct lacp_systemid *sysid, 1970 char *buf, size_t buflen) 1971 { 1972 char macbuf[LACP_MACSTR_MAX+1]; 1973 1974 ksnprintf(buf, buflen, "%04X,%s", 1975 ntohs(sysid->lsi_prio), 1976 lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf))); 1977 1978 return (buf); 1979 } 1980 1981 const char * 1982 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen) 1983 { 1984 ksnprintf(buf, buflen, "%04X,%04X", 1985 ntohs(portid->lpi_prio), 1986 ntohs(portid->lpi_portno)); 1987 1988 return (buf); 1989 } 1990 1991 const char * 1992 
lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen) 1993 { 1994 char sysid[LACP_SYSTEMIDSTR_MAX+1]; 1995 char portid[LACP_PORTIDSTR_MAX+1]; 1996 1997 ksnprintf(buf, buflen, "(%s,%04X,%s)", 1998 lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)), 1999 ntohs(peer->lip_key), 2000 lacp_format_portid(&peer->lip_portid, portid, sizeof(portid))); 2001 2002 return (buf); 2003 } 2004 2005 const char * 2006 lacp_format_lagid(const struct lacp_peerinfo *a, 2007 const struct lacp_peerinfo *b, char *buf, size_t buflen) 2008 { 2009 char astr[LACP_PARTNERSTR_MAX+1]; 2010 char bstr[LACP_PARTNERSTR_MAX+1]; 2011 2012 #if 0 2013 /* 2014 * there's a convention to display small numbered peer 2015 * in the left. 2016 */ 2017 2018 if (lacp_compare_peerinfo(a, b) > 0) { 2019 const struct lacp_peerinfo *t; 2020 2021 t = a; 2022 a = b; 2023 b = t; 2024 } 2025 #endif 2026 2027 ksnprintf(buf, buflen, "[%s,%s]", 2028 lacp_format_partner(a, astr, sizeof(astr)), 2029 lacp_format_partner(b, bstr, sizeof(bstr))); 2030 2031 return (buf); 2032 } 2033 2034 const char * 2035 lacp_format_lagid_aggregator(const struct lacp_aggregator *la, 2036 char *buf, size_t buflen) 2037 { 2038 if (la == NULL) { 2039 return ("(none)"); 2040 } 2041 2042 return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen)); 2043 } 2044 2045 const char * 2046 lacp_format_state(uint8_t state, char *buf, size_t buflen) 2047 { 2048 ksnprintf(buf, buflen, "%b", state, LACP_STATE_BITS); 2049 return (buf); 2050 } 2051 2052 static void 2053 lacp_dump_lacpdu(const struct lacpdu *du) 2054 { 2055 char buf[LACP_PARTNERSTR_MAX+1]; 2056 char buf2[LACP_STATESTR_MAX+1]; 2057 2058 kprintf("actor=%s\n", 2059 lacp_format_partner(&du->ldu_actor, buf, sizeof(buf))); 2060 kprintf("actor.state=%s\n", 2061 lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2))); 2062 kprintf("partner=%s\n", 2063 lacp_format_partner(&du->ldu_partner, buf, sizeof(buf))); 2064 kprintf("partner.state=%s\n", 2065 
lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2))); 2066 2067 kprintf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay)); 2068 } 2069 2070 static void 2071 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...) 2072 { 2073 __va_list va; 2074 2075 if (lp) { 2076 kprintf("%s: ", lp->lp_ifp->if_xname); 2077 } 2078 2079 __va_start(va, fmt); 2080 kvprintf(fmt, va); 2081 __va_end(va); 2082 } 2083