xref: /freebsd/sys/net/ieee8023ad_lacp.c (revision 2f513db7)
1 /*	$NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5  *
6  * Copyright (c)2005 YAMAMOTO Takashi,
7  * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_kern_tls.h"
36 #include "opt_ratelimit.h"
37 
38 #include <sys/param.h>
39 #include <sys/callout.h>
40 #include <sys/eventhandler.h>
41 #include <sys/mbuf.h>
42 #include <sys/systm.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h> /* hz */
45 #include <sys/socket.h> /* for net/if.h */
46 #include <sys/sockio.h>
47 #include <sys/sysctl.h>
48 #include <machine/stdarg.h>
49 #include <sys/lock.h>
50 #include <sys/rwlock.h>
51 #include <sys/taskqueue.h>
52 
53 #include <net/if.h>
54 #include <net/if_var.h>
55 #include <net/if_dl.h>
56 #include <net/ethernet.h>
57 #include <net/if_media.h>
58 #include <net/if_types.h>
59 
60 #include <net/if_lagg.h>
61 #include <net/ieee8023ad_lacp.h>
62 
63 /*
64  * actor system priority and port priority.
65  * XXX should be configurable.
66  */
67 
68 #define	LACP_SYSTEM_PRIO	0x8000
69 #define	LACP_PORT_PRIO		0x8000
70 
71 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
72     { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
73 
74 static const struct tlv_template lacp_info_tlv_template[] = {
75 	{ LACP_TYPE_ACTORINFO,
76 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
77 	{ LACP_TYPE_PARTNERINFO,
78 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
79 	{ LACP_TYPE_COLLECTORINFO,
80 	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
81 	{ 0, 0 },
82 };
83 
84 static const struct tlv_template marker_info_tlv_template[] = {
85 	{ MARKER_TYPE_INFO,
86 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
87 	{ 0, 0 },
88 };
89 
90 static const struct tlv_template marker_response_tlv_template[] = {
91 	{ MARKER_TYPE_RESPONSE,
92 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
93 	{ 0, 0 },
94 };
95 
96 typedef void (*lacp_timer_func_t)(struct lacp_port *);
97 
98 static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
99 static void	lacp_fill_markerinfo(struct lacp_port *,
100 		    struct lacp_markerinfo *);
101 
102 static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
103 static void	lacp_suppress_distributing(struct lacp_softc *,
104 		    struct lacp_aggregator *);
105 static void	lacp_transit_expire(void *);
106 static void	lacp_update_portmap(struct lacp_softc *);
107 static void	lacp_select_active_aggregator(struct lacp_softc *);
108 static uint16_t	lacp_compose_key(struct lacp_port *);
109 static int	tlv_check(const void *, size_t, const struct tlvhdr *,
110 		    const struct tlv_template *, boolean_t);
111 static void	lacp_tick(void *);
112 
113 static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
114 		    const struct lacp_port *);
115 static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
116 		    const struct lacp_peerinfo *);
117 static int	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
118 		    const struct lacp_port *);
119 static int	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
120 		    const struct lacp_peerinfo *);
121 
122 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
123 		    struct lacp_port *);
124 static void	lacp_aggregator_addref(struct lacp_softc *,
125 		    struct lacp_aggregator *);
126 static void	lacp_aggregator_delref(struct lacp_softc *,
127 		    struct lacp_aggregator *);
128 
129 /* receive machine */
130 
131 static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
132 static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
133 static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
134 static void	lacp_sm_rx_timer(struct lacp_port *);
135 static void	lacp_sm_rx_set_expired(struct lacp_port *);
136 static void	lacp_sm_rx_update_ntt(struct lacp_port *,
137 		    const struct lacpdu *);
138 static void	lacp_sm_rx_record_pdu(struct lacp_port *,
139 		    const struct lacpdu *);
140 static void	lacp_sm_rx_update_selected(struct lacp_port *,
141 		    const struct lacpdu *);
142 static void	lacp_sm_rx_record_default(struct lacp_port *);
143 static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
144 static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
145 		    const struct lacp_peerinfo *);
146 
147 /* mux machine */
148 
149 static void	lacp_sm_mux(struct lacp_port *);
150 static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
151 static void	lacp_sm_mux_timer(struct lacp_port *);
152 
153 /* periodic transmit machine */
154 
155 static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
156 static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
157 static void	lacp_sm_ptx_timer(struct lacp_port *);
158 
159 /* transmit machine */
160 
161 static void	lacp_sm_tx(struct lacp_port *);
162 static void	lacp_sm_assert_ntt(struct lacp_port *);
163 
164 static void	lacp_run_timers(struct lacp_port *);
165 static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
166 		    const struct lacp_peerinfo *);
167 static int	lacp_compare_systemid(const struct lacp_systemid *,
168 		    const struct lacp_systemid *);
169 static void	lacp_port_enable(struct lacp_port *);
170 static void	lacp_port_disable(struct lacp_port *);
171 static void	lacp_select(struct lacp_port *);
172 static void	lacp_unselect(struct lacp_port *);
173 static void	lacp_disable_collecting(struct lacp_port *);
174 static void	lacp_enable_collecting(struct lacp_port *);
175 static void	lacp_disable_distributing(struct lacp_port *);
176 static void	lacp_enable_distributing(struct lacp_port *);
177 static int	lacp_xmit_lacpdu(struct lacp_port *);
178 static int	lacp_xmit_marker(struct lacp_port *);
179 
180 /* Debugging */
181 
182 static void	lacp_dump_lacpdu(const struct lacpdu *);
183 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
184 		    size_t);
185 static const char *lacp_format_lagid(const struct lacp_peerinfo *,
186 		    const struct lacp_peerinfo *, char *, size_t);
187 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
188 		    char *, size_t);
189 static const char *lacp_format_state(uint8_t, char *, size_t);
190 static const char *lacp_format_mac(const uint8_t *, char *, size_t);
191 static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
192 		    size_t);
193 static const char *lacp_format_portid(const struct lacp_portid *, char *,
194 		    size_t);
195 static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
196 		    __attribute__((__format__(__printf__, 2, 3)));
197 
198 VNET_DEFINE_STATIC(int, lacp_debug);
199 #define	V_lacp_debug	VNET(lacp_debug)
200 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad");
201 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
202     &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");
203 
204 VNET_DEFINE_STATIC(int, lacp_default_strict_mode) = 1;
205 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode,
206     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_default_strict_mode), 0,
207     "LACP strict protocol compliance default");
208 
209 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
210 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
211 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }
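
/*
 * The debug knob above is reachable at run time as the sysctl
 * net.link.lagg.lacp.debug (also settable as a loader tunable, per
 * CTLFLAG_RWTUN).  It is a bit mask: 0x01 enables LACP_DPRINTF output,
 * 0x02 enables LACP_TRACE and 0x04 enables LACP_TPRINTF, so for example
 * "sysctl net.link.lagg.lacp.debug=3" turns on the first two.
 */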
212 
213 /*
214  * partner administration variables.
215  * XXX should be configurable.
216  */
217 
218 static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
219 	.lip_systemid = { .lsi_prio = 0xffff },
220 	.lip_portid = { .lpi_prio = 0xffff },
221 	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
222 	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
223 };
224 
225 static const struct lacp_peerinfo lacp_partner_admin_strict = {
226 	.lip_systemid = { .lsi_prio = 0xffff },
227 	.lip_portid = { .lpi_prio = 0xffff },
228 	.lip_state = 0,
229 };
230 
231 static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
232 	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
233 	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
234 	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
235 };
236 
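/*
 * lacp_input: demultiplex an incoming slow protocols frame by subtype.
 * LACPDUs and marker PDUs are consumed here and NULL is returned; any
 * other subtype is handed back to the caller untouched.
 */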
237 struct mbuf *
238 lacp_input(struct lagg_port *lgp, struct mbuf *m)
239 {
240 	struct lacp_port *lp = LACP_PORT(lgp);
241 	uint8_t subtype;
242 
243 	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
244 		m_freem(m);
245 		return (NULL);
246 	}
247 
248 	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
249 	switch (subtype) {
250 	case SLOWPROTOCOLS_SUBTYPE_LACP:
251 		lacp_pdu_input(lp, m);
252 		return (NULL);
253 
254 	case SLOWPROTOCOLS_SUBTYPE_MARKER:
255 		lacp_marker_input(lp, m);
256 		return (NULL);
257 	}
258 
259 	/* Not a subtype we are interested in */
260 	return (m);
261 }
262 
263 /*
264  * lacp_pdu_input: process lacpdu
265  */
266 static int
267 lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
268 {
269 	struct lacp_softc *lsc = lp->lp_lsc;
270 	struct lacpdu *du;
271 	int error = 0;
272 
273 	if (m->m_pkthdr.len != sizeof(*du)) {
274 		goto bad;
275 	}
276 
277 	if ((m->m_flags & M_MCAST) == 0) {
278 		goto bad;
279 	}
280 
281 	if (m->m_len < sizeof(*du)) {
282 		m = m_pullup(m, sizeof(*du));
283 		if (m == NULL) {
284 			return (ENOMEM);
285 		}
286 	}
287 
288 	du = mtod(m, struct lacpdu *);
289 
290 	if (memcmp(&du->ldu_eh.ether_dhost,
291 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
292 		goto bad;
293 	}
294 
295 	/*
296 	 * ignore the version field for compatibility with
297 	 * future protocol revisions.
298 	 */
299 #if 0
300 	if (du->ldu_sph.sph_version != 1) {
301 		goto bad;
302 	}
303 #endif
304 
305 	/*
306 	 * don't check the tlv types, for compatibility with
307 	 * future protocol revisions.
308 	 */
309 	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
310 	    lacp_info_tlv_template, FALSE)) {
311 		goto bad;
312 	}
313 
314 	if (V_lacp_debug > 0) {
315 		lacp_dprintf(lp, "lacpdu receive\n");
316 		lacp_dump_lacpdu(du);
317 	}
318 
319 	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
320 		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
321 		goto bad;
322 	}
323 
324 	LACP_LOCK(lsc);
325 	lacp_sm_rx(lp, du);
326 	LACP_UNLOCK(lsc);
327 
328 	m_freem(m);
329 	return (error);
330 
331 bad:
332 	m_freem(m);
333 	return (EINVAL);
334 }
335 
336 static void
337 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
338 {
339 	struct lagg_port *lgp = lp->lp_lagg;
340 	struct lagg_softc *sc = lgp->lp_softc;
341 
342 	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
343 	memcpy(&info->lip_systemid.lsi_mac,
344 	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
345 	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
346 	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
347 	info->lip_state = lp->lp_state;
348 }
349 
350 static void
351 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
352 {
353 	struct ifnet *ifp = lp->lp_ifp;
354 
355 	/* Fill in the port index and system id (encoded as the MAC) */
356 	info->mi_rq_port = htons(ifp->if_index);
357 	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
358 	info->mi_rq_xid = htonl(0);
359 }
360 
361 static int
362 lacp_xmit_lacpdu(struct lacp_port *lp)
363 {
364 	struct lagg_port *lgp = lp->lp_lagg;
365 	struct mbuf *m;
366 	struct lacpdu *du;
367 	int error;
368 
369 	LACP_LOCK_ASSERT(lp->lp_lsc);
370 
371 	m = m_gethdr(M_NOWAIT, MT_DATA);
372 	if (m == NULL) {
373 		return (ENOMEM);
374 	}
375 	m->m_len = m->m_pkthdr.len = sizeof(*du);
376 
377 	du = mtod(m, struct lacpdu *);
378 	memset(du, 0, sizeof(*du));
379 
380 	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
381 	    ETHER_ADDR_LEN);
382 	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
383 	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);
384 
385 	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
386 	du->ldu_sph.sph_version = 1;
387 
388 	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
389 	du->ldu_actor = lp->lp_actor;
390 
391 	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
392 	    sizeof(du->ldu_partner));
393 	du->ldu_partner = lp->lp_partner;
394 
395 	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
396 	    sizeof(du->ldu_collector));
397 	du->ldu_collector.lci_maxdelay = 0;
398 
399 	if (V_lacp_debug > 0) {
400 		lacp_dprintf(lp, "lacpdu transmit\n");
401 		lacp_dump_lacpdu(du);
402 	}
403 
404 	m->m_flags |= M_MCAST;
405 
406 	/*
407 	 * XXX should use higher priority queue.
408 	 * otherwise network congestion can break aggregation.
409 	 */
410 
411 	error = lagg_enqueue(lp->lp_ifp, m);
412 	return (error);
413 }
414 
415 static int
416 lacp_xmit_marker(struct lacp_port *lp)
417 {
418 	struct lagg_port *lgp = lp->lp_lagg;
419 	struct mbuf *m;
420 	struct markerdu *mdu;
421 	int error;
422 
423 	LACP_LOCK_ASSERT(lp->lp_lsc);
424 
425 	m = m_gethdr(M_NOWAIT, MT_DATA);
426 	if (m == NULL) {
427 		return (ENOMEM);
428 	}
429 	m->m_len = m->m_pkthdr.len = sizeof(*mdu);
430 
431 	mdu = mtod(m, struct markerdu *);
432 	memset(mdu, 0, sizeof(*mdu));
433 
434 	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
435 	    ETHER_ADDR_LEN);
436 	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
437 	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);
438 
439 	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
440 	mdu->mdu_sph.sph_version = 1;
441 
442 	/* Bump the transaction id and copy over the marker info */
443 	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
444 	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
445 	mdu->mdu_info = lp->lp_marker;
446 
447 	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
448 	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
449 	    ntohl(mdu->mdu_info.mi_rq_xid)));
450 
451 	m->m_flags |= M_MCAST;
452 	error = lagg_enqueue(lp->lp_ifp, m);
453 	return (error);
454 }
455 
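/*
 * lacp_linkstate: re-evaluate a port after a media or link state change.
 * A port is only aggregatable while it is an active full-duplex Ethernet
 * link; if its state or operational key changed as a result, it is moved
 * back to UNSELECTED so that selection runs again on the next tick.
 */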
456 void
457 lacp_linkstate(struct lagg_port *lgp)
458 {
459 	struct lacp_port *lp = LACP_PORT(lgp);
460 	struct lacp_softc *lsc = lp->lp_lsc;
461 	struct ifnet *ifp = lgp->lp_ifp;
462 	struct ifmediareq ifmr;
463 	int error = 0;
464 	u_int media;
465 	uint8_t old_state;
466 	uint16_t old_key;
467 
468 	bzero((char *)&ifmr, sizeof(ifmr));
469 	error = (*ifp->if_ioctl)(ifp, SIOCGIFXMEDIA, (caddr_t)&ifmr);
470 	if (error != 0) {
471 		bzero((char *)&ifmr, sizeof(ifmr));
472 		error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
473 	}
474 	if (error != 0)
475 		return;
476 
477 	LACP_LOCK(lsc);
478 	media = ifmr.ifm_active;
479 	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
480 	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
481 	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
482 	old_state = lp->lp_state;
483 	old_key = lp->lp_key;
484 
485 	lp->lp_media = media;
486 	/*
487 	 * If the port is not an active, full-duplex Ethernet link then it
488 	 * cannot be aggregated.
489 	 */
490 	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
491 	    ifp->if_link_state != LINK_STATE_UP) {
492 		lacp_port_disable(lp);
493 	} else {
494 		lacp_port_enable(lp);
495 	}
496 	lp->lp_key = lacp_compose_key(lp);
497 
498 	if (old_state != lp->lp_state || old_key != lp->lp_key) {
499 		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
500 		lp->lp_selected = LACP_UNSELECTED;
501 	}
502 	LACP_UNLOCK(lsc);
503 }
504 
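/*
 * lacp_tick: per-second housekeeping callout.  For every aggregatable
 * port, run the protocol timers and then the selection, mux, transmit
 * and periodic transmit machines, and finally re-arm the callout.
 */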
505 static void
506 lacp_tick(void *arg)
507 {
508 	struct lacp_softc *lsc = arg;
509 	struct lacp_port *lp;
510 
511 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
512 		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
513 			continue;
514 
515 		CURVNET_SET(lp->lp_ifp->if_vnet);
516 		lacp_run_timers(lp);
517 
518 		lacp_select(lp);
519 		lacp_sm_mux(lp);
520 		lacp_sm_tx(lp);
521 		lacp_sm_ptx_tx_schedule(lp);
522 		CURVNET_RESTORE();
523 	}
524 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
525 }
526 
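/*
 * lacp_port_create: attach LACP state to a new lagg port.  Joins the
 * slow protocols multicast group, allocates and links the lacp_port,
 * fills in the actor and marker info, and starts the port in active
 * LACP mode in the EXPIRED state before evaluating its current link
 * state.
 */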
527 int
528 lacp_port_create(struct lagg_port *lgp)
529 {
530 	struct lagg_softc *sc = lgp->lp_softc;
531 	struct lacp_softc *lsc = LACP_SOFTC(sc);
532 	struct lacp_port *lp;
533 	struct ifnet *ifp = lgp->lp_ifp;
534 	struct sockaddr_dl sdl;
535 	struct ifmultiaddr *rifma = NULL;
536 	int error;
537 
538 	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
539 	sdl.sdl_alen = ETHER_ADDR_LEN;
540 
541 	bcopy(&ethermulticastaddr_slowprotocols,
542 	    LLADDR(&sdl), ETHER_ADDR_LEN);
543 	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
544 	if (error) {
545 		printf("%s: ADDMULTI failed on %s\n", __func__,
546 		    lgp->lp_ifp->if_xname);
547 		return (error);
548 	}
549 
550 	lp = malloc(sizeof(struct lacp_port),
551 	    M_DEVBUF, M_NOWAIT|M_ZERO);
552 	if (lp == NULL)
553 		return (ENOMEM);
554 
555 	LACP_LOCK(lsc);
556 	lgp->lp_psc = lp;
557 	lp->lp_ifp = ifp;
558 	lp->lp_lagg = lgp;
559 	lp->lp_lsc = lsc;
560 	lp->lp_ifma = rifma;
561 
562 	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);
563 
564 	lacp_fill_actorinfo(lp, &lp->lp_actor);
565 	lacp_fill_markerinfo(lp, &lp->lp_marker);
566 	lp->lp_state = LACP_STATE_ACTIVITY;
567 	lp->lp_aggregator = NULL;
568 	lacp_sm_rx_set_expired(lp);
569 	LACP_UNLOCK(lsc);
570 	lacp_linkstate(lgp);
571 
572 	return (0);
573 }
574 
575 void
576 lacp_port_destroy(struct lagg_port *lgp)
577 {
578 	struct lacp_port *lp = LACP_PORT(lgp);
579 	struct lacp_softc *lsc = lp->lp_lsc;
580 	int i;
581 
582 	LACP_LOCK(lsc);
583 	for (i = 0; i < LACP_NTIMER; i++) {
584 		LACP_TIMER_DISARM(lp, i);
585 	}
586 
587 	lacp_disable_collecting(lp);
588 	lacp_disable_distributing(lp);
589 	lacp_unselect(lp);
590 
591 	LIST_REMOVE(lp, lp_next);
592 	LACP_UNLOCK(lsc);
593 
594 	/* The address may have already been removed by if_purgemaddrs() */
595 	if (!lgp->lp_detaching)
596 		if_delmulti_ifma(lp->lp_ifma);
597 
598 	free(lp, M_DEVBUF);
599 }
600 
601 void
602 lacp_req(struct lagg_softc *sc, void *data)
603 {
604 	struct lacp_opreq *req = (struct lacp_opreq *)data;
605 	struct lacp_softc *lsc = LACP_SOFTC(sc);
606 	struct lacp_aggregator *la;
607 
608 	bzero(req, sizeof(struct lacp_opreq));
609 
610 	/*
611 	 * If the LACP softc is NULL, return with the opreq structure full of
612 	 * zeros.  It is normal for the softc to be NULL while the lagg is
613 	 * being destroyed.
614 	 */
615 	if (NULL == lsc)
616 		return;
617 
618 	LACP_LOCK(lsc);
619 	la = lsc->lsc_active_aggregator;
620 	if (la != NULL) {
621 		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
622 		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
623 		    ETHER_ADDR_LEN);
624 		req->actor_key = ntohs(la->la_actor.lip_key);
625 		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
626 		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
627 		req->actor_state = la->la_actor.lip_state;
628 
629 		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
630 		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
631 		    ETHER_ADDR_LEN);
632 		req->partner_key = ntohs(la->la_partner.lip_key);
633 		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
634 		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
635 		req->partner_state = la->la_partner.lip_state;
636 	}
637 	LACP_UNLOCK(lsc);
638 }
639 
640 void
641 lacp_portreq(struct lagg_port *lgp, void *data)
642 {
643 	struct lacp_opreq *req = (struct lacp_opreq *)data;
644 	struct lacp_port *lp = LACP_PORT(lgp);
645 	struct lacp_softc *lsc = lp->lp_lsc;
646 
647 	LACP_LOCK(lsc);
648 	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
649 	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
650 	    ETHER_ADDR_LEN);
651 	req->actor_key = ntohs(lp->lp_actor.lip_key);
652 	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
653 	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
654 	req->actor_state = lp->lp_actor.lip_state;
655 
656 	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
657 	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
658 	    ETHER_ADDR_LEN);
659 	req->partner_key = ntohs(lp->lp_partner.lip_key);
660 	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
661 	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
662 	req->partner_state = lp->lp_partner.lip_state;
663 	LACP_UNLOCK(lsc);
664 }
665 
666 static void
667 lacp_disable_collecting(struct lacp_port *lp)
668 {
669 	LACP_DPRINTF((lp, "collecting disabled\n"));
670 	lp->lp_state &= ~LACP_STATE_COLLECTING;
671 }
672 
673 static void
674 lacp_enable_collecting(struct lacp_port *lp)
675 {
676 	LACP_DPRINTF((lp, "collecting enabled\n"));
677 	lp->lp_state |= LACP_STATE_COLLECTING;
678 }
679 
680 static void
681 lacp_disable_distributing(struct lacp_port *lp)
682 {
683 	struct lacp_aggregator *la = lp->lp_aggregator;
684 	struct lacp_softc *lsc = lp->lp_lsc;
685 	struct lagg_softc *sc = lsc->lsc_softc;
686 	char buf[LACP_LAGIDSTR_MAX+1];
687 
688 	LACP_LOCK_ASSERT(lsc);
689 
690 	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
691 		return;
692 	}
693 
694 	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
695 	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
696 	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));
697 
698 	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
699 	    "nports %d -> %d\n",
700 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
701 	    la->la_nports, la->la_nports - 1));
702 
703 	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
704 	la->la_nports--;
705 	sc->sc_active = la->la_nports;
706 
707 	if (lsc->lsc_active_aggregator == la) {
708 		lacp_suppress_distributing(lsc, la);
709 		lacp_select_active_aggregator(lsc);
710 		/* regenerate the port map, the active aggregator has changed */
711 		lacp_update_portmap(lsc);
712 	}
713 
714 	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
715 	if_link_state_change(sc->sc_ifp,
716 	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
717 }
718 
719 static void
720 lacp_enable_distributing(struct lacp_port *lp)
721 {
722 	struct lacp_aggregator *la = lp->lp_aggregator;
723 	struct lacp_softc *lsc = lp->lp_lsc;
724 	struct lagg_softc *sc = lsc->lsc_softc;
725 	char buf[LACP_LAGIDSTR_MAX+1];
726 
727 	LACP_LOCK_ASSERT(lsc);
728 
729 	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
730 		return;
731 	}
732 
733 	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
734 	    "nports %d -> %d\n",
735 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
736 	    la->la_nports, la->la_nports + 1));
737 
738 	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
739 	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
740 	la->la_nports++;
741 	sc->sc_active = la->la_nports;
742 
743 	lp->lp_state |= LACP_STATE_DISTRIBUTING;
744 
745 	if (lsc->lsc_active_aggregator == la) {
746 		lacp_suppress_distributing(lsc, la);
747 		lacp_update_portmap(lsc);
748 	} else
749 		/* try to become the active aggregator */
750 		lacp_select_active_aggregator(lsc);
751 
752 	if_link_state_change(sc->sc_ifp,
753 	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
754 }
755 
756 static void
757 lacp_transit_expire(void *vp)
758 {
759 	struct lacp_softc *lsc = vp;
760 
761 	LACP_LOCK_ASSERT(lsc);
762 
763 	CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
764 	LACP_TRACE(NULL);
765 	CURVNET_RESTORE();
766 
767 	lsc->lsc_suppress_distributing = FALSE;
768 }
769 
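/*
 * lacp_attach: allocate and initialize the per-lagg LACP state (hash
 * key, aggregator and port lists, lock and callouts), and start the
 * protocol immediately if the lagg interface is already running.
 */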
770 void
771 lacp_attach(struct lagg_softc *sc)
772 {
773 	struct lacp_softc *lsc;
774 
775 	lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);
776 
777 	sc->sc_psc = lsc;
778 	lsc->lsc_softc = sc;
779 
780 	lsc->lsc_hashkey = m_ether_tcpip_hash_init();
781 	lsc->lsc_active_aggregator = NULL;
782 	lsc->lsc_strict_mode = VNET(lacp_default_strict_mode);
783 	LACP_LOCK_INIT(lsc);
784 	TAILQ_INIT(&lsc->lsc_aggregators);
785 	LIST_INIT(&lsc->lsc_ports);
786 
787 	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
788 	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);
789 
790 	/* if the lagg is already running then start the protocol as well */
791 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
792 		lacp_init(sc);
793 }
794 
795 void
796 lacp_detach(void *psc)
797 {
798 	struct lacp_softc *lsc = (struct lacp_softc *)psc;
799 
800 	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
801 	    ("aggregators still active"));
802 	KASSERT(lsc->lsc_active_aggregator == NULL,
803 	    ("aggregator still attached"));
804 
805 	callout_drain(&lsc->lsc_transit_callout);
806 	callout_drain(&lsc->lsc_callout);
807 
808 	LACP_LOCK_DESTROY(lsc);
809 	free(lsc, M_DEVBUF);
810 }
811 
812 void
813 lacp_init(struct lagg_softc *sc)
814 {
815 	struct lacp_softc *lsc = LACP_SOFTC(sc);
816 
817 	LACP_LOCK(lsc);
818 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
819 	LACP_UNLOCK(lsc);
820 }
821 
822 void
823 lacp_stop(struct lagg_softc *sc)
824 {
825 	struct lacp_softc *lsc = LACP_SOFTC(sc);
826 
827 	LACP_LOCK(lsc);
828 	callout_stop(&lsc->lsc_transit_callout);
829 	callout_stop(&lsc->lsc_callout);
830 	LACP_UNLOCK(lsc);
831 }
832 
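/*
 * lacp_select_tx_port: pick the distributing port an outgoing packet
 * should use.  The packet is hashed (using the flowid when available and
 * allowed, otherwise a software header hash, optionally constrained to
 * the packet's NUMA domain) modulo the number of ports in the active
 * portmap.  NULL is returned while marker transit is being waited on or
 * when there is no active aggregator.
 */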
833 struct lagg_port *
834 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
835 {
836 	struct lacp_softc *lsc = LACP_SOFTC(sc);
837 	struct lacp_portmap *pm;
838 	struct lacp_port *lp;
839 	struct lacp_port **map;
840 	uint32_t hash;
841 	int count;
842 
843 	if (__predict_false(lsc->lsc_suppress_distributing)) {
844 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
845 		return (NULL);
846 	}
847 
848 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
849 	if (pm->pm_count == 0) {
850 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
851 		return (NULL);
852 	}
853 
854 #ifdef NUMA
855 	if ((sc->sc_opts & LAGG_OPT_USE_NUMA) &&
856 	    pm->pm_num_dom > 1 && m->m_pkthdr.numa_domain < MAXMEMDOM) {
857 		count = pm->pm_numa[m->m_pkthdr.numa_domain].count;
858 		if (count > 0) {
859 			map = pm->pm_numa[m->m_pkthdr.numa_domain].map;
860 		} else {
861 			/* No ports on this domain; fall back to the full port map. */
862 			map = pm->pm_map;
863 			count = pm->pm_count;
864 		}
865 	} else
866 #endif
867 	{
868 		map = pm->pm_map;
869 		count = pm->pm_count;
870 	}
871 	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
872 	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
873 		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
874 	else
875 		hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
876 
877 	hash %= count;
878 	lp = map[hash];
879 
880 	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
881 	    ("aggregated port is not distributing"));
882 
883 	return (lp->lp_lagg);
884 }
885 
886 #if defined(RATELIMIT) || defined(KERN_TLS)
887 struct lagg_port *
888 lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t flowid)
889 {
890 	struct lacp_softc *lsc = LACP_SOFTC(sc);
891 	struct lacp_portmap *pm;
892 	struct lacp_port *lp;
893 	uint32_t hash;
894 
895 	if (__predict_false(lsc->lsc_suppress_distributing)) {
896 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
897 		return (NULL);
898 	}
899 
900 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
901 	if (pm->pm_count == 0) {
902 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
903 		return (NULL);
904 	}
905 
906 	hash = flowid >> sc->flowid_shift;
907 	hash %= pm->pm_count;
908 	lp = pm->pm_map[hash];
909 
910 	return (lp->lp_lagg);
911 }
912 #endif
913 
914 /*
915  * lacp_suppress_distributing: drop transmit packets for a while
916  * to preserve packet ordering.
917  */
918 
919 static void
920 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
921 {
922 	struct lacp_port *lp;
923 
924 	if (lsc->lsc_active_aggregator != la) {
925 		return;
926 	}
927 
928 	LACP_TRACE(NULL);
929 
930 	lsc->lsc_suppress_distributing = TRUE;
931 
932 	/* send a marker frame down each port to verify the queues are empty */
933 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
934 		lp->lp_flags |= LACP_PORT_MARK;
935 		lacp_xmit_marker(lp);
936 	}
937 
938 	/* set a timeout for the marker frames */
939 	callout_reset(&lsc->lsc_transit_callout,
940 	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
941 }
942 
943 static int
944 lacp_compare_peerinfo(const struct lacp_peerinfo *a,
945     const struct lacp_peerinfo *b)
946 {
947 	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
948 }
949 
950 static int
951 lacp_compare_systemid(const struct lacp_systemid *a,
952     const struct lacp_systemid *b)
953 {
954 	return (memcmp(a, b, sizeof(*a)));
955 }
956 
957 #if 0	/* unused */
958 static int
959 lacp_compare_portid(const struct lacp_portid *a,
960     const struct lacp_portid *b)
961 {
962 	return (memcmp(a, b, sizeof(*a)));
963 }
964 #endif
965 
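
/*
 * lacp_aggregator_bandwidth: estimate the aggregate bandwidth of an
 * aggregator as the media baudrate of its first distributing port times
 * the number of ports, on the assumption that all member ports run at
 * the same speed (which the key computation arranges).
 */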
966 static uint64_t
967 lacp_aggregator_bandwidth(struct lacp_aggregator *la)
968 {
969 	struct lacp_port *lp;
970 	uint64_t speed;
971 
972 	lp = TAILQ_FIRST(&la->la_ports);
973 	if (lp == NULL) {
974 		return (0);
975 	}
976 
977 	speed = ifmedia_baudrate(lp->lp_media);
978 	speed *= la->la_nports;
979 	if (speed == 0) {
980 		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
981 		    lp->lp_media, la->la_nports));
982 	}
983 
984 	return (speed);
985 }
986 
987 /*
988  * lacp_select_active_aggregator: select an aggregator to be used to transmit
989  * packets from the lagg(4) interface.
990  */
991 
992 static void
993 lacp_select_active_aggregator(struct lacp_softc *lsc)
994 {
995 	struct lacp_aggregator *la;
996 	struct lacp_aggregator *best_la = NULL;
997 	uint64_t best_speed = 0;
998 	char buf[LACP_LAGIDSTR_MAX+1];
999 
1000 	LACP_TRACE(NULL);
1001 
1002 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1003 		uint64_t speed;
1004 
1005 		if (la->la_nports == 0) {
1006 			continue;
1007 		}
1008 
1009 		speed = lacp_aggregator_bandwidth(la);
1010 		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
1011 		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
1012 		    speed, la->la_nports));
1013 
1014 		/*
1015 		 * This aggregator is chosen if the partner has a better
1016 		 * system priority, the total aggregated speed is higher, or
1017 		 * it is already the active aggregator at an equal speed.
1018 		 */
1019 		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
1020 		    LACP_SYS_PRI(best_la->la_partner)) ||
1021 		    speed > best_speed ||
1022 		    (speed == best_speed &&
1023 		    la == lsc->lsc_active_aggregator)) {
1024 			best_la = la;
1025 			best_speed = speed;
1026 		}
1027 	}
1028 
1029 	KASSERT(best_la == NULL || best_la->la_nports > 0,
1030 	    ("invalid aggregator refcnt"));
1031 	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
1032 	    ("invalid aggregator list"));
1033 
1034 	if (lsc->lsc_active_aggregator != best_la) {
1035 		LACP_DPRINTF((NULL, "active aggregator changed\n"));
1036 		LACP_DPRINTF((NULL, "old %s\n",
1037 		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
1038 		    buf, sizeof(buf))));
1039 	} else {
1040 		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
1041 	}
1042 	LACP_DPRINTF((NULL, "new %s\n",
1043 	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));
1044 
1045 	if (lsc->lsc_active_aggregator != best_la) {
1046 		lsc->lsc_active_aggregator = best_la;
1047 		lacp_update_portmap(lsc);
1048 		if (best_la) {
1049 			lacp_suppress_distributing(lsc, best_la);
1050 		}
1051 	}
1052 }
1053 
1054 /*
1055  * Update the inactive portmap array with the new list of ports and
1056  * make it live.
1057  */
1058 static void
1059 lacp_update_portmap(struct lacp_softc *lsc)
1060 {
1061 	struct lagg_softc *sc = lsc->lsc_softc;
1062 	struct lacp_aggregator *la;
1063 	struct lacp_portmap *p;
1064 	struct lacp_port *lp;
1065 	uint64_t speed;
1066 	u_int newmap;
1067 	int i;
1068 #ifdef NUMA
1069 	int count;
1070 	uint8_t domain;
1071 #endif
1072 
1073 	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
1074 	p = &lsc->lsc_pmap[newmap];
1075 	la = lsc->lsc_active_aggregator;
1076 	speed = 0;
1077 	bzero(p, sizeof(struct lacp_portmap));
1078 
1079 	if (la != NULL && la->la_nports > 0) {
1080 		p->pm_count = la->la_nports;
1081 		i = 0;
1082 		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q) {
1083 			p->pm_map[i++] = lp;
1084 #ifdef NUMA
1085 			domain = lp->lp_ifp->if_numa_domain;
1086 			if (domain >= MAXMEMDOM)
1087 				continue;
1088 			count = p->pm_numa[domain].count;
1089 			p->pm_numa[domain].map[count] = lp;
1090 			p->pm_numa[domain].count++;
1091 #endif
1092 		}
1093 		KASSERT(i == p->pm_count, ("Invalid port count"));
1094 
1095 #ifdef NUMA
1096 		for (i = 0; i < MAXMEMDOM; i++) {
1097 			if (p->pm_numa[i].count != 0)
1098 				p->pm_num_dom++;
1099 		}
1100 #endif
1101 		speed = lacp_aggregator_bandwidth(la);
1102 	}
1103 	sc->sc_ifp->if_baudrate = speed;
1104 
1105 	/* switch the active portmap over */
1106 	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
1107 	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
1108 		    lsc->lsc_activemap,
1109 		    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
1110 }
1111 
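/*
 * lacp_compose_key: derive the operational key (network byte order) for
 * a port.  Aggregatable ports encode a media speed class in bits 0..4
 * and part of the lagg interface's if_index in bits 5..14, so only
 * like-speed ports of the same lagg share a key; non-aggregatable ports
 * get bit 15 set plus their own if_index so their keys stay (mostly)
 * unique.
 */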
1112 static uint16_t
1113 lacp_compose_key(struct lacp_port *lp)
1114 {
1115 	struct lagg_port *lgp = lp->lp_lagg;
1116 	struct lagg_softc *sc = lgp->lp_softc;
1117 	u_int media = lp->lp_media;
1118 	uint16_t key;
1119 
1120 	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {
1121 
1122 		/*
1123 		 * non-aggregatable links should have unique keys.
1124 		 *
1125 		 * XXX this isn't really unique, as only 15 bits of if_index fit here.
1126 		 */
1127 
1128 		/* bit 0..14:	(some bits of) if_index of this port */
1129 		key = lp->lp_ifp->if_index;
1130 		/* bit 15:	1 */
1131 		key |= 0x8000;
1132 	} else {
1133 		u_int subtype = IFM_SUBTYPE(media);
1134 
1135 		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
1136 		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));
1137 
1138 		/* bit 0..4:	IFM_SUBTYPE modulo speed */
1139 		switch (subtype) {
1140 		case IFM_10_T:
1141 		case IFM_10_2:
1142 		case IFM_10_5:
1143 		case IFM_10_STP:
1144 		case IFM_10_FL:
1145 			key = IFM_10_T;
1146 			break;
1147 		case IFM_100_TX:
1148 		case IFM_100_FX:
1149 		case IFM_100_T4:
1150 		case IFM_100_VG:
1151 		case IFM_100_T2:
1152 		case IFM_100_T:
1153 		case IFM_100_SGMII:
1154 			key = IFM_100_TX;
1155 			break;
1156 		case IFM_1000_SX:
1157 		case IFM_1000_LX:
1158 		case IFM_1000_CX:
1159 		case IFM_1000_T:
1160 		case IFM_1000_KX:
1161 		case IFM_1000_SGMII:
1162 		case IFM_1000_CX_SGMII:
1163 			key = IFM_1000_SX;
1164 			break;
1165 		case IFM_10G_LR:
1166 		case IFM_10G_SR:
1167 		case IFM_10G_CX4:
1168 		case IFM_10G_TWINAX:
1169 		case IFM_10G_TWINAX_LONG:
1170 		case IFM_10G_LRM:
1171 		case IFM_10G_T:
1172 		case IFM_10G_KX4:
1173 		case IFM_10G_KR:
1174 		case IFM_10G_CR1:
1175 		case IFM_10G_ER:
1176 		case IFM_10G_SFI:
1177 		case IFM_10G_AOC:
1178 			key = IFM_10G_LR;
1179 			break;
1180 		case IFM_20G_KR2:
1181 			key = IFM_20G_KR2;
1182 			break;
1183 		case IFM_2500_KX:
1184 		case IFM_2500_T:
1185 		case IFM_2500_X:
1186 			key = IFM_2500_KX;
1187 			break;
1188 		case IFM_5000_T:
1189 		case IFM_5000_KR:
1190 		case IFM_5000_KR_S:
1191 		case IFM_5000_KR1:
1192 			key = IFM_5000_T;
1193 			break;
1194 		case IFM_50G_PCIE:
1195 		case IFM_50G_CR2:
1196 		case IFM_50G_KR2:
1197 		case IFM_50G_KR4:
1198 		case IFM_50G_SR2:
1199 		case IFM_50G_LR2:
1200 		case IFM_50G_LAUI2_AC:
1201 		case IFM_50G_LAUI2:
1202 		case IFM_50G_AUI2_AC:
1203 		case IFM_50G_AUI2:
1204 		case IFM_50G_CP:
1205 		case IFM_50G_SR:
1206 		case IFM_50G_LR:
1207 		case IFM_50G_FR:
1208 		case IFM_50G_KR_PAM4:
1209 		case IFM_50G_AUI1_AC:
1210 		case IFM_50G_AUI1:
1211 			key = IFM_50G_PCIE;
1212 			break;
1213 		case IFM_56G_R4:
1214 			key = IFM_56G_R4;
1215 			break;
1216 		case IFM_25G_PCIE:
1217 		case IFM_25G_CR:
1218 		case IFM_25G_KR:
1219 		case IFM_25G_SR:
1220 		case IFM_25G_LR:
1221 		case IFM_25G_ACC:
1222 		case IFM_25G_AOC:
1223 		case IFM_25G_T:
1224 		case IFM_25G_CR_S:
1225 		case IFM_25G_CR1:
1226 		case IFM_25G_KR_S:
1227 		case IFM_25G_AUI:
1228 		case IFM_25G_KR1:
1229 			key = IFM_25G_PCIE;
1230 			break;
1231 		case IFM_40G_CR4:
1232 		case IFM_40G_SR4:
1233 		case IFM_40G_LR4:
1234 		case IFM_40G_XLPPI:
1235 		case IFM_40G_KR4:
1236 		case IFM_40G_XLAUI:
1237 		case IFM_40G_XLAUI_AC:
1238 		case IFM_40G_ER4:
1239 			key = IFM_40G_CR4;
1240 			break;
1241 		case IFM_100G_CR4:
1242 		case IFM_100G_SR4:
1243 		case IFM_100G_KR4:
1244 		case IFM_100G_LR4:
1245 		case IFM_100G_CAUI4_AC:
1246 		case IFM_100G_CAUI4:
1247 		case IFM_100G_AUI4_AC:
1248 		case IFM_100G_AUI4:
1249 		case IFM_100G_CR_PAM4:
1250 		case IFM_100G_KR_PAM4:
1251 		case IFM_100G_CP2:
1252 		case IFM_100G_SR2:
1253 		case IFM_100G_DR:
1254 		case IFM_100G_KR2_PAM4:
1255 		case IFM_100G_CAUI2_AC:
1256 		case IFM_100G_CAUI2:
1257 		case IFM_100G_AUI2_AC:
1258 		case IFM_100G_AUI2:
1259 			key = IFM_100G_CR4;
1260 			break;
1261 		case IFM_200G_CR4_PAM4:
1262 		case IFM_200G_SR4:
1263 		case IFM_200G_FR4:
1264 		case IFM_200G_LR4:
1265 		case IFM_200G_DR4:
1266 		case IFM_200G_KR4_PAM4:
1267 		case IFM_200G_AUI4_AC:
1268 		case IFM_200G_AUI4:
1269 		case IFM_200G_AUI8_AC:
1270 		case IFM_200G_AUI8:
1271 			key = IFM_200G_CR4_PAM4;
1272 			break;
1273 		case IFM_400G_FR8:
1274 		case IFM_400G_LR8:
1275 		case IFM_400G_DR4:
1276 		case IFM_400G_AUI8_AC:
1277 		case IFM_400G_AUI8:
1278 			key = IFM_400G_FR8;
1279 			break;
1280 		default:
1281 			key = subtype;
1282 			break;
1283 		}
1284 		/* bit 5..14:	(some bits of) if_index of lagg device */
1285 		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
1286 		/* bit 15:	0 */
1287 	}
1288 	return (htons(key));
1289 }
1290 
1291 static void
1292 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1293 {
1294 	char buf[LACP_LAGIDSTR_MAX+1];
1295 
1296 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1297 	    __func__,
1298 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1299 	    buf, sizeof(buf)),
1300 	    la->la_refcnt, la->la_refcnt + 1));
1301 
1302 	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
1303 	la->la_refcnt++;
1304 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
1305 }
1306 
1307 static void
1308 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1309 {
1310 	char buf[LACP_LAGIDSTR_MAX+1];
1311 
1312 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1313 	    __func__,
1314 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1315 	    buf, sizeof(buf)),
1316 	    la->la_refcnt, la->la_refcnt - 1));
1317 
1318 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
1319 	la->la_refcnt--;
1320 	if (la->la_refcnt > 0) {
1321 		return;
1322 	}
1323 
1324 	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
1325 	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));
1326 
1327 	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);
1328 
1329 	free(la, M_DEVBUF);
1330 }
1331 
1332 /*
1333  * lacp_aggregator_get: allocate an aggregator.
1334  */
1335 
1336 static struct lacp_aggregator *
1337 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
1338 {
1339 	struct lacp_aggregator *la;
1340 
1341 	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
1342 	if (la) {
1343 		la->la_refcnt = 1;
1344 		la->la_nports = 0;
1345 		TAILQ_INIT(&la->la_ports);
1346 		la->la_pending = 0;
1347 		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
1348 	}
1349 
1350 	return (la);
1351 }
1352 
1353 /*
1354  * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
1355  */
1356 
1357 static void
1358 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
1359 {
1360 	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
1361 	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);
1362 
1363 	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
1364 }
1365 
1366 static void
1367 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
1368     const struct lacp_peerinfo *lpi_port)
1369 {
1370 	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
1371 	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
1372 	lpi_aggr->lip_key = lpi_port->lip_key;
1373 }
1374 
1375 /*
1376  * lacp_aggregator_is_compatible: check if a port can join to an aggregator.
1377  */
1378 
1379 static int
1380 lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
1381     const struct lacp_port *lp)
1382 {
1383 	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
1384 	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
1385 		return (0);
1386 	}
1387 
1388 	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
1389 		return (0);
1390 	}
1391 
1392 	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
1393 		return (0);
1394 	}
1395 
1396 	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
1397 		return (0);
1398 	}
1399 
1400 	return (1);
1401 }
1402 
1403 static int
1404 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
1405     const struct lacp_peerinfo *b)
1406 {
1407 	if (memcmp(&a->lip_systemid, &b->lip_systemid,
1408 	    sizeof(a->lip_systemid))) {
1409 		return (0);
1410 	}
1411 
1412 	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
1413 		return (0);
1414 	}
1415 
1416 	return (1);
1417 }
1418 
1419 static void
1420 lacp_port_enable(struct lacp_port *lp)
1421 {
1422 	lp->lp_state |= LACP_STATE_AGGREGATION;
1423 }
1424 
1425 static void
1426 lacp_port_disable(struct lacp_port *lp)
1427 {
1428 	lacp_set_mux(lp, LACP_MUX_DETACHED);
1429 
1430 	lp->lp_state &= ~LACP_STATE_AGGREGATION;
1431 	lp->lp_selected = LACP_UNSELECTED;
1432 	lacp_sm_rx_record_default(lp);
1433 	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
1434 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1435 }
1436 
1437 /*
1438  * lacp_select: select an aggregator.  create one if necessary.
1439  */
1440 static void
1441 lacp_select(struct lacp_port *lp)
1442 {
1443 	struct lacp_softc *lsc = lp->lp_lsc;
1444 	struct lacp_aggregator *la;
1445 	char buf[LACP_LAGIDSTR_MAX+1];
1446 
1447 	if (lp->lp_aggregator) {
1448 		return;
1449 	}
1450 
1451 	/* If we haven't heard from our peer, skip this step. */
1452 	if (lp->lp_state & LACP_STATE_DEFAULTED)
1453 		return;
1454 
1455 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1456 	    ("timer_wait_while still active"));
1457 
1458 	LACP_DPRINTF((lp, "port lagid=%s\n",
1459 	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
1460 	    buf, sizeof(buf))));
1461 
1462 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1463 		if (lacp_aggregator_is_compatible(la, lp)) {
1464 			break;
1465 		}
1466 	}
1467 
1468 	if (la == NULL) {
1469 		la = lacp_aggregator_get(lsc, lp);
1470 		if (la == NULL) {
1471 			LACP_DPRINTF((lp, "aggregator creation failed\n"));
1472 
1473 			/*
1474 			 * will retry on the next tick.
1475 			 */
1476 
1477 			return;
1478 		}
1479 		lacp_fill_aggregator_id(la, lp);
1480 		LACP_DPRINTF((lp, "aggregator created\n"));
1481 	} else {
1482 		LACP_DPRINTF((lp, "compatible aggregator found\n"));
1483 		if (la->la_refcnt == LACP_MAX_PORTS)
1484 			return;
1485 		lacp_aggregator_addref(lsc, la);
1486 	}
1487 
1488 	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
1489 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1490 	    buf, sizeof(buf))));
1491 
1492 	lp->lp_aggregator = la;
1493 	lp->lp_selected = LACP_SELECTED;
1494 }
1495 
1496 /*
1497  * lacp_unselect: finish unselect/detach process.
1498  */
1499 
1500 static void
1501 lacp_unselect(struct lacp_port *lp)
1502 {
1503 	struct lacp_softc *lsc = lp->lp_lsc;
1504 	struct lacp_aggregator *la = lp->lp_aggregator;
1505 
1506 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1507 	    ("timer_wait_while still active"));
1508 
1509 	if (la == NULL) {
1510 		return;
1511 	}
1512 
1513 	lp->lp_aggregator = NULL;
1514 	lacp_aggregator_delref(lsc, la);
1515 }
1516 
1517 /* mux machine */
1518 
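/*
 * lacp_sm_mux: mux state machine.  A port walks DETACHED -> WAITING ->
 * ATTACHED -> COLLECTING -> DISTRIBUTING (and back) depending on whether
 * it is selected for an aggregator, the aggregate wait timer has run
 * out, and the partner advertises SYNC and COLLECTING; falling out of
 * DISTRIBUTING is counted as possible flapping.
 */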
1519 static void
1520 lacp_sm_mux(struct lacp_port *lp)
1521 {
1522 	struct lagg_port *lgp = lp->lp_lagg;
1523 	struct lagg_softc *sc = lgp->lp_softc;
1524 	enum lacp_mux_state new_state;
1525 	boolean_t p_sync =
1526 		    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
1527 	boolean_t p_collecting =
1528 	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
1529 	enum lacp_selected selected = lp->lp_selected;
1530 	struct lacp_aggregator *la;
1531 
1532 	if (V_lacp_debug > 1)
1533 		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
1534 		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
1535 		    lp->lp_mux_state, selected, p_sync, p_collecting);
1536 
1537 re_eval:
1538 	la = lp->lp_aggregator;
1539 	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
1540 	    ("MUX not detached"));
1541 	new_state = lp->lp_mux_state;
1542 	switch (lp->lp_mux_state) {
1543 	case LACP_MUX_DETACHED:
1544 		if (selected != LACP_UNSELECTED) {
1545 			new_state = LACP_MUX_WAITING;
1546 		}
1547 		break;
1548 	case LACP_MUX_WAITING:
1549 		KASSERT(la->la_pending > 0 ||
1550 		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1551 		    ("timer_wait_while still active"));
1552 		if (selected == LACP_SELECTED && la->la_pending == 0) {
1553 			new_state = LACP_MUX_ATTACHED;
1554 		} else if (selected == LACP_UNSELECTED) {
1555 			new_state = LACP_MUX_DETACHED;
1556 		}
1557 		break;
1558 	case LACP_MUX_ATTACHED:
1559 		if (selected == LACP_SELECTED && p_sync) {
1560 			new_state = LACP_MUX_COLLECTING;
1561 		} else if (selected != LACP_SELECTED) {
1562 			new_state = LACP_MUX_DETACHED;
1563 		}
1564 		break;
1565 	case LACP_MUX_COLLECTING:
1566 		if (selected == LACP_SELECTED && p_sync && p_collecting) {
1567 			new_state = LACP_MUX_DISTRIBUTING;
1568 		} else if (selected != LACP_SELECTED || !p_sync) {
1569 			new_state = LACP_MUX_ATTACHED;
1570 		}
1571 		break;
1572 	case LACP_MUX_DISTRIBUTING:
1573 		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
1574 			new_state = LACP_MUX_COLLECTING;
1575 			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
1576 			sc->sc_flapping++;
1577 		}
1578 		break;
1579 	default:
1580 		panic("%s: unknown state", __func__);
1581 	}
1582 
1583 	if (lp->lp_mux_state == new_state) {
1584 		return;
1585 	}
1586 
1587 	lacp_set_mux(lp, new_state);
1588 	goto re_eval;
1589 }
1590 
1591 static void
1592 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
1593 {
1594 	struct lacp_aggregator *la = lp->lp_aggregator;
1595 
1596 	if (lp->lp_mux_state == new_state) {
1597 		return;
1598 	}
1599 
1600 	switch (new_state) {
1601 	case LACP_MUX_DETACHED:
1602 		lp->lp_state &= ~LACP_STATE_SYNC;
1603 		lacp_disable_distributing(lp);
1604 		lacp_disable_collecting(lp);
1605 		lacp_sm_assert_ntt(lp);
1606 		/* cancel timer */
1607 		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
1608 			KASSERT(la->la_pending > 0,
1609 			    ("timer_wait_while not active"));
1610 			la->la_pending--;
1611 		}
1612 		LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
1613 		lacp_unselect(lp);
1614 		break;
1615 	case LACP_MUX_WAITING:
1616 		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
1617 		    LACP_AGGREGATE_WAIT_TIME);
1618 		la->la_pending++;
1619 		break;
1620 	case LACP_MUX_ATTACHED:
1621 		lp->lp_state |= LACP_STATE_SYNC;
1622 		lacp_disable_collecting(lp);
1623 		lacp_sm_assert_ntt(lp);
1624 		break;
1625 	case LACP_MUX_COLLECTING:
1626 		lacp_enable_collecting(lp);
1627 		lacp_disable_distributing(lp);
1628 		lacp_sm_assert_ntt(lp);
1629 		break;
1630 	case LACP_MUX_DISTRIBUTING:
1631 		lacp_enable_distributing(lp);
1632 		break;
1633 	default:
1634 		panic("%s: unknown state", __func__);
1635 	}
1636 
1637 	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));
1638 
1639 	lp->lp_mux_state = new_state;
1640 }
1641 
1642 static void
1643 lacp_sm_mux_timer(struct lacp_port *lp)
1644 {
1645 	struct lacp_aggregator *la = lp->lp_aggregator;
1646 	char buf[LACP_LAGIDSTR_MAX+1];
1647 
1648 	KASSERT(la->la_pending > 0, ("no pending event"));
1649 
1650 	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
1651 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1652 	    buf, sizeof(buf)),
1653 	    la->la_pending, la->la_pending - 1));
1654 
1655 	la->la_pending--;
1656 }
1657 
1658 /* periodic transmit machine */
1659 
1660 static void
1661 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
1662 {
1663 	if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
1664 	    LACP_STATE_TIMEOUT)) {
1665 		return;
1666 	}
1667 
1668 	LACP_DPRINTF((lp, "partner timeout changed\n"));
1669 
1670 	/*
1671 	 * FAST_PERIODIC -> SLOW_PERIODIC
1672 	 * or
1673 	 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
1674 	 *
1675 	 * let lacp_sm_ptx_tx_schedule update the timeout.
1676 	 */
1677 
1678 	LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1679 
1680 	/*
1681 	 * if timeout has been shortened, assert NTT.
1682 	 */
1683 
1684 	if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
1685 		lacp_sm_assert_ntt(lp);
1686 	}
1687 }
1688 
1689 static void
1690 lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
1691 {
1692 	int timeout;
1693 
1694 	if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
1695 	    !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {
1696 
1697 		/*
1698 		 * NO_PERIODIC
1699 		 */
1700 
1701 		LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1702 		return;
1703 	}
1704 
1705 	if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
1706 		return;
1707 	}
1708 
1709 	timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
1710 	    LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;
1711 
1712 	LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
1713 }
1714 
1715 static void
1716 lacp_sm_ptx_timer(struct lacp_port *lp)
1717 {
1718 	lacp_sm_assert_ntt(lp);
1719 }
1720 
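/*
 * lacp_sm_rx: receive machine entry point for a validated LACPDU.
 * Frames from ourselves (same system id) are ignored; otherwise the
 * partner information is recorded, the current_while timer is re-armed
 * and the transmit machine is kicked if a reply is needed.
 */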
1721 static void
1722 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
1723 {
1724 	int timeout;
1725 
1726 	/*
1727 	 * check LACP_DISABLED first
1728 	 */
1729 
1730 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
1731 		return;
1732 	}
1733 
1734 	/*
1735 	 * check loopback condition.
1736 	 */
1737 
1738 	if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
1739 	    &lp->lp_actor.lip_systemid)) {
1740 		return;
1741 	}
1742 
1743 	/*
1744 	 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
1745 	 */
1746 
1747 	lacp_sm_rx_update_selected(lp, du);
1748 	lacp_sm_rx_update_ntt(lp, du);
1749 	lacp_sm_rx_record_pdu(lp, du);
1750 
1751 	timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
1752 	    LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
1753 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);
1754 
1755 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1756 
1757 	/*
1758 	 * kick the transmit machine without waiting for the next tick.
1759 	 */
1760 
1761 	lacp_sm_tx(lp);
1762 }
1763 
1764 static void
1765 lacp_sm_rx_set_expired(struct lacp_port *lp)
1766 {
1767 	lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1768 	lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
1769 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
1770 	lp->lp_state |= LACP_STATE_EXPIRED;
1771 }
1772 
1773 static void
1774 lacp_sm_rx_timer(struct lacp_port *lp)
1775 {
1776 	if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
1777 		/* CURRENT -> EXPIRED */
1778 		LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
1779 		lacp_sm_rx_set_expired(lp);
1780 	} else {
1781 		/* EXPIRED -> DEFAULTED */
1782 		LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
1783 		lacp_sm_rx_update_default_selected(lp);
1784 		lacp_sm_rx_record_default(lp);
1785 		lp->lp_state &= ~LACP_STATE_EXPIRED;
1786 	}
1787 }
1788 
1789 static void
1790 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
1791 {
1792 	boolean_t active;
1793 	uint8_t oldpstate;
1794 	char buf[LACP_STATESTR_MAX+1];
1795 
1796 	LACP_TRACE(lp);
1797 
1798 	oldpstate = lp->lp_partner.lip_state;
1799 
1800 	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
1801 	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
1802 	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));
1803 
1804 	lp->lp_partner = du->ldu_actor;
1805 	if (active &&
1806 	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1807 	    LACP_STATE_AGGREGATION) &&
1808 	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
1809 	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
1810 		/*
1811 		 * XXX Maintain legacy behavior of leaving the
1812 		 * LACP_STATE_SYNC bit unchanged from the partner's
1813 		 * advertisement if lsc_strict_mode is false.
1814 		 * TODO: We should re-examine the concept of the "strict mode"
1815 		 * to ensure it makes sense to maintain a non-strict mode.
1816 		 */
1817 		if (lp->lp_lsc->lsc_strict_mode)
1818 			lp->lp_partner.lip_state |= LACP_STATE_SYNC;
1819 	} else {
1820 		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1821 	}
1822 
1823 	lp->lp_state &= ~LACP_STATE_DEFAULTED;
1824 
1825 	if (oldpstate != lp->lp_partner.lip_state) {
1826 		LACP_DPRINTF((lp, "old pstate %s\n",
1827 		    lacp_format_state(oldpstate, buf, sizeof(buf))));
1828 		LACP_DPRINTF((lp, "new pstate %s\n",
1829 		    lacp_format_state(lp->lp_partner.lip_state, buf,
1830 		    sizeof(buf))));
1831 	}
1832 
1833 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1834 }
1835 
1836 static void
1837 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
1838 {
1839 
1840 	LACP_TRACE(lp);
1841 
1842 	if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
1843 	    !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1844 	    LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
1845 		LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
1846 		lacp_sm_assert_ntt(lp);
1847 	}
1848 }
1849 
1850 static void
1851 lacp_sm_rx_record_default(struct lacp_port *lp)
1852 {
1853 	uint8_t oldpstate;
1854 
1855 	LACP_TRACE(lp);
1856 
1857 	oldpstate = lp->lp_partner.lip_state;
1858 	if (lp->lp_lsc->lsc_strict_mode)
1859 		lp->lp_partner = lacp_partner_admin_strict;
1860 	else
1861 		lp->lp_partner = lacp_partner_admin_optimistic;
1862 	lp->lp_state |= LACP_STATE_DEFAULTED;
1863 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1864 }
1865 
1866 static void
1867 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
1868     const struct lacp_peerinfo *info)
1869 {
1870 
1871 	LACP_TRACE(lp);
1872 
1873 	if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
1874 	    !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
1875 	    LACP_STATE_AGGREGATION)) {
1876 		lp->lp_selected = LACP_UNSELECTED;
1877 		/* mux machine will clean up lp->lp_aggregator */
1878 	}
1879 }
1880 
1881 static void
1882 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
1883 {
1884 
1885 	LACP_TRACE(lp);
1886 
1887 	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
1888 }
1889 
1890 static void
1891 lacp_sm_rx_update_default_selected(struct lacp_port *lp)
1892 {
1893 
1894 	LACP_TRACE(lp);
1895 
1896 	if (lp->lp_lsc->lsc_strict_mode)
1897 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1898 		    &lacp_partner_admin_strict);
1899 	else
1900 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1901 		    &lacp_partner_admin_optimistic);
1902 }
1903 
1904 /* transmit machine */
1905 
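/*
 * Send an LACPDU if the NTT (need-to-transmit) flag is set.  NTT is
 * dropped when the port is not aggregatable or both actor and partner
 * are passive, and transmission is rate limited; the flag is cleared
 * unless the transmit fails, in which case the PDU is retried later.
 */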
1906 static void
1907 lacp_sm_tx(struct lacp_port *lp)
1908 {
1909 	int error = 0;
1910 
1911 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
1912 #if 1
1913 	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
1914 	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
1915 #endif
1916 	    ) {
1917 		lp->lp_flags &= ~LACP_PORT_NTT;
1918 	}
1919 
1920 	if (!(lp->lp_flags & LACP_PORT_NTT)) {
1921 		return;
1922 	}
1923 
1924 	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
1925 	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
1926 		    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
1927 		LACP_DPRINTF((lp, "rate limited pdu\n"));
1928 		return;
1929 	}
1930 
1931 	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
1932 		error = lacp_xmit_lacpdu(lp);
1933 	} else {
1934 		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
1935 	}
1936 
1937 	if (error == 0) {
1938 		lp->lp_flags &= ~LACP_PORT_NTT;
1939 	} else {
1940 		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
1941 		    error));
1942 	}
1943 }
1944 
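/*
 * Flag that an LACPDU needs to be transmitted on this port.
 */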
1945 static void
1946 lacp_sm_assert_ntt(struct lacp_port *lp)
1947 {
1948 
1949 	lp->lp_flags |= LACP_PORT_NTT;
1950 }
1951 
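/*
 * Advance each running per-port timer by one tick and invoke its
 * handler when it expires; a value of zero means the timer is stopped.
 */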
1952 static void
1953 lacp_run_timers(struct lacp_port *lp)
1954 {
1955 	int i;
1956 
1957 	for (i = 0; i < LACP_NTIMER; i++) {
1958 		KASSERT(lp->lp_timer[i] >= 0,
1959 		    ("invalid timer value %d", lp->lp_timer[i]));
1960 		if (lp->lp_timer[i] == 0) {
1961 			continue;
1962 		} else if (--lp->lp_timer[i] <= 0) {
1963 			if (lacp_timer_funcs[i]) {
1964 				(*lacp_timer_funcs[i])(lp);
1965 			}
1966 		}
1967 	}
1968 }
1969 
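/*
 * Handle a received marker PDU.  Information requests are turned into
 * responses and echoed back out the port; responses are matched against
 * the marker we last sent and clear the port's pending-marker flag.
 * Once no port is still waiting on a response, distribution is no
 * longer suppressed.
 */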
1970 int
1971 lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
1972 {
1973 	struct lacp_softc *lsc = lp->lp_lsc;
1974 	struct lagg_port *lgp = lp->lp_lagg;
1975 	struct lacp_port *lp2;
1976 	struct markerdu *mdu;
1977 	int error = 0;
1978 	int pending = 0;
1979 
1980 	if (m->m_pkthdr.len != sizeof(*mdu)) {
1981 		goto bad;
1982 	}
1983 
1984 	if ((m->m_flags & M_MCAST) == 0) {
1985 		goto bad;
1986 	}
1987 
1988 	if (m->m_len < sizeof(*mdu)) {
1989 		m = m_pullup(m, sizeof(*mdu));
1990 		if (m == NULL) {
1991 			return (ENOMEM);
1992 		}
1993 	}
1994 
1995 	mdu = mtod(m, struct markerdu *);
1996 
1997 	if (memcmp(&mdu->mdu_eh.ether_dhost,
1998 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
1999 		goto bad;
2000 	}
2001 
2002 	if (mdu->mdu_sph.sph_version != 1) {
2003 		goto bad;
2004 	}
2005 
2006 	switch (mdu->mdu_tlv.tlv_type) {
2007 	case MARKER_TYPE_INFO:
2008 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
2009 		    marker_info_tlv_template, TRUE)) {
2010 			goto bad;
2011 		}
2012 		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
2013 		memcpy(&mdu->mdu_eh.ether_dhost,
2014 		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
2015 		memcpy(&mdu->mdu_eh.ether_shost,
2016 		    lgp->lp_lladdr, ETHER_ADDR_LEN);
2017 		error = lagg_enqueue(lp->lp_ifp, m);
2018 		break;
2019 
2020 	case MARKER_TYPE_RESPONSE:
2021 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
2022 		    marker_response_tlv_template, TRUE)) {
2023 			goto bad;
2024 		}
2025 		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
2026 		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
2027 		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));
2028 
2029 		/* Verify that it is the last marker we sent out */
2030 		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
2031 		    sizeof(struct lacp_markerinfo)))
2032 			goto bad;
2033 
2034 		LACP_LOCK(lsc);
2035 		lp->lp_flags &= ~LACP_PORT_MARK;
2036 
2037 		if (lsc->lsc_suppress_distributing) {
2038 			/* Check if any ports are waiting for a response */
2039 			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
2040 				if (lp2->lp_flags & LACP_PORT_MARK) {
2041 					pending = 1;
2042 					break;
2043 				}
2044 			}
2045 
2046 			if (pending == 0) {
2047 				/* All interface queues are clear */
2048 				LACP_DPRINTF((NULL, "queue flush complete\n"));
2049 				lsc->lsc_suppress_distributing = FALSE;
2050 			}
2051 		}
2052 		LACP_UNLOCK(lsc);
2053 		m_freem(m);
2054 		break;
2055 
2056 	default:
2057 		goto bad;
2058 	}
2059 
2060 	return (error);
2061 
2062 bad:
2063 	LACP_DPRINTF((lp, "bad marker frame\n"));
2064 	m_freem(m);
2065 	return (EINVAL);
2066 }
2067 
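/*
 * Walk a chain of TLVs, checking that each one fits in the buffer and
 * that its length (and optionally its type) matches the template; the
 * template is terminated by a zero entry.
 */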
2068 static int
2069 tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
2070     const struct tlv_template *tmpl, boolean_t check_type)
2071 {
2072 	while (/* CONSTCOND */ 1) {
2073 		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
2074 			return (EINVAL);
2075 		}
2076 		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
2077 		    tlv->tlv_length != tmpl->tmpl_length) {
2078 			return (EINVAL);
2079 		}
2080 		if (tmpl->tmpl_type == 0) {
2081 			break;
2082 		}
2083 		tlv = (const struct tlvhdr *)
2084 		    ((const char *)tlv + tlv->tlv_length);
2085 		tmpl++;
2086 	}
2087 
2088 	return (0);
2089 }
2090 
2091 /* Debugging */
2092 const char *
2093 lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
2094 {
2095 	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
2096 	    (int)mac[0],
2097 	    (int)mac[1],
2098 	    (int)mac[2],
2099 	    (int)mac[3],
2100 	    (int)mac[4],
2101 	    (int)mac[5]);
2102 
2103 	return (buf);
2104 }
2105 
2106 const char *
2107 lacp_format_systemid(const struct lacp_systemid *sysid,
2108     char *buf, size_t buflen)
2109 {
2110 	char macbuf[LACP_MACSTR_MAX+1];
2111 
2112 	snprintf(buf, buflen, "%04X,%s",
2113 	    ntohs(sysid->lsi_prio),
2114 	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));
2115 
2116 	return (buf);
2117 }
2118 
2119 const char *
2120 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
2121 {
2122 	snprintf(buf, buflen, "%04X,%04X",
2123 	    ntohs(portid->lpi_prio),
2124 	    ntohs(portid->lpi_portno));
2125 
2126 	return (buf);
2127 }
2128 
2129 const char *
2130 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
2131 {
2132 	char sysid[LACP_SYSTEMIDSTR_MAX+1];
2133 	char portid[LACP_PORTIDSTR_MAX+1];
2134 
2135 	snprintf(buf, buflen, "(%s,%04X,%s)",
2136 	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
2137 	    ntohs(peer->lip_key),
2138 	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));
2139 
2140 	return (buf);
2141 }
2142 
2143 const char *
2144 lacp_format_lagid(const struct lacp_peerinfo *a,
2145     const struct lacp_peerinfo *b, char *buf, size_t buflen)
2146 {
2147 	char astr[LACP_PARTNERSTR_MAX+1];
2148 	char bstr[LACP_PARTNERSTR_MAX+1];
2149 
2150 #if 0
2151 	/*
2152 	 * There is a convention to display the smaller-numbered peer
2153 	 * on the left.
2154 	 */
2155 
2156 	if (lacp_compare_peerinfo(a, b) > 0) {
2157 		const struct lacp_peerinfo *t;
2158 
2159 		t = a;
2160 		a = b;
2161 		b = t;
2162 	}
2163 #endif
2164 
2165 	snprintf(buf, buflen, "[%s,%s]",
2166 	    lacp_format_partner(a, astr, sizeof(astr)),
2167 	    lacp_format_partner(b, bstr, sizeof(bstr)));
2168 
2169 	return (buf);
2170 }
2171 
2172 const char *
2173 lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
2174     char *buf, size_t buflen)
2175 {
2176 	if (la == NULL) {
2177 		return ("(none)");
2178 	}
2179 
2180 	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
2181 }
2182 
2183 const char *
2184 lacp_format_state(uint8_t state, char *buf, size_t buflen)
2185 {
2186 	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
2187 	return (buf);
2188 }
2189 
2190 static void
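/*
 * Print the actor and partner information, their states, and the
 * collector max delay carried in an LACPDU.
 */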
2191 lacp_dump_lacpdu(const struct lacpdu *du)
2192 {
2193 	char buf[LACP_PARTNERSTR_MAX+1];
2194 	char buf2[LACP_STATESTR_MAX+1];
2195 
2196 	printf("actor=%s\n",
2197 	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
2198 	printf("actor.state=%s\n",
2199 	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
2200 	printf("partner=%s\n",
2201 	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
2202 	printf("partner.state=%s\n",
2203 	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));
2204 
2205 	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
2206 }
2207 
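/*
 * printf-style debug output, prefixed with the port's interface name
 * when a port is supplied.
 */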
2208 static void
2209 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
2210 {
2211 	va_list va;
2212 
2213 	if (lp) {
2214 		printf("%s: ", lp->lp_ifp->if_xname);
2215 	}
2216 
2217 	va_start(va, fmt);
2218 	vprintf(fmt, va);
2219 	va_end(va);
2220 }
2221