1 /*	$NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $	*/
2 
3 /*-
4  * Copyright (c)2005 YAMAMOTO Takashi,
5  * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/callout.h>
35 #include <sys/eventhandler.h>
36 #include <sys/mbuf.h>
37 #include <sys/systm.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h> /* hz */
40 #include <sys/socket.h> /* for net/if.h */
41 #include <sys/sockio.h>
42 #include <sys/sysctl.h>
43 #include <machine/stdarg.h>
44 #include <sys/lock.h>
45 #include <sys/rwlock.h>
46 #include <sys/taskqueue.h>
47 
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_dl.h>
51 #include <net/ethernet.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 
55 #include <net/if_lagg.h>
56 #include <net/ieee8023ad_lacp.h>
57 
58 /*
59  * actor system priority and port priority.
60  * XXX should be configurable.
61  */
62 
63 #define	LACP_SYSTEM_PRIO	0x8000
64 #define	LACP_PORT_PRIO		0x8000
65 
66 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
67     { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
68 
69 static const struct tlv_template lacp_info_tlv_template[] = {
70 	{ LACP_TYPE_ACTORINFO,
71 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
72 	{ LACP_TYPE_PARTNERINFO,
73 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
74 	{ LACP_TYPE_COLLECTORINFO,
75 	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
76 	{ 0, 0 },
77 };
78 
79 static const struct tlv_template marker_info_tlv_template[] = {
80 	{ MARKER_TYPE_INFO,
81 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
82 	{ 0, 0 },
83 };
84 
85 static const struct tlv_template marker_response_tlv_template[] = {
86 	{ MARKER_TYPE_RESPONSE,
87 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
88 	{ 0, 0 },
89 };
90 
91 typedef void (*lacp_timer_func_t)(struct lacp_port *);
92 
93 static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
94 static void	lacp_fill_markerinfo(struct lacp_port *,
95 		    struct lacp_markerinfo *);
96 
97 static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
98 static void	lacp_suppress_distributing(struct lacp_softc *,
99 		    struct lacp_aggregator *);
100 static void	lacp_transit_expire(void *);
101 static void	lacp_update_portmap(struct lacp_softc *);
102 static void	lacp_select_active_aggregator(struct lacp_softc *);
103 static uint16_t	lacp_compose_key(struct lacp_port *);
104 static int	tlv_check(const void *, size_t, const struct tlvhdr *,
105 		    const struct tlv_template *, boolean_t);
106 static void	lacp_tick(void *);
107 
108 static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
109 		    const struct lacp_port *);
110 static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
111 		    const struct lacp_peerinfo *);
112 static int	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
113 		    const struct lacp_port *);
114 static int	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
115 		    const struct lacp_peerinfo *);
116 
117 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
118 		    struct lacp_port *);
119 static void	lacp_aggregator_addref(struct lacp_softc *,
120 		    struct lacp_aggregator *);
121 static void	lacp_aggregator_delref(struct lacp_softc *,
122 		    struct lacp_aggregator *);
123 
124 /* receive machine */
125 
126 static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
127 static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
128 static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
129 static void	lacp_sm_rx_timer(struct lacp_port *);
130 static void	lacp_sm_rx_set_expired(struct lacp_port *);
131 static void	lacp_sm_rx_update_ntt(struct lacp_port *,
132 		    const struct lacpdu *);
133 static void	lacp_sm_rx_record_pdu(struct lacp_port *,
134 		    const struct lacpdu *);
135 static void	lacp_sm_rx_update_selected(struct lacp_port *,
136 		    const struct lacpdu *);
137 static void	lacp_sm_rx_record_default(struct lacp_port *);
138 static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
139 static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
140 		    const struct lacp_peerinfo *);
141 
142 /* mux machine */
143 
144 static void	lacp_sm_mux(struct lacp_port *);
145 static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
146 static void	lacp_sm_mux_timer(struct lacp_port *);
147 
148 /* periodic transmit machine */
149 
150 static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
151 static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
152 static void	lacp_sm_ptx_timer(struct lacp_port *);
153 
154 /* transmit machine */
155 
156 static void	lacp_sm_tx(struct lacp_port *);
157 static void	lacp_sm_assert_ntt(struct lacp_port *);
158 
159 static void	lacp_run_timers(struct lacp_port *);
160 static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
161 		    const struct lacp_peerinfo *);
162 static int	lacp_compare_systemid(const struct lacp_systemid *,
163 		    const struct lacp_systemid *);
164 static void	lacp_port_enable(struct lacp_port *);
165 static void	lacp_port_disable(struct lacp_port *);
166 static void	lacp_select(struct lacp_port *);
167 static void	lacp_unselect(struct lacp_port *);
168 static void	lacp_disable_collecting(struct lacp_port *);
169 static void	lacp_enable_collecting(struct lacp_port *);
170 static void	lacp_disable_distributing(struct lacp_port *);
171 static void	lacp_enable_distributing(struct lacp_port *);
172 static int	lacp_xmit_lacpdu(struct lacp_port *);
173 static int	lacp_xmit_marker(struct lacp_port *);
174 
175 /* Debugging */
176 
177 static void	lacp_dump_lacpdu(const struct lacpdu *);
178 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
179 		    size_t);
180 static const char *lacp_format_lagid(const struct lacp_peerinfo *,
181 		    const struct lacp_peerinfo *, char *, size_t);
182 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
183 		    char *, size_t);
184 static const char *lacp_format_state(uint8_t, char *, size_t);
185 static const char *lacp_format_mac(const uint8_t *, char *, size_t);
186 static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
187 		    size_t);
188 static const char *lacp_format_portid(const struct lacp_portid *, char *,
189 		    size_t);
190 static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
191 		    __attribute__((__format__(__printf__, 2, 3)));
192 
193 static VNET_DEFINE(int, lacp_debug);
194 #define	V_lacp_debug	VNET(lacp_debug)
195 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD, 0, "ieee802.3ad");
196 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
197     &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");
198 
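/*
 * The debug sysctl is a bitmask consumed by the macros below: bit 0
 * enables general debug output, bit 1 traces function entry, and bit 2
 * reports the PDU drop tests.  For example (illustrative only):
 *	sysctl net.link.lagg.lacp.debug=3
 * turns on both debug and trace output.
 */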
199 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
200 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
201 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }
202 
203 /*
204  * partner administration variables.
205  * XXX should be configurable.
206  */
207 
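/*
 * The "optimistic" default pretends the partner is in sync and
 * collecting/distributing, so a port that receives no LACPDUs can still
 * be brought into the aggregate; the "strict" default (used when
 * lsc_strict_mode is set) assumes nothing and keeps such a port out.
 */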
208 static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
209 	.lip_systemid = { .lsi_prio = 0xffff },
210 	.lip_portid = { .lpi_prio = 0xffff },
211 	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
212 	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
213 };
214 
215 static const struct lacp_peerinfo lacp_partner_admin_strict = {
216 	.lip_systemid = { .lsi_prio = 0xffff },
217 	.lip_portid = { .lpi_prio = 0xffff },
218 	.lip_state = 0,
219 };
220 
221 static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
222 	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
223 	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
224 	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
225 };
226 
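/*
 * lacp_input: entry point for received slow-protocols frames.  Dispatch
 * on the subtype byte following the Ethernet header: LACPDUs and marker
 * PDUs are consumed here, anything else is handed back to the caller.
 */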
227 struct mbuf *
228 lacp_input(struct lagg_port *lgp, struct mbuf *m)
229 {
230 	struct lacp_port *lp = LACP_PORT(lgp);
231 	uint8_t subtype;
232 
233 	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
234 		m_freem(m);
235 		return (NULL);
236 	}
237 
238 	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
239 	switch (subtype) {
240 		case SLOWPROTOCOLS_SUBTYPE_LACP:
241 			lacp_pdu_input(lp, m);
242 			return (NULL);
243 
244 		case SLOWPROTOCOLS_SUBTYPE_MARKER:
245 			lacp_marker_input(lp, m);
246 			return (NULL);
247 	}
248 
249 	/* Not a subtype we are interested in */
250 	return (m);
251 }
252 
253 /*
254  * lacp_pdu_input: process lacpdu
255  */
256 static int
257 lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
258 {
259 	struct lacp_softc *lsc = lp->lp_lsc;
260 	struct lacpdu *du;
261 	int error = 0;
262 
263 	if (m->m_pkthdr.len != sizeof(*du)) {
264 		goto bad;
265 	}
266 
267 	if ((m->m_flags & M_MCAST) == 0) {
268 		goto bad;
269 	}
270 
271 	if (m->m_len < sizeof(*du)) {
272 		m = m_pullup(m, sizeof(*du));
273 		if (m == NULL) {
274 			return (ENOMEM);
275 		}
276 	}
277 
278 	du = mtod(m, struct lacpdu *);
279 
280 	if (memcmp(&du->ldu_eh.ether_dhost,
281 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
282 		goto bad;
283 	}
284 
285 	/*
286 	 * Ignore the version field for compatibility with future
287 	 * protocol revisions.
288 	 */
289 #if 0
290 	if (du->ldu_sph.sph_version != 1) {
291 		goto bad;
292 	}
293 #endif
294 
295 	/*
296 	 * Ignore TLV types for compatibility with future
297 	 * protocol revisions.
298 	 */
299 	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
300 	    lacp_info_tlv_template, FALSE)) {
301 		goto bad;
302 	}
303 
304 	if (V_lacp_debug > 0) {
305 		lacp_dprintf(lp, "lacpdu receive\n");
306 		lacp_dump_lacpdu(du);
307 	}
308 
309 	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
310 		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
311 		goto bad;
312 	}
313 
314 	LACP_LOCK(lsc);
315 	lacp_sm_rx(lp, du);
316 	LACP_UNLOCK(lsc);
317 
318 	m_freem(m);
319 	return (error);
320 
321 bad:
322 	m_freem(m);
323 	return (EINVAL);
324 }
325 
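/*
 * lacp_fill_actorinfo: populate our actor information: the system
 * priority and the lagg interface's MAC as the system ID, the port
 * priority and if_index as the port ID, plus the current state bits.
 */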
326 static void
327 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
328 {
329 	struct lagg_port *lgp = lp->lp_lagg;
330 	struct lagg_softc *sc = lgp->lp_softc;
331 
332 	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
333 	memcpy(&info->lip_systemid.lsi_mac,
334 	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
335 	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
336 	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
337 	info->lip_state = lp->lp_state;
338 }
339 
340 static void
341 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
342 {
343 	struct ifnet *ifp = lp->lp_ifp;
344 
345 	/* Fill in the port index and system id (encoded as the MAC) */
346 	info->mi_rq_port = htons(ifp->if_index);
347 	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
348 	info->mi_rq_xid = htonl(0);
349 }
350 
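/*
 * lacp_xmit_lacpdu: build a LACPDU carrying the actor, partner and
 * collector TLVs and enqueue it on the port.
 */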
351 static int
352 lacp_xmit_lacpdu(struct lacp_port *lp)
353 {
354 	struct lagg_port *lgp = lp->lp_lagg;
355 	struct mbuf *m;
356 	struct lacpdu *du;
357 	int error;
358 
359 	LACP_LOCK_ASSERT(lp->lp_lsc);
360 
361 	m = m_gethdr(M_NOWAIT, MT_DATA);
362 	if (m == NULL) {
363 		return (ENOMEM);
364 	}
365 	m->m_len = m->m_pkthdr.len = sizeof(*du);
366 
367 	du = mtod(m, struct lacpdu *);
368 	memset(du, 0, sizeof(*du));
369 
370 	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
371 	    ETHER_ADDR_LEN);
372 	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
373 	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);
374 
375 	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
376 	du->ldu_sph.sph_version = 1;
377 
378 	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
379 	du->ldu_actor = lp->lp_actor;
380 
381 	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
382 	    sizeof(du->ldu_partner));
383 	du->ldu_partner = lp->lp_partner;
384 
385 	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
386 	    sizeof(du->ldu_collector));
387 	du->ldu_collector.lci_maxdelay = 0;
388 
389 	if (V_lacp_debug > 0) {
390 		lacp_dprintf(lp, "lacpdu transmit\n");
391 		lacp_dump_lacpdu(du);
392 	}
393 
394 	m->m_flags |= M_MCAST;
395 
396 	/*
397 	 * XXX should use higher priority queue.
398 	 * otherwise network congestion can break aggregation.
399 	 */
400 
401 	error = lagg_enqueue(lp->lp_ifp, m);
402 	return (error);
403 }
404 
405 static int
406 lacp_xmit_marker(struct lacp_port *lp)
407 {
408 	struct lagg_port *lgp = lp->lp_lagg;
409 	struct mbuf *m;
410 	struct markerdu *mdu;
411 	int error;
412 
413 	LACP_LOCK_ASSERT(lp->lp_lsc);
414 
415 	m = m_gethdr(M_NOWAIT, MT_DATA);
416 	if (m == NULL) {
417 		return (ENOMEM);
418 	}
419 	m->m_len = m->m_pkthdr.len = sizeof(*mdu);
420 
421 	mdu = mtod(m, struct markerdu *);
422 	memset(mdu, 0, sizeof(*mdu));
423 
424 	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
425 	    ETHER_ADDR_LEN);
426 	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
427 	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);
428 
429 	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
430 	mdu->mdu_sph.sph_version = 1;
431 
432 	/* Bump the transaction id and copy over the marker info */
433 	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
434 	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
435 	mdu->mdu_info = lp->lp_marker;
436 
437 	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
438 	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
439 	    ntohl(mdu->mdu_info.mi_rq_xid)));
440 
441 	m->m_flags |= M_MCAST;
442 	error = lagg_enqueue(lp->lp_ifp, m);
443 	return (error);
444 }
445 
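/*
 * lacp_linkstate: re-evaluate a port after a media or link-state change.
 * Only an active, full-duplex Ethernet link may take part in aggregation;
 * if the port state or key changed, the port falls back to UNSELECTED so
 * that selection runs again on the next tick.
 */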
446 void
447 lacp_linkstate(struct lagg_port *lgp)
448 {
449 	struct lacp_port *lp = LACP_PORT(lgp);
450 	struct lacp_softc *lsc = lp->lp_lsc;
451 	struct ifnet *ifp = lgp->lp_ifp;
452 	struct ifmediareq ifmr;
453 	int error = 0;
454 	u_int media;
455 	uint8_t old_state;
456 	uint16_t old_key;
457 
458 	bzero((char *)&ifmr, sizeof(ifmr));
459 	error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
460 	if (error != 0)
461 		return;
462 
463 	LACP_LOCK(lsc);
464 	media = ifmr.ifm_active;
465 	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
466 	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
467 	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
468 	old_state = lp->lp_state;
469 	old_key = lp->lp_key;
470 
471 	lp->lp_media = media;
472 	/*
473 	 * If the port is not an active, full-duplex Ethernet link then it
474 	 * cannot be aggregated.
475 	 */
476 	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
477 	    ifp->if_link_state != LINK_STATE_UP) {
478 		lacp_port_disable(lp);
479 	} else {
480 		lacp_port_enable(lp);
481 	}
482 	lp->lp_key = lacp_compose_key(lp);
483 
484 	if (old_state != lp->lp_state || old_key != lp->lp_key) {
485 		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
486 		lp->lp_selected = LACP_UNSELECTED;
487 	}
488 	LACP_UNLOCK(lsc);
489 }
490 
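/*
 * lacp_tick: one-second callout driving the protocol.  For every
 * aggregatable port it runs the timers and then the selection, mux,
 * transmit and periodic-transmit state machines.
 */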
491 static void
492 lacp_tick(void *arg)
493 {
494 	struct lacp_softc *lsc = arg;
495 	struct lacp_port *lp;
496 
497 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
498 		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
499 			continue;
500 
501 		CURVNET_SET(lp->lp_ifp->if_vnet);
502 		lacp_run_timers(lp);
503 
504 		lacp_select(lp);
505 		lacp_sm_mux(lp);
506 		lacp_sm_tx(lp);
507 		lacp_sm_ptx_tx_schedule(lp);
508 		CURVNET_RESTORE();
509 	}
510 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
511 }
512 
513 int
514 lacp_port_create(struct lagg_port *lgp)
515 {
516 	struct lagg_softc *sc = lgp->lp_softc;
517 	struct lacp_softc *lsc = LACP_SOFTC(sc);
518 	struct lacp_port *lp;
519 	struct ifnet *ifp = lgp->lp_ifp;
520 	struct sockaddr_dl sdl;
521 	struct ifmultiaddr *rifma = NULL;
522 	int error;
523 
524 	boolean_t active = TRUE; /* XXX should be configurable */
525 	boolean_t fast = FALSE; /* XXX should be configurable */
526 
527 	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
528 	sdl.sdl_alen = ETHER_ADDR_LEN;
529 
530 	bcopy(&ethermulticastaddr_slowprotocols,
531 	    LLADDR(&sdl), ETHER_ADDR_LEN);
532 	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
533 	if (error) {
534 		printf("%s: ADDMULTI failed on %s\n", __func__,
535 		    lgp->lp_ifp->if_xname);
536 		return (error);
537 	}
538 
539 	lp = malloc(sizeof(struct lacp_port),
540 	    M_DEVBUF, M_NOWAIT|M_ZERO);
541 	if (lp == NULL)
542 		return (ENOMEM);
543 
544 	LACP_LOCK(lsc);
545 	lgp->lp_psc = lp;
546 	lp->lp_ifp = ifp;
547 	lp->lp_lagg = lgp;
548 	lp->lp_lsc = lsc;
549 	lp->lp_ifma = rifma;
550 
551 	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);
552 
553 	lacp_fill_actorinfo(lp, &lp->lp_actor);
554 	lacp_fill_markerinfo(lp, &lp->lp_marker);
555 	lp->lp_state =
556 	    (active ? LACP_STATE_ACTIVITY : 0) |
557 	    (fast ? LACP_STATE_TIMEOUT : 0);
558 	lp->lp_aggregator = NULL;
559 	lacp_sm_rx_set_expired(lp);
560 	LACP_UNLOCK(lsc);
561 	lacp_linkstate(lgp);
562 
563 	return (0);
564 }
565 
566 void
567 lacp_port_destroy(struct lagg_port *lgp)
568 {
569 	struct lacp_port *lp = LACP_PORT(lgp);
570 	struct lacp_softc *lsc = lp->lp_lsc;
571 	int i;
572 
573 	LACP_LOCK(lsc);
574 	for (i = 0; i < LACP_NTIMER; i++) {
575 		LACP_TIMER_DISARM(lp, i);
576 	}
577 
578 	lacp_disable_collecting(lp);
579 	lacp_disable_distributing(lp);
580 	lacp_unselect(lp);
581 
582 	LIST_REMOVE(lp, lp_next);
583 	LACP_UNLOCK(lsc);
584 
585 	/* The address may have already been removed by if_purgemaddrs() */
586 	if (!lgp->lp_detaching)
587 		if_delmulti_ifma(lp->lp_ifma);
588 
589 	free(lp, M_DEVBUF);
590 }
591 
592 void
593 lacp_req(struct lagg_softc *sc, void *data)
594 {
595 	struct lacp_opreq *req = (struct lacp_opreq *)data;
596 	struct lacp_softc *lsc = LACP_SOFTC(sc);
597 	struct lacp_aggregator *la;
598 
599 	bzero(req, sizeof(struct lacp_opreq));
600 
601 	/*
602 	 * If the LACP softc is NULL, return with the opreq structure full of
603 	 * zeros.  It is normal for the softc to be NULL while the lagg is
604 	 * being destroyed.
605 	 */
606 	if (lsc == NULL)
607 		return;
608 
609 	LACP_LOCK(lsc);
610 	la = lsc->lsc_active_aggregator;
611 	if (la != NULL) {
612 		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
613 		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
614 		    ETHER_ADDR_LEN);
615 		req->actor_key = ntohs(la->la_actor.lip_key);
616 		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
617 		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
618 		req->actor_state = la->la_actor.lip_state;
619 
620 		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
621 		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
622 		    ETHER_ADDR_LEN);
623 		req->partner_key = ntohs(la->la_partner.lip_key);
624 		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
625 		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
626 		req->partner_state = la->la_partner.lip_state;
627 	}
628 	LACP_UNLOCK(lsc);
629 }
630 
631 void
632 lacp_portreq(struct lagg_port *lgp, void *data)
633 {
634 	struct lacp_opreq *req = (struct lacp_opreq *)data;
635 	struct lacp_port *lp = LACP_PORT(lgp);
636 	struct lacp_softc *lsc = lp->lp_lsc;
637 
638 	LACP_LOCK(lsc);
639 	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
640 	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
641 	    ETHER_ADDR_LEN);
642 	req->actor_key = ntohs(lp->lp_actor.lip_key);
643 	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
644 	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
645 	req->actor_state = lp->lp_actor.lip_state;
646 
647 	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
648 	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
649 	    ETHER_ADDR_LEN);
650 	req->partner_key = ntohs(lp->lp_partner.lip_key);
651 	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
652 	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
653 	req->partner_state = lp->lp_partner.lip_state;
654 	LACP_UNLOCK(lsc);
655 }
656 
657 static void
658 lacp_disable_collecting(struct lacp_port *lp)
659 {
660 	LACP_DPRINTF((lp, "collecting disabled\n"));
661 	lp->lp_state &= ~LACP_STATE_COLLECTING;
662 }
663 
664 static void
665 lacp_enable_collecting(struct lacp_port *lp)
666 {
667 	LACP_DPRINTF((lp, "collecting enabled\n"));
668 	lp->lp_state |= LACP_STATE_COLLECTING;
669 }
670 
671 static void
672 lacp_disable_distributing(struct lacp_port *lp)
673 {
674 	struct lacp_aggregator *la = lp->lp_aggregator;
675 	struct lacp_softc *lsc = lp->lp_lsc;
676 	struct lagg_softc *sc = lsc->lsc_softc;
677 	char buf[LACP_LAGIDSTR_MAX+1];
678 
679 	LACP_LOCK_ASSERT(lsc);
680 
681 	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
682 		return;
683 	}
684 
685 	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
686 	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
687 	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));
688 
689 	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
690 	    "nports %d -> %d\n",
691 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
692 	    la->la_nports, la->la_nports - 1));
693 
694 	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
695 	la->la_nports--;
696 	sc->sc_active = la->la_nports;
697 
698 	if (lsc->lsc_active_aggregator == la) {
699 		lacp_suppress_distributing(lsc, la);
700 		lacp_select_active_aggregator(lsc);
701 		/* regenerate the port map, the active aggregator has changed */
702 		lacp_update_portmap(lsc);
703 	}
704 
705 	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
706 }
707 
708 static void
709 lacp_enable_distributing(struct lacp_port *lp)
710 {
711 	struct lacp_aggregator *la = lp->lp_aggregator;
712 	struct lacp_softc *lsc = lp->lp_lsc;
713 	struct lagg_softc *sc = lsc->lsc_softc;
714 	char buf[LACP_LAGIDSTR_MAX+1];
715 
716 	LACP_LOCK_ASSERT(lsc);
717 
718 	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
719 		return;
720 	}
721 
722 	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
723 	    "nports %d -> %d\n",
724 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
725 	    la->la_nports, la->la_nports + 1));
726 
727 	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
728 	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
729 	la->la_nports++;
730 	sc->sc_active = la->la_nports;
731 
732 	lp->lp_state |= LACP_STATE_DISTRIBUTING;
733 
734 	if (lsc->lsc_active_aggregator == la) {
735 		lacp_suppress_distributing(lsc, la);
736 		lacp_update_portmap(lsc);
737 	} else
738 		/* try to become the active aggregator */
739 		lacp_select_active_aggregator(lsc);
740 }
741 
742 static void
743 lacp_transit_expire(void *vp)
744 {
745 	struct lacp_softc *lsc = vp;
746 
747 	LACP_LOCK_ASSERT(lsc);
748 
749 	CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
750 	LACP_TRACE(NULL);
751 	CURVNET_RESTORE();
752 
753 	lsc->lsc_suppress_distributing = FALSE;
754 }
755 
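/*
 * lacp_attach: allocate the per-lagg LACP state and start it if the lagg
 * is already running.  A typical userland setup (illustrative only):
 *	ifconfig lagg0 create
 *	ifconfig lagg0 laggproto lacp laggport em0 laggport em1 up
 */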
756 void
757 lacp_attach(struct lagg_softc *sc)
758 {
759 	struct lacp_softc *lsc;
760 
761 	lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);
762 
763 	sc->sc_psc = lsc;
764 	lsc->lsc_softc = sc;
765 
766 	lsc->lsc_hashkey = m_ether_tcpip_hash_init();
767 	lsc->lsc_active_aggregator = NULL;
768 	lsc->lsc_strict_mode = 1;
769 	LACP_LOCK_INIT(lsc);
770 	TAILQ_INIT(&lsc->lsc_aggregators);
771 	LIST_INIT(&lsc->lsc_ports);
772 
773 	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
774 	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);
775 
776 	/* if the lagg is already up then start the tick callout now */
777 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
778 		lacp_init(sc);
779 }
780 
781 void
782 lacp_detach(void *psc)
783 {
784 	struct lacp_softc *lsc = (struct lacp_softc *)psc;
785 
786 	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
787 	    ("aggregators still active"));
788 	KASSERT(lsc->lsc_active_aggregator == NULL,
789 	    ("aggregator still attached"));
790 
791 	callout_drain(&lsc->lsc_transit_callout);
792 	callout_drain(&lsc->lsc_callout);
793 
794 	LACP_LOCK_DESTROY(lsc);
795 	free(lsc, M_DEVBUF);
796 }
797 
798 void
799 lacp_init(struct lagg_softc *sc)
800 {
801 	struct lacp_softc *lsc = LACP_SOFTC(sc);
802 
803 	LACP_LOCK(lsc);
804 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
805 	LACP_UNLOCK(lsc);
806 }
807 
808 void
809 lacp_stop(struct lagg_softc *sc)
810 {
811 	struct lacp_softc *lsc = LACP_SOFTC(sc);
812 
813 	LACP_LOCK(lsc);
814 	callout_stop(&lsc->lsc_transit_callout);
815 	callout_stop(&lsc->lsc_callout);
816 	LACP_UNLOCK(lsc);
817 }
818 
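/*
 * lacp_select_tx_port: pick the outgoing port for a frame.  Traffic is
 * spread over the active aggregator's distributing ports by hashing the
 * frame (or by using the mbuf flowid when LAGG_OPT_USE_FLOWID is set and
 * a hash is present) modulo the number of ports in the active portmap.
 * Returns NULL while distribution is suppressed or no aggregator is
 * active.
 */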
819 struct lagg_port *
820 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
821 {
822 	struct lacp_softc *lsc = LACP_SOFTC(sc);
823 	struct lacp_portmap *pm;
824 	struct lacp_port *lp;
825 	uint32_t hash;
826 
827 	if (__predict_false(lsc->lsc_suppress_distributing)) {
828 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
829 		return (NULL);
830 	}
831 
832 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
833 	if (pm->pm_count == 0) {
834 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
835 		return (NULL);
836 	}
837 
838 	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
839 	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
840 		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
841 	else
842 		hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
843 	hash %= pm->pm_count;
844 	lp = pm->pm_map[hash];
845 
846 	KASSERT((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0,
847 	    ("aggregated port is not distributing"));
848 
849 	return (lp->lp_lagg);
850 }
851 /*
852  * lacp_suppress_distributing: drop transmit packets for a while
853  * to preserve packet ordering.
854  */
855 
856 static void
857 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
858 {
859 	struct lacp_port *lp;
860 
861 	if (lsc->lsc_active_aggregator != la) {
862 		return;
863 	}
864 
865 	LACP_TRACE(NULL);
866 
867 	lsc->lsc_suppress_distributing = TRUE;
868 
869 	/* send a marker frame down each port to verify the queues are empty */
870 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
871 		lp->lp_flags |= LACP_PORT_MARK;
872 		lacp_xmit_marker(lp);
873 	}
874 
875 	/* set a timeout for the marker frames */
876 	callout_reset(&lsc->lsc_transit_callout,
877 	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
878 }
879 
880 static int
881 lacp_compare_peerinfo(const struct lacp_peerinfo *a,
882     const struct lacp_peerinfo *b)
883 {
884 	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
885 }
886 
887 static int
888 lacp_compare_systemid(const struct lacp_systemid *a,
889     const struct lacp_systemid *b)
890 {
891 	return (memcmp(a, b, sizeof(*a)));
892 }
893 
894 #if 0	/* unused */
895 static int
896 lacp_compare_portid(const struct lacp_portid *a,
897     const struct lacp_portid *b)
898 {
899 	return (memcmp(a, b, sizeof(*a)));
900 }
901 #endif
902 
903 static uint64_t
904 lacp_aggregator_bandwidth(struct lacp_aggregator *la)
905 {
906 	struct lacp_port *lp;
907 	uint64_t speed;
908 
909 	lp = TAILQ_FIRST(&la->la_ports);
910 	if (lp == NULL) {
911 		return (0);
912 	}
913 
914 	speed = ifmedia_baudrate(lp->lp_media);
915 	speed *= la->la_nports;
916 	if (speed == 0) {
917 		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
918 		    lp->lp_media, la->la_nports));
919 	}
920 
921 	return (speed);
922 }
923 
924 /*
925  * lacp_select_active_aggregator: select the aggregator used to transmit
926  * packets from the lagg(4) interface.
927  */
928 
929 static void
930 lacp_select_active_aggregator(struct lacp_softc *lsc)
931 {
932 	struct lacp_aggregator *la;
933 	struct lacp_aggregator *best_la = NULL;
934 	uint64_t best_speed = 0;
935 	char buf[LACP_LAGIDSTR_MAX+1];
936 
937 	LACP_TRACE(NULL);
938 
939 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
940 		uint64_t speed;
941 
942 		if (la->la_nports == 0) {
943 			continue;
944 		}
945 
946 		speed = lacp_aggregator_bandwidth(la);
947 		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
948 		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
949 		    speed, la->la_nports));
950 
951 		/*
952 		 * Choose this aggregator if its partner has a better (lower)
953 		 * system priority, its total aggregated speed is higher, or
954 		 * the speed ties and it is already the active aggregator.
955 		 */
956 		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
957 		    LACP_SYS_PRI(best_la->la_partner)) ||
958 		    speed > best_speed ||
959 		    (speed == best_speed &&
960 		    la == lsc->lsc_active_aggregator)) {
961 			best_la = la;
962 			best_speed = speed;
963 		}
964 	}
965 
966 	KASSERT(best_la == NULL || best_la->la_nports > 0,
967 	    ("invalid aggregator refcnt"));
968 	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
969 	    ("invalid aggregator list"));
970 
971 	if (lsc->lsc_active_aggregator != best_la) {
972 		LACP_DPRINTF((NULL, "active aggregator changed\n"));
973 		LACP_DPRINTF((NULL, "old %s\n",
974 		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
975 		    buf, sizeof(buf))));
976 	} else {
977 		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
978 	}
979 	LACP_DPRINTF((NULL, "new %s\n",
980 	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));
981 
982 	if (lsc->lsc_active_aggregator != best_la) {
983 		lsc->lsc_active_aggregator = best_la;
984 		lacp_update_portmap(lsc);
985 		if (best_la) {
986 			lacp_suppress_distributing(lsc, best_la);
987 		}
988 	}
989 }
990 
991 /*
992  * Update the inactive portmap array with the new list of ports and
993  * make it live.
994  */
995 static void
996 lacp_update_portmap(struct lacp_softc *lsc)
997 {
998 	struct lagg_softc *sc = lsc->lsc_softc;
999 	struct lacp_aggregator *la;
1000 	struct lacp_portmap *p;
1001 	struct lacp_port *lp;
1002 	uint64_t speed;
1003 	u_int newmap;
1004 	int i;
1005 
1006 	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
1007 	p = &lsc->lsc_pmap[newmap];
1008 	la = lsc->lsc_active_aggregator;
1009 	speed = 0;
1010 	bzero(p, sizeof(struct lacp_portmap));
1011 
1012 	if (la != NULL && la->la_nports > 0) {
1013 		p->pm_count = la->la_nports;
1014 		i = 0;
1015 		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q)
1016 			p->pm_map[i++] = lp;
1017 		KASSERT(i == p->pm_count, ("Invalid port count"));
1018 		speed = lacp_aggregator_bandwidth(la);
1019 	}
1020 	sc->sc_ifp->if_baudrate = speed;
1021 
1022 	/* switch the active portmap over */
1023 	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
1024 	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
1025 		    lsc->lsc_activemap,
1026 		    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
1027 }
1028 
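/*
 * lacp_compose_key: derive the 16-bit aggregation key for a port.  For a
 * non-aggregatable port, bit 15 is set and the low bits hold the port's
 * if_index.  Otherwise bit 15 is clear, bits 0..4 encode a canonical
 * media subtype per speed and bits 5..14 carry part of the lagg
 * interface's if_index, so ports of the same lagg and speed share a key.
 * The key is returned in network byte order.
 */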
1029 static uint16_t
1030 lacp_compose_key(struct lacp_port *lp)
1031 {
1032 	struct lagg_port *lgp = lp->lp_lagg;
1033 	struct lagg_softc *sc = lgp->lp_softc;
1034 	u_int media = lp->lp_media;
1035 	uint16_t key;
1036 
1037 	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {
1038 
1039 		/*
1040 		 * non-aggregatable links should have unique keys.
1041 		 *
1042 		 * XXX not truly unique: only 15 bits of the 16-bit if_index fit.
1043 		 */
1044 
1045 		/* bit 0..14:	(some bits of) if_index of this port */
1046 		key = lp->lp_ifp->if_index;
1047 		/* bit 15:	1 */
1048 		key |= 0x8000;
1049 	} else {
1050 		u_int subtype = IFM_SUBTYPE(media);
1051 
1052 		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
1053 		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));
1054 
1055 		/* bit 0..4:	IFM_SUBTYPE, collapsed to one value per speed */
1056 		switch (subtype) {
1057 		case IFM_10_T:
1058 		case IFM_10_2:
1059 		case IFM_10_5:
1060 		case IFM_10_STP:
1061 		case IFM_10_FL:
1062 			key = IFM_10_T;
1063 			break;
1064 		case IFM_100_TX:
1065 		case IFM_100_FX:
1066 		case IFM_100_T4:
1067 		case IFM_100_VG:
1068 		case IFM_100_T2:
1069 		case IFM_100_T:
1070 			key = IFM_100_TX;
1071 			break;
1072 		case IFM_1000_SX:
1073 		case IFM_1000_LX:
1074 		case IFM_1000_CX:
1075 		case IFM_1000_T:
1076 		case IFM_1000_KX:
1077 		case IFM_1000_SGMII:
1078 		case IFM_1000_CX_SGMII:
1079 			key = IFM_1000_SX;
1080 			break;
1081 		case IFM_10G_LR:
1082 		case IFM_10G_SR:
1083 		case IFM_10G_CX4:
1084 		case IFM_10G_TWINAX:
1085 		case IFM_10G_TWINAX_LONG:
1086 		case IFM_10G_LRM:
1087 		case IFM_10G_T:
1088 		case IFM_10G_KX4:
1089 		case IFM_10G_KR:
1090 		case IFM_10G_CR1:
1091 		case IFM_10G_ER:
1092 		case IFM_10G_SFI:
1093 			key = IFM_10G_LR;
1094 			break;
1095 		case IFM_20G_KR2:
1096 			key = IFM_20G_KR2;
1097 			break;
1098 		case IFM_2500_KX:
1099 		case IFM_2500_T:
1100 			key = IFM_2500_KX;
1101 			break;
1102 		case IFM_5000_T:
1103 			key = IFM_5000_T;
1104 			break;
1105 		case IFM_50G_PCIE:
1106 		case IFM_50G_CR2:
1107 		case IFM_50G_KR2:
1108 			key = IFM_50G_PCIE;
1109 			break;
1110 		case IFM_56G_R4:
1111 			key = IFM_56G_R4;
1112 			break;
1113 		case IFM_25G_PCIE:
1114 		case IFM_25G_CR:
1115 		case IFM_25G_KR:
1116 		case IFM_25G_SR:
1117 			key = IFM_25G_PCIE;
1118 			break;
1119 		case IFM_40G_CR4:
1120 		case IFM_40G_SR4:
1121 		case IFM_40G_LR4:
1122 		case IFM_40G_XLPPI:
1123 		case IFM_40G_KR4:
1124 			key = IFM_40G_CR4;
1125 			break;
1126 		case IFM_100G_CR4:
1127 		case IFM_100G_SR4:
1128 		case IFM_100G_KR4:
1129 		case IFM_100G_LR4:
1130 			key = IFM_100G_CR4;
1131 			break;
1132 		default:
1133 			key = subtype;
1134 			break;
1135 		}
1136 		/* bit 5..14:	(some bits of) if_index of lagg device */
1137 		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
1138 		/* bit 15:	0 */
1139 	}
1140 	return (htons(key));
1141 }
1142 
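/*
 * Aggregator reference counting: la_refcnt counts the ports that have
 * selected this aggregator and is always >= la_nports, the number of
 * distributing ports.  The aggregator is freed by lacp_aggregator_delref()
 * when the count drops to zero.
 */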
1143 static void
1144 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1145 {
1146 	char buf[LACP_LAGIDSTR_MAX+1];
1147 
1148 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1149 	    __func__,
1150 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1151 	    buf, sizeof(buf)),
1152 	    la->la_refcnt, la->la_refcnt + 1));
1153 
1154 	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
1155 	la->la_refcnt++;
1156 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
1157 }
1158 
1159 static void
1160 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1161 {
1162 	char buf[LACP_LAGIDSTR_MAX+1];
1163 
1164 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1165 	    __func__,
1166 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1167 	    buf, sizeof(buf)),
1168 	    la->la_refcnt, la->la_refcnt - 1));
1169 
1170 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
1171 	la->la_refcnt--;
1172 	if (la->la_refcnt > 0) {
1173 		return;
1174 	}
1175 
1176 	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
1177 	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));
1178 
1179 	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);
1180 
1181 	free(la, M_DEVBUF);
1182 }
1183 
1184 /*
1185  * lacp_aggregator_get: allocate an aggregator.
1186  */
1187 
1188 static struct lacp_aggregator *
1189 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
1190 {
1191 	struct lacp_aggregator *la;
1192 
1193 	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
1194 	if (la) {
1195 		la->la_refcnt = 1;
1196 		la->la_nports = 0;
1197 		TAILQ_INIT(&la->la_ports);
1198 		la->la_pending = 0;
1199 		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
1200 	}
1201 
1202 	return (la);
1203 }
1204 
1205 /*
1206  * lacp_fill_aggregator_id: set up a newly allocated aggregator from a port.
1207  */
1208 
1209 static void
1210 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
1211 {
1212 	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
1213 	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);
1214 
1215 	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
1216 }
1217 
1218 static void
1219 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
1220     const struct lacp_peerinfo *lpi_port)
1221 {
1222 	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
1223 	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
1224 	lpi_aggr->lip_key = lpi_port->lip_key;
1225 }
1226 
1227 /*
1228  * lacp_aggregator_is_compatible: check whether a port can join an aggregator.
1229  */
1230 
1231 static int
1232 lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
1233     const struct lacp_port *lp)
1234 {
1235 	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
1236 	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
1237 		return (0);
1238 	}
1239 
1240 	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION)) {
1241 		return (0);
1242 	}
1243 
1244 	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner)) {
1245 		return (0);
1246 	}
1247 
1248 	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor)) {
1249 		return (0);
1250 	}
1251 
1252 	return (1);
1253 }
1254 
1255 static int
1256 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
1257     const struct lacp_peerinfo *b)
1258 {
1259 	if (memcmp(&a->lip_systemid, &b->lip_systemid,
1260 	    sizeof(a->lip_systemid))) {
1261 		return (0);
1262 	}
1263 
1264 	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key))) {
1265 		return (0);
1266 	}
1267 
1268 	return (1);
1269 }
1270 
1271 static void
1272 lacp_port_enable(struct lacp_port *lp)
1273 {
1274 	lp->lp_state |= LACP_STATE_AGGREGATION;
1275 }
1276 
1277 static void
1278 lacp_port_disable(struct lacp_port *lp)
1279 {
1280 	lacp_set_mux(lp, LACP_MUX_DETACHED);
1281 
1282 	lp->lp_state &= ~LACP_STATE_AGGREGATION;
1283 	lp->lp_selected = LACP_UNSELECTED;
1284 	lacp_sm_rx_record_default(lp);
1285 	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
1286 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1287 }
1288 
1289 /*
1290  * lacp_select: select an aggregator.  create one if necessary.
1291  */
1292 static void
1293 lacp_select(struct lacp_port *lp)
1294 {
1295 	struct lacp_softc *lsc = lp->lp_lsc;
1296 	struct lacp_aggregator *la;
1297 	char buf[LACP_LAGIDSTR_MAX+1];
1298 
1299 	if (lp->lp_aggregator) {
1300 		return;
1301 	}
1302 
1303 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1304 	    ("timer_wait_while still active"));
1305 
1306 	LACP_DPRINTF((lp, "port lagid=%s\n",
1307 	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
1308 	    buf, sizeof(buf))));
1309 
1310 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1311 		if (lacp_aggregator_is_compatible(la, lp)) {
1312 			break;
1313 		}
1314 	}
1315 
1316 	if (la == NULL) {
1317 		la = lacp_aggregator_get(lsc, lp);
1318 		if (la == NULL) {
1319 			LACP_DPRINTF((lp, "aggregator creation failed\n"));
1320 
1321 			/*
1322 			 * will retry on the next tick.
1323 			 */
1324 
1325 			return;
1326 		}
1327 		lacp_fill_aggregator_id(la, lp);
1328 		LACP_DPRINTF((lp, "aggregator created\n"));
1329 	} else {
1330 		LACP_DPRINTF((lp, "compatible aggregator found\n"));
1331 		if (la->la_refcnt == LACP_MAX_PORTS)
1332 			return;
1333 		lacp_aggregator_addref(lsc, la);
1334 	}
1335 
1336 	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
1337 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1338 	    buf, sizeof(buf))));
1339 
1340 	lp->lp_aggregator = la;
1341 	lp->lp_selected = LACP_SELECTED;
1342 }
1343 
1344 /*
1345  * lacp_unselect: finish the unselect/detach process.
1346  */
1347 
1348 static void
1349 lacp_unselect(struct lacp_port *lp)
1350 {
1351 	struct lacp_softc *lsc = lp->lp_lsc;
1352 	struct lacp_aggregator *la = lp->lp_aggregator;
1353 
1354 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1355 	    ("timer_wait_while still active"));
1356 
1357 	if (la == NULL) {
1358 		return;
1359 	}
1360 
1361 	lp->lp_aggregator = NULL;
1362 	lacp_aggregator_delref(lsc, la);
1363 }
1364 
1365 /* mux machine */
1366 
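/*
 * lacp_sm_mux: mux state machine.  A port walks DETACHED -> WAITING ->
 * ATTACHED -> COLLECTING -> DISTRIBUTING as it becomes selected, the
 * wait_while timer drains and the partner reports SYNC and COLLECTING,
 * and it falls back as soon as any of those conditions is lost.
 */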
1367 static void
1368 lacp_sm_mux(struct lacp_port *lp)
1369 {
1370 	struct lagg_port *lgp = lp->lp_lagg;
1371 	struct lagg_softc *sc = lgp->lp_softc;
1372 	enum lacp_mux_state new_state;
1373 	boolean_t p_sync =
1374 		    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
1375 	boolean_t p_collecting =
1376 	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
1377 	enum lacp_selected selected = lp->lp_selected;
1378 	struct lacp_aggregator *la;
1379 
1380 	if (V_lacp_debug > 1)
1381 		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
1382 		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
1383 		    lp->lp_mux_state, selected, p_sync, p_collecting);
1384 
1385 re_eval:
1386 	la = lp->lp_aggregator;
1387 	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
1388 	    ("MUX not detached"));
1389 	new_state = lp->lp_mux_state;
1390 	switch (lp->lp_mux_state) {
1391 	case LACP_MUX_DETACHED:
1392 		if (selected != LACP_UNSELECTED) {
1393 			new_state = LACP_MUX_WAITING;
1394 		}
1395 		break;
1396 	case LACP_MUX_WAITING:
1397 		KASSERT(la->la_pending > 0 ||
1398 		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1399 		    ("timer_wait_while still active"));
1400 		if (selected == LACP_SELECTED && la->la_pending == 0) {
1401 			new_state = LACP_MUX_ATTACHED;
1402 		} else if (selected == LACP_UNSELECTED) {
1403 			new_state = LACP_MUX_DETACHED;
1404 		}
1405 		break;
1406 	case LACP_MUX_ATTACHED:
1407 		if (selected == LACP_SELECTED && p_sync) {
1408 			new_state = LACP_MUX_COLLECTING;
1409 		} else if (selected != LACP_SELECTED) {
1410 			new_state = LACP_MUX_DETACHED;
1411 		}
1412 		break;
1413 	case LACP_MUX_COLLECTING:
1414 		if (selected == LACP_SELECTED && p_sync && p_collecting) {
1415 			new_state = LACP_MUX_DISTRIBUTING;
1416 		} else if (selected != LACP_SELECTED || !p_sync) {
1417 			new_state = LACP_MUX_ATTACHED;
1418 		}
1419 		break;
1420 	case LACP_MUX_DISTRIBUTING:
1421 		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
1422 			new_state = LACP_MUX_COLLECTING;
1423 			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
1424 			sc->sc_flapping++;
1425 		}
1426 		break;
1427 	default:
1428 		panic("%s: unknown state", __func__);
1429 	}
1430 
1431 	if (lp->lp_mux_state == new_state) {
1432 		return;
1433 	}
1434 
1435 	lacp_set_mux(lp, new_state);
1436 	goto re_eval;
1437 }
1438 
1439 static void
1440 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
1441 {
1442 	struct lacp_aggregator *la = lp->lp_aggregator;
1443 
1444 	if (lp->lp_mux_state == new_state) {
1445 		return;
1446 	}
1447 
1448 	switch (new_state) {
1449 	case LACP_MUX_DETACHED:
1450 		lp->lp_state &= ~LACP_STATE_SYNC;
1451 		lacp_disable_distributing(lp);
1452 		lacp_disable_collecting(lp);
1453 		lacp_sm_assert_ntt(lp);
1454 		/* cancel timer */
1455 		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
1456 			KASSERT(la->la_pending > 0,
1457 			    ("timer_wait_while not active"));
1458 			la->la_pending--;
1459 		}
1460 		LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
1461 		lacp_unselect(lp);
1462 		break;
1463 	case LACP_MUX_WAITING:
1464 		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
1465 		    LACP_AGGREGATE_WAIT_TIME);
1466 		la->la_pending++;
1467 		break;
1468 	case LACP_MUX_ATTACHED:
1469 		lp->lp_state |= LACP_STATE_SYNC;
1470 		lacp_disable_collecting(lp);
1471 		lacp_sm_assert_ntt(lp);
1472 		break;
1473 	case LACP_MUX_COLLECTING:
1474 		lacp_enable_collecting(lp);
1475 		lacp_disable_distributing(lp);
1476 		lacp_sm_assert_ntt(lp);
1477 		break;
1478 	case LACP_MUX_DISTRIBUTING:
1479 		lacp_enable_distributing(lp);
1480 		break;
1481 	default:
1482 		panic("%s: unknown state", __func__);
1483 	}
1484 
1485 	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));
1486 
1487 	lp->lp_mux_state = new_state;
1488 }
1489 
1490 static void
1491 lacp_sm_mux_timer(struct lacp_port *lp)
1492 {
1493 	struct lacp_aggregator *la = lp->lp_aggregator;
1494 	char buf[LACP_LAGIDSTR_MAX+1];
1495 
1496 	KASSERT(la->la_pending > 0, ("no pending event"));
1497 
1498 	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
1499 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1500 	    buf, sizeof(buf)),
1501 	    la->la_pending, la->la_pending - 1));
1502 
1503 	la->la_pending--;
1504 }
1505 
1506 /* periodic transmit machine */
1507 
1508 static void
1509 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
1510 {
1511 	if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
1512 	    LACP_STATE_TIMEOUT)) {
1513 		return;
1514 	}
1515 
1516 	LACP_DPRINTF((lp, "partner timeout changed\n"));
1517 
1518 	/*
1519 	 * FAST_PERIODIC -> SLOW_PERIODIC
1520 	 * or
1521 	 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
1522 	 *
1523 	 * let lacp_sm_ptx_tx_schedule() update the timeout.
1524 	 */
1525 
1526 	LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1527 
1528 	/*
1529 	 * if the timeout has been shortened, assert NTT.
1530 	 */
1531 
1532 	if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
1533 		lacp_sm_assert_ntt(lp);
1534 	}
1535 }
1536 
1537 static void
1538 lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
1539 {
1540 	int timeout;
1541 
1542 	if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
1543 	    !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {
1544 
1545 		/*
1546 		 * NO_PERIODIC
1547 		 */
1548 
1549 		LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1550 		return;
1551 	}
1552 
1553 	if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
1554 		return;
1555 	}
1556 
1557 	timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
1558 	    LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;
1559 
1560 	LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
1561 }
1562 
1563 static void
1564 lacp_sm_ptx_timer(struct lacp_port *lp)
1565 {
1566 	lacp_sm_assert_ntt(lp);
1567 }
1568 
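/*
 * lacp_sm_rx: receive state machine.  Ignore PDUs on disabled ports and
 * PDUs looped back from ourselves, then record the partner's information,
 * re-arm the current_while timer and kick the transmit machine so a reply
 * can go out without waiting for the next tick.
 */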
1569 static void
1570 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
1571 {
1572 	int timeout;
1573 
1574 	/*
1575 	 * check LACP_DISABLED first
1576 	 */
1577 
1578 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
1579 		return;
1580 	}
1581 
1582 	/*
1583 	 * check loopback condition.
1584 	 */
1585 
1586 	if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
1587 	    &lp->lp_actor.lip_systemid)) {
1588 		return;
1589 	}
1590 
1591 	/*
1592 	 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
1593 	 */
1594 
1595 	lacp_sm_rx_update_selected(lp, du);
1596 	lacp_sm_rx_update_ntt(lp, du);
1597 	lacp_sm_rx_record_pdu(lp, du);
1598 
1599 	timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
1600 	    LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
1601 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);
1602 
1603 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1604 
1605 	/*
1606 	 * kick the transmit machine without waiting for the next tick.
1607 	 */
1608 
1609 	lacp_sm_tx(lp);
1610 }
1611 
1612 static void
1613 lacp_sm_rx_set_expired(struct lacp_port *lp)
1614 {
1615 	lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1616 	lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
1617 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
1618 	lp->lp_state |= LACP_STATE_EXPIRED;
1619 }
1620 
1621 static void
1622 lacp_sm_rx_timer(struct lacp_port *lp)
1623 {
1624 	if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
1625 		/* CURRENT -> EXPIRED */
1626 		LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
1627 		lacp_sm_rx_set_expired(lp);
1628 	} else {
1629 		/* EXPIRED -> DEFAULTED */
1630 		LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
1631 		lacp_sm_rx_update_default_selected(lp);
1632 		lacp_sm_rx_record_default(lp);
1633 		lp->lp_state &= ~LACP_STATE_EXPIRED;
1634 	}
1635 }
1636 
1637 static void
1638 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
1639 {
1640 	boolean_t active;
1641 	uint8_t oldpstate;
1642 	char buf[LACP_STATESTR_MAX+1];
1643 
1644 	LACP_TRACE(lp);
1645 
1646 	oldpstate = lp->lp_partner.lip_state;
1647 
1648 	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
1649 	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
1650 	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));
1651 
1652 	lp->lp_partner = du->ldu_actor;
1653 	if (active &&
1654 	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1655 	    LACP_STATE_AGGREGATION) &&
1656 	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
1657 	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
1658 		/* XXX nothing? */
1659 	} else {
1660 		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1661 	}
1662 
1663 	lp->lp_state &= ~LACP_STATE_DEFAULTED;
1664 
1665 	if (oldpstate != lp->lp_partner.lip_state) {
1666 		LACP_DPRINTF((lp, "old pstate %s\n",
1667 		    lacp_format_state(oldpstate, buf, sizeof(buf))));
1668 		LACP_DPRINTF((lp, "new pstate %s\n",
1669 		    lacp_format_state(lp->lp_partner.lip_state, buf,
1670 		    sizeof(buf))));
1671 	}
1672 
1673 	/* XXX Hack, still need to implement 5.4.9 para 2,3,4 */
1674 	if (lp->lp_lsc->lsc_strict_mode)
1675 		lp->lp_partner.lip_state |= LACP_STATE_SYNC;
1676 
1677 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1678 }
1679 
1680 static void
1681 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
1682 {
1683 
1684 	LACP_TRACE(lp);
1685 
1686 	if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
1687 	    !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1688 	    LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
1689 		LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
1690 		lacp_sm_assert_ntt(lp);
1691 	}
1692 }
1693 
1694 static void
1695 lacp_sm_rx_record_default(struct lacp_port *lp)
1696 {
1697 	uint8_t oldpstate;
1698 
1699 	LACP_TRACE(lp);
1700 
1701 	oldpstate = lp->lp_partner.lip_state;
1702 	if (lp->lp_lsc->lsc_strict_mode)
1703 		lp->lp_partner = lacp_partner_admin_strict;
1704 	else
1705 		lp->lp_partner = lacp_partner_admin_optimistic;
1706 	lp->lp_state |= LACP_STATE_DEFAULTED;
1707 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1708 }
1709 
1710 static void
1711 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
1712     const struct lacp_peerinfo *info)
1713 {
1714 
1715 	LACP_TRACE(lp);
1716 
1717 	if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
1718 	    !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
1719 	    LACP_STATE_AGGREGATION)) {
1720 		lp->lp_selected = LACP_UNSELECTED;
1721 		/* mux machine will clean up lp->lp_aggregator */
1722 	}
1723 }
1724 
1725 static void
1726 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
1727 {
1728 
1729 	LACP_TRACE(lp);
1730 
1731 	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
1732 }
1733 
1734 static void
1735 lacp_sm_rx_update_default_selected(struct lacp_port *lp)
1736 {
1737 
1738 	LACP_TRACE(lp);
1739 
1740 	if (lp->lp_lsc->lsc_strict_mode)
1741 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1742 		    &lacp_partner_admin_strict);
1743 	else
1744 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1745 		    &lacp_partner_admin_optimistic);
1746 }
1747 
1748 /* transmit machine */
1749 
1750 static void
1751 lacp_sm_tx(struct lacp_port *lp)
1752 {
1753 	int error = 0;
1754 
1755 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
1756 #if 1
1757 	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
1758 	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
1759 #endif
1760 	    ) {
1761 		lp->lp_flags &= ~LACP_PORT_NTT;
1762 	}
1763 
1764 	if (!(lp->lp_flags & LACP_PORT_NTT)) {
1765 		return;
1766 	}
1767 
1768 	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
1769 	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
1770 		    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
1771 		LACP_DPRINTF((lp, "rate limited pdu\n"));
1772 		return;
1773 	}
1774 
1775 	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
1776 		error = lacp_xmit_lacpdu(lp);
1777 	} else {
1778 		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
1779 	}
1780 
1781 	if (error == 0) {
1782 		lp->lp_flags &= ~LACP_PORT_NTT;
1783 	} else {
1784 		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
1785 		    error));
1786 	}
1787 }
1788 
1789 static void
1790 lacp_sm_assert_ntt(struct lacp_port *lp)
1791 {
1792 
1793 	lp->lp_flags |= LACP_PORT_NTT;
1794 }
1795 
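/*
 * lacp_run_timers: called once per tick; decrement every armed per-port
 * timer and invoke the matching handler from lacp_timer_funcs[] when one
 * expires.
 */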
1796 static void
1797 lacp_run_timers(struct lacp_port *lp)
1798 {
1799 	int i;
1800 
1801 	for (i = 0; i < LACP_NTIMER; i++) {
1802 		KASSERT(lp->lp_timer[i] >= 0,
1803 		    ("invalid timer value %d", lp->lp_timer[i]));
1804 		if (lp->lp_timer[i] == 0) {
1805 			continue;
1806 		} else if (--lp->lp_timer[i] <= 0) {
1807 			if (lacp_timer_funcs[i]) {
1808 				(*lacp_timer_funcs[i])(lp);
1809 			}
1810 		}
1811 	}
1812 }
1813 
1814 int
1815 lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
1816 {
1817 	struct lacp_softc *lsc = lp->lp_lsc;
1818 	struct lagg_port *lgp = lp->lp_lagg;
1819 	struct lacp_port *lp2;
1820 	struct markerdu *mdu;
1821 	int error = 0;
1822 	int pending = 0;
1823 
1824 	if (m->m_pkthdr.len != sizeof(*mdu)) {
1825 		goto bad;
1826 	}
1827 
1828 	if ((m->m_flags & M_MCAST) == 0) {
1829 		goto bad;
1830 	}
1831 
1832 	if (m->m_len < sizeof(*mdu)) {
1833 		m = m_pullup(m, sizeof(*mdu));
1834 		if (m == NULL) {
1835 			return (ENOMEM);
1836 		}
1837 	}
1838 
1839 	mdu = mtod(m, struct markerdu *);
1840 
1841 	if (memcmp(&mdu->mdu_eh.ether_dhost,
1842 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
1843 		goto bad;
1844 	}
1845 
1846 	if (mdu->mdu_sph.sph_version != 1) {
1847 		goto bad;
1848 	}
1849 
1850 	switch (mdu->mdu_tlv.tlv_type) {
1851 	case MARKER_TYPE_INFO:
1852 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
1853 		    marker_info_tlv_template, TRUE)) {
1854 			goto bad;
1855 		}
1856 		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
1857 		memcpy(&mdu->mdu_eh.ether_dhost,
1858 		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
1859 		memcpy(&mdu->mdu_eh.ether_shost,
1860 		    lgp->lp_lladdr, ETHER_ADDR_LEN);
1861 		error = lagg_enqueue(lp->lp_ifp, m);
1862 		break;
1863 
1864 	case MARKER_TYPE_RESPONSE:
1865 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
1866 		    marker_response_tlv_template, TRUE)) {
1867 			goto bad;
1868 		}
1869 		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
1870 		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
1871 		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));
1872 
1873 		/* Verify that it is the last marker we sent out */
1874 		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
1875 		    sizeof(struct lacp_markerinfo)))
1876 			goto bad;
1877 
1878 		LACP_LOCK(lsc);
1879 		lp->lp_flags &= ~LACP_PORT_MARK;
1880 
1881 		if (lsc->lsc_suppress_distributing) {
1882 			/* Check if any ports are waiting for a response */
1883 			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
1884 				if (lp2->lp_flags & LACP_PORT_MARK) {
1885 					pending = 1;
1886 					break;
1887 				}
1888 			}
1889 
1890 			if (pending == 0) {
1891 				/* All interface queues are clear */
1892 				LACP_DPRINTF((NULL, "queue flush complete\n"));
1893 				lsc->lsc_suppress_distributing = FALSE;
1894 			}
1895 		}
1896 		LACP_UNLOCK(lsc);
1897 		m_freem(m);
1898 		break;
1899 
1900 	default:
1901 		goto bad;
1902 	}
1903 
1904 	return (error);
1905 
1906 bad:
1907 	LACP_DPRINTF((lp, "bad marker frame\n"));
1908 	m_freem(m);
1909 	return (EINVAL);
1910 }
1911 
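/*
 * tlv_check: walk the chain of TLVs in a received PDU against a template,
 * verifying each length (and, when check_type is set, each type) without
 * reading past the end of the PDU.
 */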
1912 static int
1913 tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
1914     const struct tlv_template *tmpl, boolean_t check_type)
1915 {
1916 	while (/* CONSTCOND */ 1) {
1917 		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
1918 			return (EINVAL);
1919 		}
1920 		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
1921 		    tlv->tlv_length != tmpl->tmpl_length) {
1922 			return (EINVAL);
1923 		}
1924 		if (tmpl->tmpl_type == 0) {
1925 			break;
1926 		}
1927 		tlv = (const struct tlvhdr *)
1928 		    ((const char *)tlv + tlv->tlv_length);
1929 		tmpl++;
1930 	}
1931 
1932 	return (0);
1933 }
1934 
1935 /* Debugging */
1936 const char *
1937 lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
1938 {
1939 	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
1940 	    (int)mac[0],
1941 	    (int)mac[1],
1942 	    (int)mac[2],
1943 	    (int)mac[3],
1944 	    (int)mac[4],
1945 	    (int)mac[5]);
1946 
1947 	return (buf);
1948 }
1949 
1950 const char *
1951 lacp_format_systemid(const struct lacp_systemid *sysid,
1952     char *buf, size_t buflen)
1953 {
1954 	char macbuf[LACP_MACSTR_MAX+1];
1955 
1956 	snprintf(buf, buflen, "%04X,%s",
1957 	    ntohs(sysid->lsi_prio),
1958 	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));
1959 
1960 	return (buf);
1961 }
1962 
1963 const char *
1964 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
1965 {
1966 	snprintf(buf, buflen, "%04X,%04X",
1967 	    ntohs(portid->lpi_prio),
1968 	    ntohs(portid->lpi_portno));
1969 
1970 	return (buf);
1971 }
1972 
1973 const char *
1974 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
1975 {
1976 	char sysid[LACP_SYSTEMIDSTR_MAX+1];
1977 	char portid[LACP_PORTIDSTR_MAX+1];
1978 
1979 	snprintf(buf, buflen, "(%s,%04X,%s)",
1980 	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
1981 	    ntohs(peer->lip_key),
1982 	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));
1983 
1984 	return (buf);
1985 }
1986 
1987 const char *
1988 lacp_format_lagid(const struct lacp_peerinfo *a,
1989     const struct lacp_peerinfo *b, char *buf, size_t buflen)
1990 {
1991 	char astr[LACP_PARTNERSTR_MAX+1];
1992 	char bstr[LACP_PARTNERSTR_MAX+1];
1993 
1994 #if 0
1995 	/*
1996 	 * there's a convention to display the lower-numbered peer
1997 	 * on the left.
1998 	 */
1999 
2000 	if (lacp_compare_peerinfo(a, b) > 0) {
2001 		const struct lacp_peerinfo *t;
2002 
2003 		t = a;
2004 		a = b;
2005 		b = t;
2006 	}
2007 #endif
2008 
2009 	snprintf(buf, buflen, "[%s,%s]",
2010 	    lacp_format_partner(a, astr, sizeof(astr)),
2011 	    lacp_format_partner(b, bstr, sizeof(bstr)));
2012 
2013 	return (buf);
2014 }
2015 
2016 const char *
2017 lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
2018     char *buf, size_t buflen)
2019 {
2020 	if (la == NULL) {
2021 		return ("(none)");
2022 	}
2023 
2024 	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
2025 }
2026 
2027 const char *
2028 lacp_format_state(uint8_t state, char *buf, size_t buflen)
2029 {
2030 	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
2031 	return (buf);
2032 }
2033 
2034 static void
2035 lacp_dump_lacpdu(const struct lacpdu *du)
2036 {
2037 	char buf[LACP_PARTNERSTR_MAX+1];
2038 	char buf2[LACP_STATESTR_MAX+1];
2039 
2040 	printf("actor=%s\n",
2041 	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
2042 	printf("actor.state=%s\n",
2043 	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
2044 	printf("partner=%s\n",
2045 	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
2046 	printf("partner.state=%s\n",
2047 	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));
2048 
2049 	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
2050 }
2051 
2052 static void
2053 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
2054 {
2055 	va_list va;
2056 
2057 	if (lp) {
2058 		printf("%s: ", lp->lp_ifp->if_xname);
2059 	}
2060 
2061 	va_start(va, fmt);
2062 	vprintf(fmt, va);
2063 	va_end(va);
2064 }
2065