xref: /freebsd/sys/net/ieee8023ad_lacp.c (revision 685dc743)
1 /*	$NetBSD: ieee8023ad_lacp.c,v 1.3 2005/12/11 12:24:54 christos Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c)2005 YAMAMOTO Takashi,
7  * Copyright (c)2008 Andrew Thompson <thompsa@FreeBSD.org>
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 #include "opt_kern_tls.h"
34 #include "opt_ratelimit.h"
35 
36 #include <sys/param.h>
37 #include <sys/callout.h>
38 #include <sys/eventhandler.h>
39 #include <sys/mbuf.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h> /* hz */
43 #include <sys/socket.h> /* for net/if.h */
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46 #include <machine/stdarg.h>
47 #include <sys/lock.h>
48 #include <sys/rwlock.h>
49 #include <sys/taskqueue.h>
50 #include <sys/time.h>
51 
52 #include <net/if.h>
53 #include <net/if_var.h>
54 #include <net/if_private.h>
55 #include <net/if_dl.h>
56 #include <net/ethernet.h>
57 #include <net/infiniband.h>
58 #include <net/if_media.h>
59 #include <net/if_types.h>
60 
61 #include <net/if_lagg.h>
62 #include <net/ieee8023ad_lacp.h>
63 
64 /*
65  * actor system priority and port priority.
66  * XXX should be configurable.
67  */
68 
69 #define	LACP_SYSTEM_PRIO	0x8000
70 #define	LACP_PORT_PRIO		0x8000
71 
72 const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
73     { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
74 
75 static const struct tlv_template lacp_info_tlv_template[] = {
76 	{ LACP_TYPE_ACTORINFO,
77 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
78 	{ LACP_TYPE_PARTNERINFO,
79 	    sizeof(struct tlvhdr) + sizeof(struct lacp_peerinfo) },
80 	{ LACP_TYPE_COLLECTORINFO,
81 	    sizeof(struct tlvhdr) + sizeof(struct lacp_collectorinfo) },
82 	{ 0, 0 },
83 };
84 
85 static const struct tlv_template marker_info_tlv_template[] = {
86 	{ MARKER_TYPE_INFO,
87 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
88 	{ 0, 0 },
89 };
90 
91 static const struct tlv_template marker_response_tlv_template[] = {
92 	{ MARKER_TYPE_RESPONSE,
93 	    sizeof(struct tlvhdr) + sizeof(struct lacp_markerinfo) },
94 	{ 0, 0 },
95 };
96 
97 typedef void (*lacp_timer_func_t)(struct lacp_port *);
98 
99 static void	lacp_fill_actorinfo(struct lacp_port *, struct lacp_peerinfo *);
100 static void	lacp_fill_markerinfo(struct lacp_port *,
101 		    struct lacp_markerinfo *);
102 
103 static uint64_t	lacp_aggregator_bandwidth(struct lacp_aggregator *);
104 static void	lacp_suppress_distributing(struct lacp_softc *,
105 		    struct lacp_aggregator *);
106 static void	lacp_transit_expire(void *);
107 static void	lacp_update_portmap(struct lacp_softc *);
108 static void	lacp_select_active_aggregator(struct lacp_softc *);
109 static uint16_t	lacp_compose_key(struct lacp_port *);
110 static int	tlv_check(const void *, size_t, const struct tlvhdr *,
111 		    const struct tlv_template *, boolean_t);
112 static void	lacp_tick(void *);
113 
114 static void	lacp_fill_aggregator_id(struct lacp_aggregator *,
115 		    const struct lacp_port *);
116 static void	lacp_fill_aggregator_id_peer(struct lacp_peerinfo *,
117 		    const struct lacp_peerinfo *);
118 static bool	lacp_aggregator_is_compatible(const struct lacp_aggregator *,
119 		    const struct lacp_port *);
120 static bool	lacp_peerinfo_is_compatible(const struct lacp_peerinfo *,
121 		    const struct lacp_peerinfo *);
122 
123 static struct lacp_aggregator *lacp_aggregator_get(struct lacp_softc *,
124 		    struct lacp_port *);
125 static void	lacp_aggregator_addref(struct lacp_softc *,
126 		    struct lacp_aggregator *);
127 static void	lacp_aggregator_delref(struct lacp_softc *,
128 		    struct lacp_aggregator *);
129 
130 /* receive machine */
131 
132 static int	lacp_pdu_input(struct lacp_port *, struct mbuf *);
133 static int	lacp_marker_input(struct lacp_port *, struct mbuf *);
134 static void	lacp_sm_rx(struct lacp_port *, const struct lacpdu *);
135 static void	lacp_sm_rx_timer(struct lacp_port *);
136 static void	lacp_sm_rx_set_expired(struct lacp_port *);
137 static void	lacp_sm_rx_update_ntt(struct lacp_port *,
138 		    const struct lacpdu *);
139 static void	lacp_sm_rx_record_pdu(struct lacp_port *,
140 		    const struct lacpdu *);
141 static void	lacp_sm_rx_update_selected(struct lacp_port *,
142 		    const struct lacpdu *);
143 static void	lacp_sm_rx_record_default(struct lacp_port *);
144 static void	lacp_sm_rx_update_default_selected(struct lacp_port *);
145 static void	lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *,
146 		    const struct lacp_peerinfo *);
147 
148 /* mux machine */
149 
150 static void	lacp_sm_mux(struct lacp_port *);
151 static void	lacp_set_mux(struct lacp_port *, enum lacp_mux_state);
152 static void	lacp_sm_mux_timer(struct lacp_port *);
153 
154 /* periodic transmit machine */
155 
156 static void	lacp_sm_ptx_update_timeout(struct lacp_port *, uint8_t);
157 static void	lacp_sm_ptx_tx_schedule(struct lacp_port *);
158 static void	lacp_sm_ptx_timer(struct lacp_port *);
159 
160 /* transmit machine */
161 
162 static void	lacp_sm_tx(struct lacp_port *);
163 static void	lacp_sm_assert_ntt(struct lacp_port *);
164 
165 static void	lacp_run_timers(struct lacp_port *);
166 static int	lacp_compare_peerinfo(const struct lacp_peerinfo *,
167 		    const struct lacp_peerinfo *);
168 static int	lacp_compare_systemid(const struct lacp_systemid *,
169 		    const struct lacp_systemid *);
170 static void	lacp_port_enable(struct lacp_port *);
171 static void	lacp_port_disable(struct lacp_port *);
172 static void	lacp_select(struct lacp_port *);
173 static void	lacp_unselect(struct lacp_port *);
174 static void	lacp_disable_collecting(struct lacp_port *);
175 static void	lacp_enable_collecting(struct lacp_port *);
176 static void	lacp_disable_distributing(struct lacp_port *);
177 static void	lacp_enable_distributing(struct lacp_port *);
178 static int	lacp_xmit_lacpdu(struct lacp_port *);
179 static int	lacp_xmit_marker(struct lacp_port *);
180 
181 /* Debugging */
182 
183 static void	lacp_dump_lacpdu(const struct lacpdu *);
184 static const char *lacp_format_partner(const struct lacp_peerinfo *, char *,
185 		    size_t);
186 static const char *lacp_format_lagid(const struct lacp_peerinfo *,
187 		    const struct lacp_peerinfo *, char *, size_t);
188 static const char *lacp_format_lagid_aggregator(const struct lacp_aggregator *,
189 		    char *, size_t);
190 static const char *lacp_format_state(uint8_t, char *, size_t);
191 static const char *lacp_format_mac(const uint8_t *, char *, size_t);
192 static const char *lacp_format_systemid(const struct lacp_systemid *, char *,
193 		    size_t);
194 static const char *lacp_format_portid(const struct lacp_portid *, char *,
195 		    size_t);
196 static void	lacp_dprintf(const struct lacp_port *, const char *, ...)
197 		    __attribute__((__format__(__printf__, 2, 3)));
198 
199 VNET_DEFINE_STATIC(int, lacp_debug);
200 #define	V_lacp_debug	VNET(lacp_debug)
201 SYSCTL_NODE(_net_link_lagg, OID_AUTO, lacp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
202     "ieee802.3ad");
203 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, debug, CTLFLAG_RWTUN | CTLFLAG_VNET,
204     &VNET_NAME(lacp_debug), 0, "Enable LACP debug logging (1=debug, 2=trace)");
205 
206 VNET_DEFINE_STATIC(int, lacp_default_strict_mode) = 1;
207 SYSCTL_INT(_net_link_lagg_lacp, OID_AUTO, default_strict_mode,
208     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(lacp_default_strict_mode), 0,
209     "LACP strict protocol compliance default");
210 #define LACP_DPRINTF(a) if (V_lacp_debug & 0x01) { lacp_dprintf a ; }
211 #define LACP_TRACE(a) if (V_lacp_debug & 0x02) { lacp_dprintf(a,"%s\n",__func__); }
212 #define LACP_TPRINTF(a) if (V_lacp_debug & 0x04) { lacp_dprintf a ; }
213 
214 /*
215  * partner administration variables.
216  * XXX should be configurable.
217  */
218 
219 static const struct lacp_peerinfo lacp_partner_admin_optimistic = {
220 	.lip_systemid = { .lsi_prio = 0xffff },
221 	.lip_portid = { .lpi_prio = 0xffff },
222 	.lip_state = LACP_STATE_SYNC | LACP_STATE_AGGREGATION |
223 	    LACP_STATE_COLLECTING | LACP_STATE_DISTRIBUTING,
224 };
225 
226 static const struct lacp_peerinfo lacp_partner_admin_strict = {
227 	.lip_systemid = { .lsi_prio = 0xffff },
228 	.lip_portid = { .lpi_prio = 0xffff },
229 	.lip_state = 0,
230 };
231 
232 static const lacp_timer_func_t lacp_timer_funcs[LACP_NTIMER] = {
233 	[LACP_TIMER_CURRENT_WHILE] = lacp_sm_rx_timer,
234 	[LACP_TIMER_PERIODIC] = lacp_sm_ptx_timer,
235 	[LACP_TIMER_WAIT_WHILE] = lacp_sm_mux_timer,
236 };
237 
238 struct mbuf *
lacp_input(struct lagg_port * lgp,struct mbuf * m)239 lacp_input(struct lagg_port *lgp, struct mbuf *m)
240 {
241 	struct lacp_port *lp = LACP_PORT(lgp);
242 	uint8_t subtype;
243 
244 	if (m->m_pkthdr.len < sizeof(struct ether_header) + sizeof(subtype)) {
245 		m_freem(m);
246 		return (NULL);
247 	}
248 
249 	m_copydata(m, sizeof(struct ether_header), sizeof(subtype), &subtype);
250 	switch (subtype) {
251 		case SLOWPROTOCOLS_SUBTYPE_LACP:
252 			lacp_pdu_input(lp, m);
253 			return (NULL);
254 
255 		case SLOWPROTOCOLS_SUBTYPE_MARKER:
256 			lacp_marker_input(lp, m);
257 			return (NULL);
258 	}
259 
260 	/* Not a subtype we are interested in */
261 	return (m);
262 }
263 
264 /*
265  * lacp_pdu_input: process lacpdu
266  */
267 static int
lacp_pdu_input(struct lacp_port * lp,struct mbuf * m)268 lacp_pdu_input(struct lacp_port *lp, struct mbuf *m)
269 {
270 	struct lacp_softc *lsc = lp->lp_lsc;
271 	struct lacpdu *du;
272 	int error = 0;
273 
274 	if (m->m_pkthdr.len != sizeof(*du)) {
275 		goto bad;
276 	}
277 
278 	if ((m->m_flags & M_MCAST) == 0) {
279 		goto bad;
280 	}
281 
282 	if (m->m_len < sizeof(*du)) {
283 		m = m_pullup(m, sizeof(*du));
284 		if (m == NULL) {
285 			return (ENOMEM);
286 		}
287 	}
288 
289 	du = mtod(m, struct lacpdu *);
290 
291 	if (memcmp(&du->ldu_eh.ether_dhost,
292 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
293 		goto bad;
294 	}
295 
296 	/*
297 	 * ignore the version for compatibility with
298 	 * the future protocol revisions.
299 	 */
300 #if 0
301 	if (du->ldu_sph.sph_version != 1) {
302 		goto bad;
303 	}
304 #endif
305 
306 	/*
307 	 * ignore tlv types for compatibility with
308 	 * the future protocol revisions.
309 	 */
310 	if (tlv_check(du, sizeof(*du), &du->ldu_tlv_actor,
311 	    lacp_info_tlv_template, FALSE)) {
312 		goto bad;
313 	}
314 
315         if (V_lacp_debug > 0) {
316 		lacp_dprintf(lp, "lacpdu receive\n");
317 		lacp_dump_lacpdu(du);
318 	}
319 
320 	if ((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_rx_test) {
321 		LACP_TPRINTF((lp, "Dropping RX PDU\n"));
322 		goto bad;
323 	}
324 
325 	LACP_LOCK(lsc);
326 	lacp_sm_rx(lp, du);
327 	LACP_UNLOCK(lsc);
328 
329 	m_freem(m);
330 	return (error);
331 
332 bad:
333 	m_freem(m);
334 	return (EINVAL);
335 }
336 
337 static void
lacp_fill_actorinfo(struct lacp_port * lp,struct lacp_peerinfo * info)338 lacp_fill_actorinfo(struct lacp_port *lp, struct lacp_peerinfo *info)
339 {
340 	struct lagg_port *lgp = lp->lp_lagg;
341 	struct lagg_softc *sc = lgp->lp_softc;
342 
343 	info->lip_systemid.lsi_prio = htons(LACP_SYSTEM_PRIO);
344 	memcpy(&info->lip_systemid.lsi_mac,
345 	    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
346 	info->lip_portid.lpi_prio = htons(LACP_PORT_PRIO);
347 	info->lip_portid.lpi_portno = htons(lp->lp_ifp->if_index);
348 	info->lip_state = lp->lp_state;
349 }
350 
351 static void
lacp_fill_markerinfo(struct lacp_port * lp,struct lacp_markerinfo * info)352 lacp_fill_markerinfo(struct lacp_port *lp, struct lacp_markerinfo *info)
353 {
354 	struct ifnet *ifp = lp->lp_ifp;
355 
356 	/* Fill in the port index and system id (encoded as the MAC) */
357 	info->mi_rq_port = htons(ifp->if_index);
358 	memcpy(&info->mi_rq_system, lp->lp_systemid.lsi_mac, ETHER_ADDR_LEN);
359 	info->mi_rq_xid = htonl(0);
360 }
361 
362 static int
lacp_xmit_lacpdu(struct lacp_port * lp)363 lacp_xmit_lacpdu(struct lacp_port *lp)
364 {
365 	struct lagg_port *lgp = lp->lp_lagg;
366 	struct mbuf *m;
367 	struct lacpdu *du;
368 	int error;
369 
370 	LACP_LOCK_ASSERT(lp->lp_lsc);
371 
372 	m = m_gethdr(M_NOWAIT, MT_DATA);
373 	if (m == NULL) {
374 		return (ENOMEM);
375 	}
376 	m->m_len = m->m_pkthdr.len = sizeof(*du);
377 
378 	du = mtod(m, struct lacpdu *);
379 	memset(du, 0, sizeof(*du));
380 
381 	memcpy(&du->ldu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
382 	    ETHER_ADDR_LEN);
383 	memcpy(&du->ldu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
384 	du->ldu_eh.ether_type = htons(ETHERTYPE_SLOW);
385 
386 	du->ldu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_LACP;
387 	du->ldu_sph.sph_version = 1;
388 
389 	TLV_SET(&du->ldu_tlv_actor, LACP_TYPE_ACTORINFO, sizeof(du->ldu_actor));
390 	du->ldu_actor = lp->lp_actor;
391 
392 	TLV_SET(&du->ldu_tlv_partner, LACP_TYPE_PARTNERINFO,
393 	    sizeof(du->ldu_partner));
394 	du->ldu_partner = lp->lp_partner;
395 
396 	TLV_SET(&du->ldu_tlv_collector, LACP_TYPE_COLLECTORINFO,
397 	    sizeof(du->ldu_collector));
398 	du->ldu_collector.lci_maxdelay = 0;
399 
400 	if (V_lacp_debug > 0) {
401 		lacp_dprintf(lp, "lacpdu transmit\n");
402 		lacp_dump_lacpdu(du);
403 	}
404 
405 	m->m_flags |= M_MCAST;
406 
407 	/*
408 	 * XXX should use higher priority queue.
409 	 * otherwise network congestion can break aggregation.
410 	 */
411 
412 	error = lagg_enqueue(lp->lp_ifp, m);
413 	return (error);
414 }
415 
416 static int
lacp_xmit_marker(struct lacp_port * lp)417 lacp_xmit_marker(struct lacp_port *lp)
418 {
419 	struct lagg_port *lgp = lp->lp_lagg;
420 	struct mbuf *m;
421 	struct markerdu *mdu;
422 	int error;
423 
424 	LACP_LOCK_ASSERT(lp->lp_lsc);
425 
426 	m = m_gethdr(M_NOWAIT, MT_DATA);
427 	if (m == NULL) {
428 		return (ENOMEM);
429 	}
430 	m->m_len = m->m_pkthdr.len = sizeof(*mdu);
431 
432 	mdu = mtod(m, struct markerdu *);
433 	memset(mdu, 0, sizeof(*mdu));
434 
435 	memcpy(&mdu->mdu_eh.ether_dhost, ethermulticastaddr_slowprotocols,
436 	    ETHER_ADDR_LEN);
437 	memcpy(&mdu->mdu_eh.ether_shost, lgp->lp_lladdr, ETHER_ADDR_LEN);
438 	mdu->mdu_eh.ether_type = htons(ETHERTYPE_SLOW);
439 
440 	mdu->mdu_sph.sph_subtype = SLOWPROTOCOLS_SUBTYPE_MARKER;
441 	mdu->mdu_sph.sph_version = 1;
442 
443 	/* Bump the transaction id and copy over the marker info */
444 	lp->lp_marker.mi_rq_xid = htonl(ntohl(lp->lp_marker.mi_rq_xid) + 1);
445 	TLV_SET(&mdu->mdu_tlv, MARKER_TYPE_INFO, sizeof(mdu->mdu_info));
446 	mdu->mdu_info = lp->lp_marker;
447 
448 	LACP_DPRINTF((lp, "marker transmit, port=%u, sys=%6D, id=%u\n",
449 	    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system, ":",
450 	    ntohl(mdu->mdu_info.mi_rq_xid)));
451 
452 	m->m_flags |= M_MCAST;
453 	error = lagg_enqueue(lp->lp_ifp, m);
454 	return (error);
455 }
456 
457 void
lacp_linkstate(struct lagg_port * lgp)458 lacp_linkstate(struct lagg_port *lgp)
459 {
460 	struct lacp_port *lp = LACP_PORT(lgp);
461 	struct lacp_softc *lsc = lp->lp_lsc;
462 	struct ifnet *ifp = lgp->lp_ifp;
463 	struct ifmediareq ifmr;
464 	int error = 0;
465 	u_int media;
466 	uint8_t old_state;
467 	uint16_t old_key;
468 
469 	bzero((char *)&ifmr, sizeof(ifmr));
470 	error = (*ifp->if_ioctl)(ifp, SIOCGIFXMEDIA, (caddr_t)&ifmr);
471 	if (error != 0) {
472 		bzero((char *)&ifmr, sizeof(ifmr));
473 		error = (*ifp->if_ioctl)(ifp, SIOCGIFMEDIA, (caddr_t)&ifmr);
474 	}
475 	if (error != 0)
476 		return;
477 
478 	LACP_LOCK(lsc);
479 	media = ifmr.ifm_active;
480 	LACP_DPRINTF((lp, "media changed 0x%x -> 0x%x, ether = %d, fdx = %d, "
481 	    "link = %d\n", lp->lp_media, media, IFM_TYPE(media) == IFM_ETHER,
482 	    (media & IFM_FDX) != 0, ifp->if_link_state == LINK_STATE_UP));
483 	old_state = lp->lp_state;
484 	old_key = lp->lp_key;
485 
486 	lp->lp_media = media;
487 	/*
488 	 * If the port is not an active full duplex Ethernet link then it can
489 	 * not be aggregated.
490 	 */
491 	if (IFM_TYPE(media) != IFM_ETHER || (media & IFM_FDX) == 0 ||
492 	    ifp->if_link_state != LINK_STATE_UP) {
493 		lacp_port_disable(lp);
494 	} else {
495 		lacp_port_enable(lp);
496 	}
497 	lp->lp_key = lacp_compose_key(lp);
498 
499 	if (old_state != lp->lp_state || old_key != lp->lp_key) {
500 		LACP_DPRINTF((lp, "-> UNSELECTED\n"));
501 		lp->lp_selected = LACP_UNSELECTED;
502 	}
503 	LACP_UNLOCK(lsc);
504 }
505 
506 static void
lacp_tick(void * arg)507 lacp_tick(void *arg)
508 {
509 	struct lacp_softc *lsc = arg;
510 	struct lacp_port *lp;
511 
512 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
513 		if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0)
514 			continue;
515 
516 		CURVNET_SET(lp->lp_ifp->if_vnet);
517 		lacp_run_timers(lp);
518 
519 		lacp_select(lp);
520 		lacp_sm_mux(lp);
521 		lacp_sm_tx(lp);
522 		lacp_sm_ptx_tx_schedule(lp);
523 		CURVNET_RESTORE();
524 	}
525 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
526 }
527 
528 int
lacp_port_create(struct lagg_port * lgp)529 lacp_port_create(struct lagg_port *lgp)
530 {
531 	struct lagg_softc *sc = lgp->lp_softc;
532 	struct lacp_softc *lsc = LACP_SOFTC(sc);
533 	struct lacp_port *lp;
534 	struct ifnet *ifp = lgp->lp_ifp;
535 	struct sockaddr_dl sdl;
536 	struct ifmultiaddr *rifma = NULL;
537 	int error;
538 
539 	link_init_sdl(ifp, (struct sockaddr *)&sdl, IFT_ETHER);
540 	sdl.sdl_alen = ETHER_ADDR_LEN;
541 
542 	bcopy(&ethermulticastaddr_slowprotocols,
543 	    LLADDR(&sdl), ETHER_ADDR_LEN);
544 	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
545 	if (error) {
546 		printf("%s: ADDMULTI failed on %s\n", __func__,
547 		    lgp->lp_ifp->if_xname);
548 		return (error);
549 	}
550 
551 	lp = malloc(sizeof(struct lacp_port),
552 	    M_DEVBUF, M_NOWAIT|M_ZERO);
553 	if (lp == NULL)
554 		return (ENOMEM);
555 
556 	LACP_LOCK(lsc);
557 	lgp->lp_psc = lp;
558 	lp->lp_ifp = ifp;
559 	lp->lp_lagg = lgp;
560 	lp->lp_lsc = lsc;
561 	lp->lp_ifma = rifma;
562 
563 	LIST_INSERT_HEAD(&lsc->lsc_ports, lp, lp_next);
564 
565 	lacp_fill_actorinfo(lp, &lp->lp_actor);
566 	lacp_fill_markerinfo(lp, &lp->lp_marker);
567 	lp->lp_state = LACP_STATE_ACTIVITY;
568 	lp->lp_aggregator = NULL;
569 	lacp_sm_rx_set_expired(lp);
570 	LACP_UNLOCK(lsc);
571 	lacp_linkstate(lgp);
572 
573 	return (0);
574 }
575 
576 void
lacp_port_destroy(struct lagg_port * lgp)577 lacp_port_destroy(struct lagg_port *lgp)
578 {
579 	struct lacp_port *lp = LACP_PORT(lgp);
580 	struct lacp_softc *lsc = lp->lp_lsc;
581 	int i;
582 
583 	LACP_LOCK(lsc);
584 	for (i = 0; i < LACP_NTIMER; i++) {
585 		LACP_TIMER_DISARM(lp, i);
586 	}
587 
588 	lacp_disable_collecting(lp);
589 	lacp_disable_distributing(lp);
590 	lacp_unselect(lp);
591 
592 	LIST_REMOVE(lp, lp_next);
593 	LACP_UNLOCK(lsc);
594 
595 	/* The address may have already been removed by if_purgemaddrs() */
596 	if (!lgp->lp_detaching)
597 		if_delmulti_ifma(lp->lp_ifma);
598 
599 	free(lp, M_DEVBUF);
600 }
601 
602 void
lacp_req(struct lagg_softc * sc,void * data)603 lacp_req(struct lagg_softc *sc, void *data)
604 {
605 	struct lacp_opreq *req = (struct lacp_opreq *)data;
606 	struct lacp_softc *lsc = LACP_SOFTC(sc);
607 	struct lacp_aggregator *la;
608 
609 	bzero(req, sizeof(struct lacp_opreq));
610 
611 	/*
612 	 * If the LACP softc is NULL, return with the opreq structure full of
613 	 * zeros.  It is normal for the softc to be NULL while the lagg is
614 	 * being destroyed.
615 	 */
616 	if (NULL == lsc)
617 		return;
618 
619 	la = lsc->lsc_active_aggregator;
620 	LACP_LOCK(lsc);
621 	if (la != NULL) {
622 		req->actor_prio = ntohs(la->la_actor.lip_systemid.lsi_prio);
623 		memcpy(&req->actor_mac, &la->la_actor.lip_systemid.lsi_mac,
624 		    ETHER_ADDR_LEN);
625 		req->actor_key = ntohs(la->la_actor.lip_key);
626 		req->actor_portprio = ntohs(la->la_actor.lip_portid.lpi_prio);
627 		req->actor_portno = ntohs(la->la_actor.lip_portid.lpi_portno);
628 		req->actor_state = la->la_actor.lip_state;
629 
630 		req->partner_prio = ntohs(la->la_partner.lip_systemid.lsi_prio);
631 		memcpy(&req->partner_mac, &la->la_partner.lip_systemid.lsi_mac,
632 		    ETHER_ADDR_LEN);
633 		req->partner_key = ntohs(la->la_partner.lip_key);
634 		req->partner_portprio = ntohs(la->la_partner.lip_portid.lpi_prio);
635 		req->partner_portno = ntohs(la->la_partner.lip_portid.lpi_portno);
636 		req->partner_state = la->la_partner.lip_state;
637 	}
638 	LACP_UNLOCK(lsc);
639 }
640 
641 void
lacp_portreq(struct lagg_port * lgp,void * data)642 lacp_portreq(struct lagg_port *lgp, void *data)
643 {
644 	struct lacp_opreq *req = (struct lacp_opreq *)data;
645 	struct lacp_port *lp = LACP_PORT(lgp);
646 	struct lacp_softc *lsc = lp->lp_lsc;
647 
648 	LACP_LOCK(lsc);
649 	req->actor_prio = ntohs(lp->lp_actor.lip_systemid.lsi_prio);
650 	memcpy(&req->actor_mac, &lp->lp_actor.lip_systemid.lsi_mac,
651 	    ETHER_ADDR_LEN);
652 	req->actor_key = ntohs(lp->lp_actor.lip_key);
653 	req->actor_portprio = ntohs(lp->lp_actor.lip_portid.lpi_prio);
654 	req->actor_portno = ntohs(lp->lp_actor.lip_portid.lpi_portno);
655 	req->actor_state = lp->lp_actor.lip_state;
656 
657 	req->partner_prio = ntohs(lp->lp_partner.lip_systemid.lsi_prio);
658 	memcpy(&req->partner_mac, &lp->lp_partner.lip_systemid.lsi_mac,
659 	    ETHER_ADDR_LEN);
660 	req->partner_key = ntohs(lp->lp_partner.lip_key);
661 	req->partner_portprio = ntohs(lp->lp_partner.lip_portid.lpi_prio);
662 	req->partner_portno = ntohs(lp->lp_partner.lip_portid.lpi_portno);
663 	req->partner_state = lp->lp_partner.lip_state;
664 	LACP_UNLOCK(lsc);
665 }
666 
667 static void
lacp_disable_collecting(struct lacp_port * lp)668 lacp_disable_collecting(struct lacp_port *lp)
669 {
670 	LACP_DPRINTF((lp, "collecting disabled\n"));
671 	lp->lp_state &= ~LACP_STATE_COLLECTING;
672 }
673 
674 static void
lacp_enable_collecting(struct lacp_port * lp)675 lacp_enable_collecting(struct lacp_port *lp)
676 {
677 	LACP_DPRINTF((lp, "collecting enabled\n"));
678 	lp->lp_state |= LACP_STATE_COLLECTING;
679 }
680 
681 static void
lacp_disable_distributing(struct lacp_port * lp)682 lacp_disable_distributing(struct lacp_port *lp)
683 {
684 	struct lacp_aggregator *la = lp->lp_aggregator;
685 	struct lacp_softc *lsc = lp->lp_lsc;
686 	struct lagg_softc *sc = lsc->lsc_softc;
687 	char buf[LACP_LAGIDSTR_MAX+1];
688 
689 	LACP_LOCK_ASSERT(lsc);
690 
691 	if (la == NULL || (lp->lp_state & LACP_STATE_DISTRIBUTING) == 0) {
692 		return;
693 	}
694 
695 	KASSERT(!TAILQ_EMPTY(&la->la_ports), ("no aggregator ports"));
696 	KASSERT(la->la_nports > 0, ("nports invalid (%d)", la->la_nports));
697 	KASSERT(la->la_refcnt >= la->la_nports, ("aggregator refcnt invalid"));
698 
699 	LACP_DPRINTF((lp, "disable distributing on aggregator %s, "
700 	    "nports %d -> %d\n",
701 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
702 	    la->la_nports, la->la_nports - 1));
703 
704 	TAILQ_REMOVE(&la->la_ports, lp, lp_dist_q);
705 	la->la_nports--;
706 	sc->sc_active = la->la_nports;
707 
708 	if (lsc->lsc_active_aggregator == la) {
709 		lacp_suppress_distributing(lsc, la);
710 		lacp_select_active_aggregator(lsc);
711 		/* regenerate the port map, the active aggregator has changed */
712 		lacp_update_portmap(lsc);
713 	}
714 
715 	lp->lp_state &= ~LACP_STATE_DISTRIBUTING;
716 	if_link_state_change(sc->sc_ifp,
717 	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
718 }
719 
720 static void
lacp_enable_distributing(struct lacp_port * lp)721 lacp_enable_distributing(struct lacp_port *lp)
722 {
723 	struct lacp_aggregator *la = lp->lp_aggregator;
724 	struct lacp_softc *lsc = lp->lp_lsc;
725 	struct lagg_softc *sc = lsc->lsc_softc;
726 	char buf[LACP_LAGIDSTR_MAX+1];
727 
728 	LACP_LOCK_ASSERT(lsc);
729 
730 	if ((lp->lp_state & LACP_STATE_DISTRIBUTING) != 0) {
731 		return;
732 	}
733 
734 	LACP_DPRINTF((lp, "enable distributing on aggregator %s, "
735 	    "nports %d -> %d\n",
736 	    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
737 	    la->la_nports, la->la_nports + 1));
738 
739 	KASSERT(la->la_refcnt > la->la_nports, ("aggregator refcnt invalid"));
740 	TAILQ_INSERT_HEAD(&la->la_ports, lp, lp_dist_q);
741 	la->la_nports++;
742 	sc->sc_active = la->la_nports;
743 
744 	lp->lp_state |= LACP_STATE_DISTRIBUTING;
745 
746 	if (lsc->lsc_active_aggregator == la) {
747 		lacp_suppress_distributing(lsc, la);
748 		lacp_update_portmap(lsc);
749 	} else
750 		/* try to become the active aggregator */
751 		lacp_select_active_aggregator(lsc);
752 
753 	if_link_state_change(sc->sc_ifp,
754 	    sc->sc_active ? LINK_STATE_UP : LINK_STATE_DOWN);
755 }
756 
757 static void
lacp_transit_expire(void * vp)758 lacp_transit_expire(void *vp)
759 {
760 	struct lacp_softc *lsc = vp;
761 
762 	LACP_LOCK_ASSERT(lsc);
763 
764 	CURVNET_SET(lsc->lsc_softc->sc_ifp->if_vnet);
765 	LACP_TRACE(NULL);
766 	CURVNET_RESTORE();
767 
768 	lsc->lsc_suppress_distributing = FALSE;
769 }
770 
771 void
lacp_attach(struct lagg_softc * sc)772 lacp_attach(struct lagg_softc *sc)
773 {
774 	struct lacp_softc *lsc;
775 
776 	lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO);
777 
778 	sc->sc_psc = lsc;
779 	lsc->lsc_softc = sc;
780 
781 	lsc->lsc_hashkey = m_ether_tcpip_hash_init();
782 	lsc->lsc_active_aggregator = NULL;
783 	lsc->lsc_strict_mode = VNET(lacp_default_strict_mode);
784 	LACP_LOCK_INIT(lsc);
785 	TAILQ_INIT(&lsc->lsc_aggregators);
786 	LIST_INIT(&lsc->lsc_ports);
787 
788 	callout_init_mtx(&lsc->lsc_transit_callout, &lsc->lsc_mtx, 0);
789 	callout_init_mtx(&lsc->lsc_callout, &lsc->lsc_mtx, 0);
790 
791 	/* if the lagg is already up then do the same */
792 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
793 		lacp_init(sc);
794 }
795 
796 void
lacp_detach(void * psc)797 lacp_detach(void *psc)
798 {
799 	struct lacp_softc *lsc = (struct lacp_softc *)psc;
800 
801 	KASSERT(TAILQ_EMPTY(&lsc->lsc_aggregators),
802 	    ("aggregators still active"));
803 	KASSERT(lsc->lsc_active_aggregator == NULL,
804 	    ("aggregator still attached"));
805 
806 	callout_drain(&lsc->lsc_transit_callout);
807 	callout_drain(&lsc->lsc_callout);
808 
809 	LACP_LOCK_DESTROY(lsc);
810 	free(lsc, M_DEVBUF);
811 }
812 
813 void
lacp_init(struct lagg_softc * sc)814 lacp_init(struct lagg_softc *sc)
815 {
816 	struct lacp_softc *lsc = LACP_SOFTC(sc);
817 
818 	LACP_LOCK(lsc);
819 	callout_reset(&lsc->lsc_callout, hz, lacp_tick, lsc);
820 	LACP_UNLOCK(lsc);
821 }
822 
823 void
lacp_stop(struct lagg_softc * sc)824 lacp_stop(struct lagg_softc *sc)
825 {
826 	struct lacp_softc *lsc = LACP_SOFTC(sc);
827 
828 	LACP_LOCK(lsc);
829 	callout_stop(&lsc->lsc_transit_callout);
830 	callout_stop(&lsc->lsc_callout);
831 	LACP_UNLOCK(lsc);
832 }
833 
834 struct lagg_port *
lacp_select_tx_port_by_hash(struct lagg_softc * sc,uint32_t hash,uint8_t numa_domain,int * err)835 lacp_select_tx_port_by_hash(struct lagg_softc *sc, uint32_t hash,
836     uint8_t numa_domain, int *err)
837 {
838 	struct lacp_softc *lsc = LACP_SOFTC(sc);
839 	struct lacp_portmap *pm;
840 	struct lacp_port *lp;
841 	struct lacp_port **map;
842 	int count;
843 
844 	if (__predict_false(lsc->lsc_suppress_distributing)) {
845 		LACP_DPRINTF((NULL, "%s: waiting transit\n", __func__));
846 		*err = ENOBUFS;
847 		return (NULL);
848 	}
849 
850 	pm = &lsc->lsc_pmap[lsc->lsc_activemap];
851 	if (pm->pm_count == 0) {
852 		LACP_DPRINTF((NULL, "%s: no active aggregator\n", __func__));
853 		*err = ENETDOWN;
854 		return (NULL);
855 	}
856 
857 #ifdef NUMA
858 	if ((sc->sc_opts & LAGG_OPT_USE_NUMA) &&
859 	    pm->pm_num_dom > 1 && numa_domain < MAXMEMDOM) {
860 		count = pm->pm_numa[numa_domain].count;
861 		if (count > 0) {
862 			map = pm->pm_numa[numa_domain].map;
863 		} else {
864 			/* No ports on this domain; use global hash. */
865 			map = pm->pm_map;
866 			count = pm->pm_count;
867 		}
868 	} else
869 #endif
870 	{
871 		map = pm->pm_map;
872 		count = pm->pm_count;
873 	}
874 
875 	hash %= count;
876 	lp = map[hash];
877 
878 	return (lp->lp_lagg);
879 }
880 
881 struct lagg_port *
lacp_select_tx_port(struct lagg_softc * sc,struct mbuf * m,int * err)882 lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m, int *err)
883 {
884 	struct lacp_softc *lsc = LACP_SOFTC(sc);
885 	uint32_t hash;
886 	uint8_t numa_domain;
887 
888 	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
889 	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
890 		hash = m->m_pkthdr.flowid >> sc->flowid_shift;
891 	else
892 		hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey);
893 
894 	numa_domain = m->m_pkthdr.numa_domain;
895 	return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, err));
896 }
897 
898 /*
899  * lacp_suppress_distributing: drop transmit packets for a while
900  * to preserve packet ordering.
901  */
902 
903 static void
lacp_suppress_distributing(struct lacp_softc * lsc,struct lacp_aggregator * la)904 lacp_suppress_distributing(struct lacp_softc *lsc, struct lacp_aggregator *la)
905 {
906 	struct lacp_port *lp;
907 
908 	if (lsc->lsc_active_aggregator != la) {
909 		return;
910 	}
911 
912 	LACP_TRACE(NULL);
913 
914 	lsc->lsc_suppress_distributing = TRUE;
915 
916 	/* send a marker frame down each port to verify the queues are empty */
917 	LIST_FOREACH(lp, &lsc->lsc_ports, lp_next) {
918 		lp->lp_flags |= LACP_PORT_MARK;
919 		if (lacp_xmit_marker(lp) != 0)
920 			lp->lp_flags &= ~LACP_PORT_MARK;
921 	}
922 
923 	/* set a timeout for the marker frames */
924 	callout_reset(&lsc->lsc_transit_callout,
925 	    LACP_TRANSIT_DELAY * hz / 1000, lacp_transit_expire, lsc);
926 }
927 
928 static int
lacp_compare_peerinfo(const struct lacp_peerinfo * a,const struct lacp_peerinfo * b)929 lacp_compare_peerinfo(const struct lacp_peerinfo *a,
930     const struct lacp_peerinfo *b)
931 {
932 	return (memcmp(a, b, offsetof(struct lacp_peerinfo, lip_state)));
933 }
934 
935 static int
lacp_compare_systemid(const struct lacp_systemid * a,const struct lacp_systemid * b)936 lacp_compare_systemid(const struct lacp_systemid *a,
937     const struct lacp_systemid *b)
938 {
939 	return (memcmp(a, b, sizeof(*a)));
940 }
941 
942 #if 0	/* unused */
943 static int
944 lacp_compare_portid(const struct lacp_portid *a,
945     const struct lacp_portid *b)
946 {
947 	return (memcmp(a, b, sizeof(*a)));
948 }
949 #endif
950 
951 static uint64_t
lacp_aggregator_bandwidth(struct lacp_aggregator * la)952 lacp_aggregator_bandwidth(struct lacp_aggregator *la)
953 {
954 	struct lacp_port *lp;
955 	uint64_t speed;
956 
957 	lp = TAILQ_FIRST(&la->la_ports);
958 	if (lp == NULL) {
959 		return (0);
960 	}
961 
962 	speed = ifmedia_baudrate(lp->lp_media);
963 	speed *= la->la_nports;
964 	if (speed == 0) {
965 		LACP_DPRINTF((lp, "speed 0? media=0x%x nports=%d\n",
966 		    lp->lp_media, la->la_nports));
967 	}
968 
969 	return (speed);
970 }
971 
972 /*
973  * lacp_select_active_aggregator: select an aggregator to be used to transmit
974  * packets from lagg(4) interface.
975  */
976 
977 static void
lacp_select_active_aggregator(struct lacp_softc * lsc)978 lacp_select_active_aggregator(struct lacp_softc *lsc)
979 {
980 	struct lacp_aggregator *la;
981 	struct lacp_aggregator *best_la = NULL;
982 	uint64_t best_speed = 0;
983 	char buf[LACP_LAGIDSTR_MAX+1];
984 
985 	LACP_TRACE(NULL);
986 
987 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
988 		uint64_t speed;
989 
990 		if (la->la_nports == 0) {
991 			continue;
992 		}
993 
994 		speed = lacp_aggregator_bandwidth(la);
995 		LACP_DPRINTF((NULL, "%s, speed=%jd, nports=%d\n",
996 		    lacp_format_lagid_aggregator(la, buf, sizeof(buf)),
997 		    speed, la->la_nports));
998 
999 		/*
1000 		 * This aggregator is chosen if the partner has a better
1001 		 * system priority or, the total aggregated speed is higher
1002 		 * or, it is already the chosen aggregator
1003 		 */
1004 		if ((best_la != NULL && LACP_SYS_PRI(la->la_partner) <
1005 		    LACP_SYS_PRI(best_la->la_partner)) ||
1006 		    speed > best_speed ||
1007 		    (speed == best_speed &&
1008 		    la == lsc->lsc_active_aggregator)) {
1009 			best_la = la;
1010 			best_speed = speed;
1011 		}
1012 	}
1013 
1014 	KASSERT(best_la == NULL || best_la->la_nports > 0,
1015 	    ("invalid aggregator refcnt"));
1016 	KASSERT(best_la == NULL || !TAILQ_EMPTY(&best_la->la_ports),
1017 	    ("invalid aggregator list"));
1018 
1019 	if (lsc->lsc_active_aggregator != best_la) {
1020 		LACP_DPRINTF((NULL, "active aggregator changed\n"));
1021 		LACP_DPRINTF((NULL, "old %s\n",
1022 		    lacp_format_lagid_aggregator(lsc->lsc_active_aggregator,
1023 		    buf, sizeof(buf))));
1024 	} else {
1025 		LACP_DPRINTF((NULL, "active aggregator not changed\n"));
1026 	}
1027 	LACP_DPRINTF((NULL, "new %s\n",
1028 	    lacp_format_lagid_aggregator(best_la, buf, sizeof(buf))));
1029 
1030 	if (lsc->lsc_active_aggregator != best_la) {
1031 		lsc->lsc_active_aggregator = best_la;
1032 		lacp_update_portmap(lsc);
1033 		if (best_la) {
1034 			lacp_suppress_distributing(lsc, best_la);
1035 		}
1036 	}
1037 }
1038 
1039 /*
1040  * Updated the inactive portmap array with the new list of ports and
1041  * make it live.
1042  */
1043 static void
lacp_update_portmap(struct lacp_softc * lsc)1044 lacp_update_portmap(struct lacp_softc *lsc)
1045 {
1046 	struct lagg_softc *sc = lsc->lsc_softc;
1047 	struct lacp_aggregator *la;
1048 	struct lacp_portmap *p;
1049 	struct lacp_port *lp;
1050 	uint64_t speed;
1051 	u_int newmap;
1052 	int i;
1053 #ifdef NUMA
1054 	int count;
1055 	uint8_t domain;
1056 #endif
1057 
1058 	newmap = lsc->lsc_activemap == 0 ? 1 : 0;
1059 	p = &lsc->lsc_pmap[newmap];
1060 	la = lsc->lsc_active_aggregator;
1061 	speed = 0;
1062 	bzero(p, sizeof(struct lacp_portmap));
1063 
1064 	if (la != NULL && la->la_nports > 0) {
1065 		p->pm_count = la->la_nports;
1066 		i = 0;
1067 		TAILQ_FOREACH(lp, &la->la_ports, lp_dist_q) {
1068 			p->pm_map[i++] = lp;
1069 #ifdef NUMA
1070 			domain = lp->lp_ifp->if_numa_domain;
1071 			if (domain >= MAXMEMDOM)
1072 				continue;
1073 			count = p->pm_numa[domain].count;
1074 			p->pm_numa[domain].map[count] = lp;
1075 			p->pm_numa[domain].count++;
1076 #endif
1077 		}
1078 		KASSERT(i == p->pm_count, ("Invalid port count"));
1079 
1080 #ifdef NUMA
1081 		for (i = 0; i < MAXMEMDOM; i++) {
1082 			if (p->pm_numa[i].count != 0)
1083 				p->pm_num_dom++;
1084 		}
1085 #endif
1086 		speed = lacp_aggregator_bandwidth(la);
1087 	}
1088 	sc->sc_ifp->if_baudrate = speed;
1089 	EVENTHANDLER_INVOKE(ifnet_event, sc->sc_ifp,
1090 	    IFNET_EVENT_UPDATE_BAUDRATE);
1091 
1092 	/* switch the active portmap over */
1093 	atomic_store_rel_int(&lsc->lsc_activemap, newmap);
1094 	LACP_DPRINTF((NULL, "Set table %d with %d ports\n",
1095 		    lsc->lsc_activemap,
1096 		    lsc->lsc_pmap[lsc->lsc_activemap].pm_count));
1097 }
1098 
1099 static uint16_t
lacp_compose_key(struct lacp_port * lp)1100 lacp_compose_key(struct lacp_port *lp)
1101 {
1102 	struct lagg_port *lgp = lp->lp_lagg;
1103 	struct lagg_softc *sc = lgp->lp_softc;
1104 	u_int media = lp->lp_media;
1105 	uint16_t key;
1106 
1107 	if ((lp->lp_state & LACP_STATE_AGGREGATION) == 0) {
1108 		/*
1109 		 * non-aggregatable links should have unique keys.
1110 		 *
1111 		 * XXX this isn't really unique as if_index is 16 bit.
1112 		 */
1113 
1114 		/* bit 0..14:	(some bits of) if_index of this port */
1115 		key = lp->lp_ifp->if_index;
1116 		/* bit 15:	1 */
1117 		key |= 0x8000;
1118 	} else {
1119 		u_int subtype = IFM_SUBTYPE(media);
1120 
1121 		KASSERT(IFM_TYPE(media) == IFM_ETHER, ("invalid media type"));
1122 		KASSERT((media & IFM_FDX) != 0, ("aggregating HDX interface"));
1123 
1124 		/* bit 0..4:	IFM_SUBTYPE modulo speed */
1125 		switch (subtype) {
1126 		case IFM_10_T:
1127 		case IFM_10_2:
1128 		case IFM_10_5:
1129 		case IFM_10_STP:
1130 		case IFM_10_FL:
1131 			key = IFM_10_T;
1132 			break;
1133 		case IFM_100_TX:
1134 		case IFM_100_FX:
1135 		case IFM_100_T4:
1136 		case IFM_100_VG:
1137 		case IFM_100_T2:
1138 		case IFM_100_T:
1139 		case IFM_100_SGMII:
1140 			key = IFM_100_TX;
1141 			break;
1142 		case IFM_1000_SX:
1143 		case IFM_1000_LX:
1144 		case IFM_1000_CX:
1145 		case IFM_1000_T:
1146 		case IFM_1000_KX:
1147 		case IFM_1000_SGMII:
1148 		case IFM_1000_CX_SGMII:
1149 			key = IFM_1000_SX;
1150 			break;
1151 		case IFM_10G_LR:
1152 		case IFM_10G_SR:
1153 		case IFM_10G_CX4:
1154 		case IFM_10G_TWINAX:
1155 		case IFM_10G_TWINAX_LONG:
1156 		case IFM_10G_LRM:
1157 		case IFM_10G_T:
1158 		case IFM_10G_KX4:
1159 		case IFM_10G_KR:
1160 		case IFM_10G_CR1:
1161 		case IFM_10G_ER:
1162 		case IFM_10G_SFI:
1163 		case IFM_10G_AOC:
1164 			key = IFM_10G_LR;
1165 			break;
1166 		case IFM_20G_KR2:
1167 			key = IFM_20G_KR2;
1168 			break;
1169 		case IFM_2500_KX:
1170 		case IFM_2500_T:
1171 		case IFM_2500_X:
1172 			key = IFM_2500_KX;
1173 			break;
1174 		case IFM_5000_T:
1175 		case IFM_5000_KR:
1176 		case IFM_5000_KR_S:
1177 		case IFM_5000_KR1:
1178 			key = IFM_5000_T;
1179 			break;
1180 		case IFM_50G_PCIE:
1181 		case IFM_50G_CR2:
1182 		case IFM_50G_KR2:
1183 		case IFM_50G_KR4:
1184 		case IFM_50G_SR2:
1185 		case IFM_50G_LR2:
1186 		case IFM_50G_LAUI2_AC:
1187 		case IFM_50G_LAUI2:
1188 		case IFM_50G_AUI2_AC:
1189 		case IFM_50G_AUI2:
1190 		case IFM_50G_CP:
1191 		case IFM_50G_SR:
1192 		case IFM_50G_LR:
1193 		case IFM_50G_FR:
1194 		case IFM_50G_KR_PAM4:
1195 		case IFM_50G_AUI1_AC:
1196 		case IFM_50G_AUI1:
1197 			key = IFM_50G_PCIE;
1198 			break;
1199 		case IFM_56G_R4:
1200 			key = IFM_56G_R4;
1201 			break;
1202 		case IFM_25G_PCIE:
1203 		case IFM_25G_CR:
1204 		case IFM_25G_KR:
1205 		case IFM_25G_SR:
1206 		case IFM_25G_LR:
1207 		case IFM_25G_ACC:
1208 		case IFM_25G_AOC:
1209 		case IFM_25G_T:
1210 		case IFM_25G_CR_S:
1211 		case IFM_25G_CR1:
1212 		case IFM_25G_KR_S:
1213 		case IFM_25G_AUI:
1214 		case IFM_25G_KR1:
1215 			key = IFM_25G_PCIE;
1216 			break;
1217 		case IFM_40G_CR4:
1218 		case IFM_40G_SR4:
1219 		case IFM_40G_LR4:
1220 		case IFM_40G_LM4:
1221 		case IFM_40G_XLPPI:
1222 		case IFM_40G_KR4:
1223 		case IFM_40G_XLAUI:
1224 		case IFM_40G_XLAUI_AC:
1225 		case IFM_40G_ER4:
1226 			key = IFM_40G_CR4;
1227 			break;
1228 		case IFM_100G_CR4:
1229 		case IFM_100G_SR4:
1230 		case IFM_100G_KR4:
1231 		case IFM_100G_LR4:
1232 		case IFM_100G_CAUI4_AC:
1233 		case IFM_100G_CAUI4:
1234 		case IFM_100G_AUI4_AC:
1235 		case IFM_100G_AUI4:
1236 		case IFM_100G_CR_PAM4:
1237 		case IFM_100G_KR_PAM4:
1238 		case IFM_100G_CP2:
1239 		case IFM_100G_SR2:
1240 		case IFM_100G_DR:
1241 		case IFM_100G_KR2_PAM4:
1242 		case IFM_100G_CAUI2_AC:
1243 		case IFM_100G_CAUI2:
1244 		case IFM_100G_AUI2_AC:
1245 		case IFM_100G_AUI2:
1246 			key = IFM_100G_CR4;
1247 			break;
1248 		case IFM_200G_CR4_PAM4:
1249 		case IFM_200G_SR4:
1250 		case IFM_200G_FR4:
1251 		case IFM_200G_LR4:
1252 		case IFM_200G_DR4:
1253 		case IFM_200G_KR4_PAM4:
1254 		case IFM_200G_AUI4_AC:
1255 		case IFM_200G_AUI4:
1256 		case IFM_200G_AUI8_AC:
1257 		case IFM_200G_AUI8:
1258 			key = IFM_200G_CR4_PAM4;
1259 			break;
1260 		case IFM_400G_FR8:
1261 		case IFM_400G_LR8:
1262 		case IFM_400G_DR4:
1263 		case IFM_400G_AUI8_AC:
1264 		case IFM_400G_AUI8:
1265 			key = IFM_400G_FR8;
1266 			break;
1267 		default:
1268 			key = subtype;
1269 			break;
1270 		}
1271 		/* bit 5..14:	(some bits of) if_index of lagg device */
1272 		key |= 0x7fe0 & ((sc->sc_ifp->if_index) << 5);
1273 		/* bit 15:	0 */
1274 	}
1275 	return (htons(key));
1276 }
1277 
1278 static void
lacp_aggregator_addref(struct lacp_softc * lsc,struct lacp_aggregator * la)1279 lacp_aggregator_addref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1280 {
1281 	char buf[LACP_LAGIDSTR_MAX+1];
1282 
1283 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1284 	    __func__,
1285 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1286 	    buf, sizeof(buf)),
1287 	    la->la_refcnt, la->la_refcnt + 1));
1288 
1289 	KASSERT(la->la_refcnt > 0, ("refcount <= 0"));
1290 	la->la_refcnt++;
1291 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcount"));
1292 }
1293 
1294 static void
lacp_aggregator_delref(struct lacp_softc * lsc,struct lacp_aggregator * la)1295 lacp_aggregator_delref(struct lacp_softc *lsc, struct lacp_aggregator *la)
1296 {
1297 	char buf[LACP_LAGIDSTR_MAX+1];
1298 
1299 	LACP_DPRINTF((NULL, "%s: lagid=%s, refcnt %d -> %d\n",
1300 	    __func__,
1301 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1302 	    buf, sizeof(buf)),
1303 	    la->la_refcnt, la->la_refcnt - 1));
1304 
1305 	KASSERT(la->la_refcnt > la->la_nports, ("invalid refcnt"));
1306 	la->la_refcnt--;
1307 	if (la->la_refcnt > 0) {
1308 		return;
1309 	}
1310 
1311 	KASSERT(la->la_refcnt == 0, ("refcount not zero"));
1312 	KASSERT(lsc->lsc_active_aggregator != la, ("aggregator active"));
1313 
1314 	TAILQ_REMOVE(&lsc->lsc_aggregators, la, la_q);
1315 
1316 	free(la, M_DEVBUF);
1317 }
1318 
1319 /*
1320  * lacp_aggregator_get: allocate an aggregator.
1321  */
1322 
1323 static struct lacp_aggregator *
lacp_aggregator_get(struct lacp_softc * lsc,struct lacp_port * lp)1324 lacp_aggregator_get(struct lacp_softc *lsc, struct lacp_port *lp)
1325 {
1326 	struct lacp_aggregator *la;
1327 
1328 	la = malloc(sizeof(*la), M_DEVBUF, M_NOWAIT);
1329 	if (la) {
1330 		la->la_refcnt = 1;
1331 		la->la_nports = 0;
1332 		TAILQ_INIT(&la->la_ports);
1333 		la->la_pending = 0;
1334 		TAILQ_INSERT_TAIL(&lsc->lsc_aggregators, la, la_q);
1335 	}
1336 
1337 	return (la);
1338 }
1339 
1340 /*
1341  * lacp_fill_aggregator_id: setup a newly allocated aggregator from a port.
1342  */
1343 
1344 static void
lacp_fill_aggregator_id(struct lacp_aggregator * la,const struct lacp_port * lp)1345 lacp_fill_aggregator_id(struct lacp_aggregator *la, const struct lacp_port *lp)
1346 {
1347 	lacp_fill_aggregator_id_peer(&la->la_partner, &lp->lp_partner);
1348 	lacp_fill_aggregator_id_peer(&la->la_actor, &lp->lp_actor);
1349 
1350 	la->la_actor.lip_state = lp->lp_state & LACP_STATE_AGGREGATION;
1351 }
1352 
1353 static void
lacp_fill_aggregator_id_peer(struct lacp_peerinfo * lpi_aggr,const struct lacp_peerinfo * lpi_port)1354 lacp_fill_aggregator_id_peer(struct lacp_peerinfo *lpi_aggr,
1355     const struct lacp_peerinfo *lpi_port)
1356 {
1357 	memset(lpi_aggr, 0, sizeof(*lpi_aggr));
1358 	lpi_aggr->lip_systemid = lpi_port->lip_systemid;
1359 	lpi_aggr->lip_key = lpi_port->lip_key;
1360 }
1361 
1362 /*
1363  * lacp_aggregator_is_compatible: check if a port can join to an aggregator.
1364  */
1365 
1366 static bool
lacp_aggregator_is_compatible(const struct lacp_aggregator * la,const struct lacp_port * lp)1367 lacp_aggregator_is_compatible(const struct lacp_aggregator *la,
1368     const struct lacp_port *lp)
1369 {
1370 	if (!(lp->lp_state & LACP_STATE_AGGREGATION) ||
1371 	    !(lp->lp_partner.lip_state & LACP_STATE_AGGREGATION)) {
1372 		return (false);
1373 	}
1374 
1375 	if (!(la->la_actor.lip_state & LACP_STATE_AGGREGATION))
1376 		return (false);
1377 
1378 	if (!lacp_peerinfo_is_compatible(&la->la_partner, &lp->lp_partner))
1379 		return (false);
1380 
1381 	if (!lacp_peerinfo_is_compatible(&la->la_actor, &lp->lp_actor))
1382 		return (false);
1383 
1384 	return (true);
1385 }
1386 
1387 static bool
lacp_peerinfo_is_compatible(const struct lacp_peerinfo * a,const struct lacp_peerinfo * b)1388 lacp_peerinfo_is_compatible(const struct lacp_peerinfo *a,
1389     const struct lacp_peerinfo *b)
1390 {
1391 	if (memcmp(&a->lip_systemid, &b->lip_systemid,
1392 	    sizeof(a->lip_systemid)) != 0) {
1393 		return (false);
1394 	}
1395 
1396 	if (memcmp(&a->lip_key, &b->lip_key, sizeof(a->lip_key)) != 0)
1397 		return (false);
1398 
1399 	return (true);
1400 }
1401 
1402 static void
lacp_port_enable(struct lacp_port * lp)1403 lacp_port_enable(struct lacp_port *lp)
1404 {
1405 	lp->lp_state |= LACP_STATE_AGGREGATION;
1406 }
1407 
1408 static void
lacp_port_disable(struct lacp_port * lp)1409 lacp_port_disable(struct lacp_port *lp)
1410 {
1411 	lacp_set_mux(lp, LACP_MUX_DETACHED);
1412 
1413 	lp->lp_state &= ~LACP_STATE_AGGREGATION;
1414 	lp->lp_selected = LACP_UNSELECTED;
1415 	lacp_sm_rx_record_default(lp);
1416 	lp->lp_partner.lip_state &= ~LACP_STATE_AGGREGATION;
1417 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1418 }
1419 
1420 /*
1421  * lacp_select: select an aggregator.  create one if necessary.
1422  */
1423 static void
lacp_select(struct lacp_port * lp)1424 lacp_select(struct lacp_port *lp)
1425 {
1426 	struct lacp_softc *lsc = lp->lp_lsc;
1427 	struct lacp_aggregator *la;
1428 	char buf[LACP_LAGIDSTR_MAX+1];
1429 
1430 	if (lp->lp_aggregator) {
1431 		return;
1432 	}
1433 
1434 	/* If we haven't heard from our peer, skip this step. */
1435 	if (lp->lp_state & LACP_STATE_DEFAULTED)
1436 		return;
1437 
1438 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1439 	    ("timer_wait_while still active"));
1440 
1441 	LACP_DPRINTF((lp, "port lagid=%s\n",
1442 	    lacp_format_lagid(&lp->lp_actor, &lp->lp_partner,
1443 	    buf, sizeof(buf))));
1444 
1445 	TAILQ_FOREACH(la, &lsc->lsc_aggregators, la_q) {
1446 		if (lacp_aggregator_is_compatible(la, lp)) {
1447 			break;
1448 		}
1449 	}
1450 
1451 	if (la == NULL) {
1452 		la = lacp_aggregator_get(lsc, lp);
1453 		if (la == NULL) {
1454 			LACP_DPRINTF((lp, "aggregator creation failed\n"));
1455 
1456 			/*
1457 			 * will retry on the next tick.
1458 			 */
1459 
1460 			return;
1461 		}
1462 		lacp_fill_aggregator_id(la, lp);
1463 		LACP_DPRINTF((lp, "aggregator created\n"));
1464 	} else {
1465 		LACP_DPRINTF((lp, "compatible aggregator found\n"));
1466 		if (la->la_refcnt == LACP_MAX_PORTS)
1467 			return;
1468 		lacp_aggregator_addref(lsc, la);
1469 	}
1470 
1471 	LACP_DPRINTF((lp, "aggregator lagid=%s\n",
1472 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1473 	    buf, sizeof(buf))));
1474 
1475 	lp->lp_aggregator = la;
1476 	lp->lp_selected = LACP_SELECTED;
1477 }
1478 
1479 /*
1480  * lacp_unselect: finish unselect/detach process.
1481  */
1482 
1483 static void
lacp_unselect(struct lacp_port * lp)1484 lacp_unselect(struct lacp_port *lp)
1485 {
1486 	struct lacp_softc *lsc = lp->lp_lsc;
1487 	struct lacp_aggregator *la = lp->lp_aggregator;
1488 
1489 	KASSERT(!LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1490 	    ("timer_wait_while still active"));
1491 
1492 	if (la == NULL) {
1493 		return;
1494 	}
1495 
1496 	lp->lp_aggregator = NULL;
1497 	lacp_aggregator_delref(lsc, la);
1498 }
1499 
1500 /* mux machine */
1501 
1502 static void
lacp_sm_mux(struct lacp_port * lp)1503 lacp_sm_mux(struct lacp_port *lp)
1504 {
1505 	struct lagg_port *lgp = lp->lp_lagg;
1506 	struct lagg_softc *sc = lgp->lp_softc;
1507 	enum lacp_mux_state new_state;
1508 	boolean_t p_sync =
1509 		    (lp->lp_partner.lip_state & LACP_STATE_SYNC) != 0;
1510 	boolean_t p_collecting =
1511 	    (lp->lp_partner.lip_state & LACP_STATE_COLLECTING) != 0;
1512 	enum lacp_selected selected = lp->lp_selected;
1513 	struct lacp_aggregator *la;
1514 
1515 	if (V_lacp_debug > 1)
1516 		lacp_dprintf(lp, "%s: state= 0x%x, selected= 0x%x, "
1517 		    "p_sync= 0x%x, p_collecting= 0x%x\n", __func__,
1518 		    lp->lp_mux_state, selected, p_sync, p_collecting);
1519 
1520 re_eval:
1521 	la = lp->lp_aggregator;
1522 	KASSERT(lp->lp_mux_state == LACP_MUX_DETACHED || la != NULL,
1523 	    ("MUX not detached"));
1524 	new_state = lp->lp_mux_state;
1525 	switch (lp->lp_mux_state) {
1526 	case LACP_MUX_DETACHED:
1527 		if (selected != LACP_UNSELECTED) {
1528 			new_state = LACP_MUX_WAITING;
1529 		}
1530 		break;
1531 	case LACP_MUX_WAITING:
1532 		KASSERT(la->la_pending > 0 ||
1533 		    !LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE),
1534 		    ("timer_wait_while still active"));
1535 		if (selected == LACP_SELECTED && la->la_pending == 0) {
1536 			new_state = LACP_MUX_ATTACHED;
1537 		} else if (selected == LACP_UNSELECTED) {
1538 			new_state = LACP_MUX_DETACHED;
1539 		}
1540 		break;
1541 	case LACP_MUX_ATTACHED:
1542 		if (selected == LACP_SELECTED && p_sync) {
1543 			new_state = LACP_MUX_COLLECTING;
1544 		} else if (selected != LACP_SELECTED) {
1545 			new_state = LACP_MUX_DETACHED;
1546 		}
1547 		break;
1548 	case LACP_MUX_COLLECTING:
1549 		if (selected == LACP_SELECTED && p_sync && p_collecting) {
1550 			new_state = LACP_MUX_DISTRIBUTING;
1551 		} else if (selected != LACP_SELECTED || !p_sync) {
1552 			new_state = LACP_MUX_ATTACHED;
1553 		}
1554 		break;
1555 	case LACP_MUX_DISTRIBUTING:
1556 		if (selected != LACP_SELECTED || !p_sync || !p_collecting) {
1557 			new_state = LACP_MUX_COLLECTING;
1558 			lacp_dprintf(lp, "Interface stopped DISTRIBUTING, possible flapping\n");
1559 			sc->sc_flapping++;
1560 		}
1561 		break;
1562 	default:
1563 		panic("%s: unknown state", __func__);
1564 	}
1565 
1566 	if (lp->lp_mux_state == new_state) {
1567 		return;
1568 	}
1569 
1570 	lacp_set_mux(lp, new_state);
1571 	goto re_eval;
1572 }
1573 
1574 static void
lacp_set_mux(struct lacp_port * lp,enum lacp_mux_state new_state)1575 lacp_set_mux(struct lacp_port *lp, enum lacp_mux_state new_state)
1576 {
1577 	struct lacp_aggregator *la = lp->lp_aggregator;
1578 
1579 	if (lp->lp_mux_state == new_state) {
1580 		return;
1581 	}
1582 
1583 	switch (new_state) {
1584 	case LACP_MUX_DETACHED:
1585 		lp->lp_state &= ~LACP_STATE_SYNC;
1586 		lacp_disable_distributing(lp);
1587 		lacp_disable_collecting(lp);
1588 		lacp_sm_assert_ntt(lp);
1589 		/* cancel timer */
1590 		if (LACP_TIMER_ISARMED(lp, LACP_TIMER_WAIT_WHILE)) {
1591 			KASSERT(la->la_pending > 0,
1592 			    ("timer_wait_while not active"));
1593 			la->la_pending--;
1594 		}
1595 		LACP_TIMER_DISARM(lp, LACP_TIMER_WAIT_WHILE);
1596 		lacp_unselect(lp);
1597 		break;
1598 	case LACP_MUX_WAITING:
1599 		LACP_TIMER_ARM(lp, LACP_TIMER_WAIT_WHILE,
1600 		    LACP_AGGREGATE_WAIT_TIME);
1601 		la->la_pending++;
1602 		break;
1603 	case LACP_MUX_ATTACHED:
1604 		lp->lp_state |= LACP_STATE_SYNC;
1605 		lacp_disable_collecting(lp);
1606 		lacp_sm_assert_ntt(lp);
1607 		break;
1608 	case LACP_MUX_COLLECTING:
1609 		lacp_enable_collecting(lp);
1610 		lacp_disable_distributing(lp);
1611 		lacp_sm_assert_ntt(lp);
1612 		break;
1613 	case LACP_MUX_DISTRIBUTING:
1614 		lacp_enable_distributing(lp);
1615 		break;
1616 	default:
1617 		panic("%s: unknown state", __func__);
1618 	}
1619 
1620 	LACP_DPRINTF((lp, "mux_state %d -> %d\n", lp->lp_mux_state, new_state));
1621 
1622 	lp->lp_mux_state = new_state;
1623 }
1624 
1625 static void
lacp_sm_mux_timer(struct lacp_port * lp)1626 lacp_sm_mux_timer(struct lacp_port *lp)
1627 {
1628 	struct lacp_aggregator *la = lp->lp_aggregator;
1629 	char buf[LACP_LAGIDSTR_MAX+1];
1630 
1631 	KASSERT(la->la_pending > 0, ("no pending event"));
1632 
1633 	LACP_DPRINTF((lp, "%s: aggregator %s, pending %d -> %d\n", __func__,
1634 	    lacp_format_lagid(&la->la_actor, &la->la_partner,
1635 	    buf, sizeof(buf)),
1636 	    la->la_pending, la->la_pending - 1));
1637 
1638 	la->la_pending--;
1639 }
1640 
1641 /* periodic transmit machine */
1642 
1643 static void
lacp_sm_ptx_update_timeout(struct lacp_port * lp,uint8_t oldpstate)1644 lacp_sm_ptx_update_timeout(struct lacp_port *lp, uint8_t oldpstate)
1645 {
1646 	if (LACP_STATE_EQ(oldpstate, lp->lp_partner.lip_state,
1647 	    LACP_STATE_TIMEOUT)) {
1648 		return;
1649 	}
1650 
1651 	LACP_DPRINTF((lp, "partner timeout changed\n"));
1652 
1653 	/*
1654 	 * FAST_PERIODIC -> SLOW_PERIODIC
1655 	 * or
1656 	 * SLOW_PERIODIC (-> PERIODIC_TX) -> FAST_PERIODIC
1657 	 *
1658 	 * let lacp_sm_ptx_tx_schedule to update timeout.
1659 	 */
1660 
1661 	LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1662 
1663 	/*
1664 	 * if timeout has been shortened, assert NTT.
1665 	 */
1666 
1667 	if ((lp->lp_partner.lip_state & LACP_STATE_TIMEOUT)) {
1668 		lacp_sm_assert_ntt(lp);
1669 	}
1670 }
1671 
1672 static void
lacp_sm_ptx_tx_schedule(struct lacp_port * lp)1673 lacp_sm_ptx_tx_schedule(struct lacp_port *lp)
1674 {
1675 	int timeout;
1676 
1677 	if (!(lp->lp_state & LACP_STATE_ACTIVITY) &&
1678 	    !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY)) {
1679 		/*
1680 		 * NO_PERIODIC
1681 		 */
1682 
1683 		LACP_TIMER_DISARM(lp, LACP_TIMER_PERIODIC);
1684 		return;
1685 	}
1686 
1687 	if (LACP_TIMER_ISARMED(lp, LACP_TIMER_PERIODIC)) {
1688 		return;
1689 	}
1690 
1691 	timeout = (lp->lp_partner.lip_state & LACP_STATE_TIMEOUT) ?
1692 	    LACP_FAST_PERIODIC_TIME : LACP_SLOW_PERIODIC_TIME;
1693 
1694 	LACP_TIMER_ARM(lp, LACP_TIMER_PERIODIC, timeout);
1695 }
1696 
1697 static void
lacp_sm_ptx_timer(struct lacp_port * lp)1698 lacp_sm_ptx_timer(struct lacp_port *lp)
1699 {
1700 	lacp_sm_assert_ntt(lp);
1701 }
1702 
1703 static void
lacp_sm_rx(struct lacp_port * lp,const struct lacpdu * du)1704 lacp_sm_rx(struct lacp_port *lp, const struct lacpdu *du)
1705 {
1706 	int timeout;
1707 
1708 	/*
1709 	 * check LACP_DISABLED first
1710 	 */
1711 
1712 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)) {
1713 		return;
1714 	}
1715 
1716 	/*
1717 	 * check loopback condition.
1718 	 */
1719 
1720 	if (!lacp_compare_systemid(&du->ldu_actor.lip_systemid,
1721 	    &lp->lp_actor.lip_systemid)) {
1722 		return;
1723 	}
1724 
1725 	/*
1726 	 * EXPIRED, DEFAULTED, CURRENT -> CURRENT
1727 	 */
1728 
1729 	microuptime(&lp->lp_last_lacpdu_rx);
1730 	lacp_sm_rx_update_selected(lp, du);
1731 	lacp_sm_rx_update_ntt(lp, du);
1732 	lacp_sm_rx_record_pdu(lp, du);
1733 
1734 	timeout = (lp->lp_state & LACP_STATE_TIMEOUT) ?
1735 	    LACP_SHORT_TIMEOUT_TIME : LACP_LONG_TIMEOUT_TIME;
1736 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, timeout);
1737 
1738 	lp->lp_state &= ~LACP_STATE_EXPIRED;
1739 
1740 	/*
1741 	 * kick transmit machine without waiting the next tick.
1742 	 */
1743 
1744 	lacp_sm_tx(lp);
1745 }
1746 
1747 static void
lacp_sm_rx_set_expired(struct lacp_port * lp)1748 lacp_sm_rx_set_expired(struct lacp_port *lp)
1749 {
1750 	lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1751 	lp->lp_partner.lip_state |= LACP_STATE_TIMEOUT;
1752 	LACP_TIMER_ARM(lp, LACP_TIMER_CURRENT_WHILE, LACP_SHORT_TIMEOUT_TIME);
1753 	lp->lp_state |= LACP_STATE_EXPIRED;
1754 }
1755 
1756 static void
lacp_sm_rx_timer(struct lacp_port * lp)1757 lacp_sm_rx_timer(struct lacp_port *lp)
1758 {
1759 	if ((lp->lp_state & LACP_STATE_EXPIRED) == 0) {
1760 		/* CURRENT -> EXPIRED */
1761 		LACP_DPRINTF((lp, "%s: CURRENT -> EXPIRED\n", __func__));
1762 		lacp_sm_rx_set_expired(lp);
1763 	} else {
1764 		/* EXPIRED -> DEFAULTED */
1765 		LACP_DPRINTF((lp, "%s: EXPIRED -> DEFAULTED\n", __func__));
1766 		lacp_sm_rx_update_default_selected(lp);
1767 		lacp_sm_rx_record_default(lp);
1768 		lp->lp_state &= ~LACP_STATE_EXPIRED;
1769 	}
1770 }
1771 
1772 static void
lacp_sm_rx_record_pdu(struct lacp_port * lp,const struct lacpdu * du)1773 lacp_sm_rx_record_pdu(struct lacp_port *lp, const struct lacpdu *du)
1774 {
1775 	boolean_t active;
1776 	uint8_t oldpstate;
1777 	char buf[LACP_STATESTR_MAX+1];
1778 
1779 	LACP_TRACE(lp);
1780 
1781 	oldpstate = lp->lp_partner.lip_state;
1782 
1783 	active = (du->ldu_actor.lip_state & LACP_STATE_ACTIVITY)
1784 	    || ((lp->lp_state & LACP_STATE_ACTIVITY) &&
1785 	    (du->ldu_partner.lip_state & LACP_STATE_ACTIVITY));
1786 
1787 	lp->lp_partner = du->ldu_actor;
1788 	if (active &&
1789 	    ((LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1790 	    LACP_STATE_AGGREGATION) &&
1791 	    !lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner))
1792 	    || (du->ldu_partner.lip_state & LACP_STATE_AGGREGATION) == 0)) {
1793 		/*
1794 		 * XXX Maintain legacy behavior of leaving the
1795 		 * LACP_STATE_SYNC bit unchanged from the partner's
1796 		 * advertisement if lsc_strict_mode is false.
1797 		 * TODO: We should re-examine the concept of the "strict mode"
1798 		 * to ensure it makes sense to maintain a non-strict mode.
1799 		 */
1800 		if (lp->lp_lsc->lsc_strict_mode)
1801 			lp->lp_partner.lip_state |= LACP_STATE_SYNC;
1802 	} else {
1803 		lp->lp_partner.lip_state &= ~LACP_STATE_SYNC;
1804 	}
1805 
1806 	lp->lp_state &= ~LACP_STATE_DEFAULTED;
1807 
1808 	if (oldpstate != lp->lp_partner.lip_state) {
1809 		LACP_DPRINTF((lp, "old pstate %s\n",
1810 		    lacp_format_state(oldpstate, buf, sizeof(buf))));
1811 		LACP_DPRINTF((lp, "new pstate %s\n",
1812 		    lacp_format_state(lp->lp_partner.lip_state, buf,
1813 		    sizeof(buf))));
1814 	}
1815 
1816 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1817 }
1818 
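/*
 * If the partner's view of us (the partner info carried in the PDU) is
 * out of date, assert NTT so that we re-advertise our actor state.
 */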
1819 static void
1820 lacp_sm_rx_update_ntt(struct lacp_port *lp, const struct lacpdu *du)
1821 {
1822 
1823 	LACP_TRACE(lp);
1824 
1825 	if (lacp_compare_peerinfo(&lp->lp_actor, &du->ldu_partner) ||
1826 	    !LACP_STATE_EQ(lp->lp_state, du->ldu_partner.lip_state,
1827 	    LACP_STATE_ACTIVITY | LACP_STATE_SYNC | LACP_STATE_AGGREGATION)) {
1828 		LACP_DPRINTF((lp, "%s: assert ntt\n", __func__));
1829 		lacp_sm_assert_ntt(lp);
1830 	}
1831 }
1832 
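/*
 * No usable partner information: adopt the administrative defaults
 * (strict or optimistic, depending on lsc_strict_mode) and mark the
 * port DEFAULTED.
 */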
1833 static void
1834 lacp_sm_rx_record_default(struct lacp_port *lp)
1835 {
1836 	uint8_t oldpstate;
1837 
1838 	LACP_TRACE(lp);
1839 
1840 	oldpstate = lp->lp_partner.lip_state;
1841 	if (lp->lp_lsc->lsc_strict_mode)
1842 		lp->lp_partner = lacp_partner_admin_strict;
1843 	else
1844 		lp->lp_partner = lacp_partner_admin_optimistic;
1845 	lp->lp_state |= LACP_STATE_DEFAULTED;
1846 	lacp_sm_ptx_update_timeout(lp, oldpstate);
1847 }
1848 
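/*
 * If the supplied partner information differs from what the current
 * selection was based on, mark the port UNSELECTED so the selection
 * logic runs again.
 */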
1849 static void
1850 lacp_sm_rx_update_selected_from_peerinfo(struct lacp_port *lp,
1851     const struct lacp_peerinfo *info)
1852 {
1853 
1854 	LACP_TRACE(lp);
1855 
1856 	if (lacp_compare_peerinfo(&lp->lp_partner, info) ||
1857 	    !LACP_STATE_EQ(lp->lp_partner.lip_state, info->lip_state,
1858 	    LACP_STATE_AGGREGATION)) {
1859 		lp->lp_selected = LACP_UNSELECTED;
1860 		/* mux machine will clean up lp->lp_aggregator */
1861 	}
1862 }
1863 
1864 static void
1865 lacp_sm_rx_update_selected(struct lacp_port *lp, const struct lacpdu *du)
1866 {
1867 
1868 	LACP_TRACE(lp);
1869 
1870 	lacp_sm_rx_update_selected_from_peerinfo(lp, &du->ldu_actor);
1871 }
1872 
1873 static void
1874 lacp_sm_rx_update_default_selected(struct lacp_port *lp)
1875 {
1876 
1877 	LACP_TRACE(lp);
1878 
1879 	if (lp->lp_lsc->lsc_strict_mode)
1880 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1881 		    &lacp_partner_admin_strict);
1882 	else
1883 		lacp_sm_rx_update_selected_from_peerinfo(lp,
1884 		    &lacp_partner_admin_optimistic);
1885 }
1886 
1887 /* transmit machine */
1888 
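/*
 * Transmit machine: send a LACPDU if NTT is asserted, the port is
 * aggregatable and at least one side is ACTIVE, subject to the per-port
 * rate limit below.
 */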
1889 static void
1890 lacp_sm_tx(struct lacp_port *lp)
1891 {
1892 	int error = 0;
1893 
1894 	if (!(lp->lp_state & LACP_STATE_AGGREGATION)
1895 #if 1
1896 	    || (!(lp->lp_state & LACP_STATE_ACTIVITY)
1897 	    && !(lp->lp_partner.lip_state & LACP_STATE_ACTIVITY))
1898 #endif
1899 	    ) {
1900 		lp->lp_flags &= ~LACP_PORT_NTT;
1901 	}
1902 
1903 	if (!(lp->lp_flags & LACP_PORT_NTT)) {
1904 		return;
1905 	}
1906 
1907 	/* Rate limit to 3 PDUs per LACP_FAST_PERIODIC_TIME */
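	/* (LACP_FAST_PERIODIC_TIME is nominally one second, i.e. ~3 LACPDUs/s.) */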
1908 	if (ppsratecheck(&lp->lp_last_lacpdu, &lp->lp_lacpdu_sent,
1909 		    (3 / LACP_FAST_PERIODIC_TIME)) == 0) {
1910 		LACP_DPRINTF((lp, "rate limited pdu\n"));
1911 		return;
1912 	}
1913 
1914 	if (((1 << lp->lp_ifp->if_dunit) & lp->lp_lsc->lsc_debug.lsc_tx_test) == 0) {
1915 		error = lacp_xmit_lacpdu(lp);
1916 	} else {
1917 		LACP_TPRINTF((lp, "Dropping TX PDU\n"));
1918 	}
1919 
1920 	if (error == 0) {
1921 		lp->lp_flags &= ~LACP_PORT_NTT;
1922 	} else {
1923 		LACP_DPRINTF((lp, "lacpdu transmit failure, error %d\n",
1924 		    error));
1925 	}
1926 }
1927 
1928 static void
1929 lacp_sm_assert_ntt(struct lacp_port *lp)
1930 {
1931 
1932 	lp->lp_flags |= LACP_PORT_NTT;
1933 }
1934 
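/*
 * Count down the per-port timers (this is expected to run roughly once
 * per second) and invoke the corresponding expiry handler when a timer
 * reaches zero.  The current_while timer is only decremented when at
 * least one second has passed since the last received LACPDU.
 */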
1935 static void
1936 lacp_run_timers(struct lacp_port *lp)
1937 {
1938 	int i;
1939 	struct timeval time_diff;
1940 
1941 	for (i = 0; i < LACP_NTIMER; i++) {
1942 		KASSERT(lp->lp_timer[i] >= 0,
1943 		    ("invalid timer value %d", lp->lp_timer[i]));
1944 		if (lp->lp_timer[i] == 0) {
1945 			continue;
1946 		} else {
1947 			if (i == LACP_TIMER_CURRENT_WHILE) {
1948 				microuptime(&time_diff);
1949 				timevalsub(&time_diff, &lp->lp_last_lacpdu_rx);
1950 				if (time_diff.tv_sec) {
1951 					/* At least one second has elapsed since the last LACPDU was received. */
1952 					--lp->lp_timer[i];
1953 				}
1954 			} else {
1955 				--lp->lp_timer[i];
1956 			}
1957 
1958 			if ((lp->lp_timer[i] <= 0) && (lacp_timer_funcs[i])) {
1959 				(*lacp_timer_funcs[i])(lp);
1960 			}
1961 		}
1962 	}
1963 }
1964 
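/*
 * Marker protocol input.  An information TLV is turned around and sent
 * back as a marker response on the same port.  A response that matches
 * our outstanding request clears LACP_PORT_MARK; once no port is still
 * waiting, distribution is no longer suppressed.
 */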
1965 int
1966 lacp_marker_input(struct lacp_port *lp, struct mbuf *m)
1967 {
1968 	struct lacp_softc *lsc = lp->lp_lsc;
1969 	struct lagg_port *lgp = lp->lp_lagg;
1970 	struct lacp_port *lp2;
1971 	struct markerdu *mdu;
1972 	int error = 0;
1973 	int pending = 0;
1974 
1975 	if (m->m_pkthdr.len != sizeof(*mdu)) {
1976 		goto bad;
1977 	}
1978 
1979 	if ((m->m_flags & M_MCAST) == 0) {
1980 		goto bad;
1981 	}
1982 
1983 	if (m->m_len < sizeof(*mdu)) {
1984 		m = m_pullup(m, sizeof(*mdu));
1985 		if (m == NULL) {
1986 			return (ENOMEM);
1987 		}
1988 	}
1989 
1990 	mdu = mtod(m, struct markerdu *);
1991 
1992 	if (memcmp(&mdu->mdu_eh.ether_dhost,
1993 	    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN)) {
1994 		goto bad;
1995 	}
1996 
1997 	if (mdu->mdu_sph.sph_version != 1) {
1998 		goto bad;
1999 	}
2000 
2001 	switch (mdu->mdu_tlv.tlv_type) {
2002 	case MARKER_TYPE_INFO:
2003 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
2004 		    marker_info_tlv_template, TRUE)) {
2005 			goto bad;
2006 		}
2007 		mdu->mdu_tlv.tlv_type = MARKER_TYPE_RESPONSE;
2008 		memcpy(&mdu->mdu_eh.ether_dhost,
2009 		    &ethermulticastaddr_slowprotocols, ETHER_ADDR_LEN);
2010 		memcpy(&mdu->mdu_eh.ether_shost,
2011 		    lgp->lp_lladdr, ETHER_ADDR_LEN);
2012 		error = lagg_enqueue(lp->lp_ifp, m);
2013 		break;
2014 
2015 	case MARKER_TYPE_RESPONSE:
2016 		if (tlv_check(mdu, sizeof(*mdu), &mdu->mdu_tlv,
2017 		    marker_response_tlv_template, TRUE)) {
2018 			goto bad;
2019 		}
2020 		LACP_DPRINTF((lp, "marker response, port=%u, sys=%6D, id=%u\n",
2021 		    ntohs(mdu->mdu_info.mi_rq_port), mdu->mdu_info.mi_rq_system,
2022 		    ":", ntohl(mdu->mdu_info.mi_rq_xid)));
2023 
2024 		/* Verify that it is the last marker we sent out */
2025 		if (memcmp(&mdu->mdu_info, &lp->lp_marker,
2026 		    sizeof(struct lacp_markerinfo)))
2027 			goto bad;
2028 
2029 		LACP_LOCK(lsc);
2030 		lp->lp_flags &= ~LACP_PORT_MARK;
2031 
2032 		if (lsc->lsc_suppress_distributing) {
2033 			/* Check if any ports are waiting for a response */
2034 			LIST_FOREACH(lp2, &lsc->lsc_ports, lp_next) {
2035 				if (lp2->lp_flags & LACP_PORT_MARK) {
2036 					pending = 1;
2037 					break;
2038 				}
2039 			}
2040 
2041 			if (pending == 0) {
2042 				/* All interface queues are clear */
2043 				LACP_DPRINTF((NULL, "queue flush complete\n"));
2044 				lsc->lsc_suppress_distributing = FALSE;
2045 			}
2046 		}
2047 		LACP_UNLOCK(lsc);
2048 		m_freem(m);
2049 		break;
2050 
2051 	default:
2052 		goto bad;
2053 	}
2054 
2055 	return (error);
2056 
2057 bad:
2058 	LACP_DPRINTF((lp, "bad marker frame\n"));
2059 	m_freem(m);
2060 	return (EINVAL);
2061 }
2062 
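/*
 * Walk a chain of TLVs, checking each length (and, optionally, type)
 * against the supplied template and verifying that each TLV header lies
 * within the given buffer.  Returns 0 if the chain matches, EINVAL
 * otherwise.
 */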
2063 static int
2064 tlv_check(const void *p, size_t size, const struct tlvhdr *tlv,
2065     const struct tlv_template *tmpl, boolean_t check_type)
2066 {
2067 	while (/* CONSTCOND */ 1) {
2068 		if ((const char *)tlv - (const char *)p + sizeof(*tlv) > size) {
2069 			return (EINVAL);
2070 		}
2071 		if ((check_type && tlv->tlv_type != tmpl->tmpl_type) ||
2072 		    tlv->tlv_length != tmpl->tmpl_length) {
2073 			return (EINVAL);
2074 		}
2075 		if (tmpl->tmpl_type == 0) {
2076 			break;
2077 		}
2078 		tlv = (const struct tlvhdr *)
2079 		    ((const char *)tlv + tlv->tlv_length);
2080 		tmpl++;
2081 	}
2082 
2083 	return (0);
2084 }
2085 
2086 /* Debugging */
2087 const char *
2088 lacp_format_mac(const uint8_t *mac, char *buf, size_t buflen)
2089 {
2090 	snprintf(buf, buflen, "%02X-%02X-%02X-%02X-%02X-%02X",
2091 	    (int)mac[0],
2092 	    (int)mac[1],
2093 	    (int)mac[2],
2094 	    (int)mac[3],
2095 	    (int)mac[4],
2096 	    (int)mac[5]);
2097 
2098 	return (buf);
2099 }
2100 
2101 const char *
2102 lacp_format_systemid(const struct lacp_systemid *sysid,
2103     char *buf, size_t buflen)
2104 {
2105 	char macbuf[LACP_MACSTR_MAX+1];
2106 
2107 	snprintf(buf, buflen, "%04X,%s",
2108 	    ntohs(sysid->lsi_prio),
2109 	    lacp_format_mac(sysid->lsi_mac, macbuf, sizeof(macbuf)));
2110 
2111 	return (buf);
2112 }
2113 
2114 const char *
2115 lacp_format_portid(const struct lacp_portid *portid, char *buf, size_t buflen)
2116 {
2117 	snprintf(buf, buflen, "%04X,%04X",
2118 	    ntohs(portid->lpi_prio),
2119 	    ntohs(portid->lpi_portno));
2120 
2121 	return (buf);
2122 }
2123 
2124 const char *
2125 lacp_format_partner(const struct lacp_peerinfo *peer, char *buf, size_t buflen)
2126 {
2127 	char sysid[LACP_SYSTEMIDSTR_MAX+1];
2128 	char portid[LACP_PORTIDSTR_MAX+1];
2129 
2130 	snprintf(buf, buflen, "(%s,%04X,%s)",
2131 	    lacp_format_systemid(&peer->lip_systemid, sysid, sizeof(sysid)),
2132 	    ntohs(peer->lip_key),
2133 	    lacp_format_portid(&peer->lip_portid, portid, sizeof(portid)));
2134 
2135 	return (buf);
2136 }
2137 
2138 const char *
2139 lacp_format_lagid(const struct lacp_peerinfo *a,
2140     const struct lacp_peerinfo *b, char *buf, size_t buflen)
2141 {
2142 	char astr[LACP_PARTNERSTR_MAX+1];
2143 	char bstr[LACP_PARTNERSTR_MAX+1];
2144 
2145 #if 0
2146 	/*
2147 	 * there's a convention to display the lower-numbered peer
2148 	 * on the left.
2149 	 */
2150 
2151 	if (lacp_compare_peerinfo(a, b) > 0) {
2152 		const struct lacp_peerinfo *t;
2153 
2154 		t = a;
2155 		a = b;
2156 		b = t;
2157 	}
2158 #endif
2159 
2160 	snprintf(buf, buflen, "[%s,%s]",
2161 	    lacp_format_partner(a, astr, sizeof(astr)),
2162 	    lacp_format_partner(b, bstr, sizeof(bstr)));
2163 
2164 	return (buf);
2165 }
2166 
2167 const char *
2168 lacp_format_lagid_aggregator(const struct lacp_aggregator *la,
2169     char *buf, size_t buflen)
2170 {
2171 	if (la == NULL) {
2172 		return ("(none)");
2173 	}
2174 
2175 	return (lacp_format_lagid(&la->la_actor, &la->la_partner, buf, buflen));
2176 }
2177 
2178 const char *
2179 lacp_format_state(uint8_t state, char *buf, size_t buflen)
2180 {
2181 	snprintf(buf, buflen, "%b", state, LACP_STATE_BITS);
2182 	return (buf);
2183 }
2184 
2185 static void
2186 lacp_dump_lacpdu(const struct lacpdu *du)
2187 {
2188 	char buf[LACP_PARTNERSTR_MAX+1];
2189 	char buf2[LACP_STATESTR_MAX+1];
2190 
2191 	printf("actor=%s\n",
2192 	    lacp_format_partner(&du->ldu_actor, buf, sizeof(buf)));
2193 	printf("actor.state=%s\n",
2194 	    lacp_format_state(du->ldu_actor.lip_state, buf2, sizeof(buf2)));
2195 	printf("partner=%s\n",
2196 	    lacp_format_partner(&du->ldu_partner, buf, sizeof(buf)));
2197 	printf("partner.state=%s\n",
2198 	    lacp_format_state(du->ldu_partner.lip_state, buf2, sizeof(buf2)));
2199 
2200 	printf("maxdelay=%d\n", ntohs(du->ldu_collector.lci_maxdelay));
2201 }
2202 
2203 static void
2204 lacp_dprintf(const struct lacp_port *lp, const char *fmt, ...)
2205 {
2206 	va_list va;
2207 
2208 	if (lp) {
2209 		printf("%s: ", lp->lp_ifp->if_xname);
2210 	}
2211 
2212 	va_start(va, fmt);
2213 	vprintf(fmt, va);
2214 	va_end(va);
2215 }
2216