/*
 * Originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Licensed under the GPL. Based on the dummy.c and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful for talking to Cisco EtherChannel-compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *	will set up a network device with an IP address.  No MAC address
 *	will be assigned at this time.  The hardware MAC address will come
 *	from the first slave bonded to the channel.  All slaves will then
 *	use this MAC address.
 *
 *    ifconfig bond0 down
 *	will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave.  eth0's hardware MAC address
 *	will either
 *	a: be used as the bond's initial MAC address, or
 *	b: if bond0 already has a MAC address, be overwritten with bond0's
 *	   MAC address.
 */
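/* Editor's note (illustrative, not part of the original comment): ifconfig
 * and ifenslave predate iproute2.  On a current system a roughly equivalent
 * setup would be:
 *
 *	ip link add bond0 type bond
 *	ip link set eth0 down
 *	ip link set eth0 master bond0
 *	ip link set bond0 up
 *	ip addr add 192.0.2.1/24 dev bond0
 *
 * The slave must be down before it is attached; bond_enslave() below
 * enforces this.
 */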

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/xfrm.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
#include <net/ip6_route.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier	= 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				     "by setting active flag for all slaves; "
				     "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");
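
/* Example (illustrative, editor's addition): loading the driver for 802.3ad
 * with MII link monitoring every 100 ms and fast LACPDUs:
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast
 *
 * Most of these parameters can also be set per bond at runtime via sysfs
 * (/sys/class/net/<bond>/bonding/) or netlink rather than at module load.
 */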

/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

static const struct flow_dissector_key flow_keys_bonding_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct flow_keys, icmp),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static struct flow_dissector flow_keys_bonding __read_mostly;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
static void bond_netdev_notify_work(struct work_struct *work);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
			struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
}

bool bond_sk_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
			return true;
		fallthrough;
	default:
		return false;
	}
}

static bool bond_xdp_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
	case BOND_MODE_ACTIVEBACKUP:
		return true;
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		/* vlan+srcmac is not supported with XDP as in most cases the 802.1q
		 * payload is not in the packet due to hardware offload.
		 */
		if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
			return true;
		fallthrough;
	default:
		return false;
	}
}

/*---------------------------------- VLAN -----------------------------------*/

/* In the following two functions, bond_vlan_rx_add_vid and
 * bond_vlan_rx_kill_vid, we don't protect the slave list iteration with a
 * lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
 */
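
/* Illustration (editor's addition): these handlers run when a VLAN device
 * is configured on top of the bond, e.g.:
 *
 *	ip link add link bond0 name bond0.100 type vlan id 100
 *
 * The 8021q code then calls down into bond_vlan_rx_add_vid(), and the
 * functions below propagate the VLAN id to every slave.
 */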

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

/*---------------------------------- XFRM -----------------------------------*/

#ifdef CONFIG_XFRM_OFFLOAD
/**
 * bond_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;
	int err;

	if (!bond_dev)
		return -EINVAL;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
		rcu_read_unlock();
		return -EINVAL;
	}

	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
	if (!ipsec) {
		rcu_read_unlock();
		return -ENOMEM;
	}
	xs->xso.real_dev = slave->dev;

	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
	if (!err) {
		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
	}
	rcu_read_unlock();
	return err;
}

static void bond_ipsec_add_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave)
		goto out;

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
out:
	rcu_read_unlock();
}

/**
 * bond_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;

	if (!bond_dev)
		return;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);

	if (!slave)
		goto out;

	if (!xs->xso.real_dev)
		goto out;

	WARN_ON(xs->xso.real_dev != slave->dev);

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

static void bond_ipsec_del_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

/**
 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct net_device *real_dev;
	struct slave *curr_active;
	struct bonding *bond;
	int err;

	bond = netdev_priv(bond_dev);
	rcu_read_lock();
	curr_active = rcu_dereference(bond->curr_active_slave);
	real_dev = curr_active->dev;

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		err = false;
		goto out;
	}

	if (!xs->xso.real_dev) {
		err = false;
		goto out;
	}

	if (!real_dev->xfrmdev_ops ||
	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
	    netif_is_bond_master(real_dev)) {
		err = false;
		goto out;
	}

	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
	rcu_read_unlock();
	return err;
}

static const struct xfrmdev_ops bond_xfrmdev_ops = {
	.xdo_dev_state_add = bond_ipsec_add_sa,
	.xdo_dev_state_delete = bond_ipsec_del_sa,
	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */

/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver
 * using ethtool. If for some reason the call fails or the
 * values are invalid, set speed and duplex to -1,
 * and return. Return 1 if speed or duplex settings are
 * UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}

const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them).  If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fallback to MII ioctls. */
	ioctl = slave_ops->ndo_eth_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per-team-member
		 *       basis to make this more efficient. That is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no ndo_eth_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status).  If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}
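
/* Userspace analogue (illustrative, editor's addition): the three link-state
 * sources consulted above can each be inspected by hand, in the same order
 * of preference:
 *
 *	cat /sys/class/net/eth0/carrier       # netif_carrier_ok()
 *	ethtool eth0 | grep "Link detected"   # ethtool_ops->get_link
 *	mii-tool eth0                         # SIOCGMIIPHY/SIOCGMIIREG
 */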

/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		dev_mc_del(slave_dev, lacpdu_mcast_addr);
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any).  Modes that are not using primary keep all
 * slaves up to date at all times; only the modes that use primary need to
 * call this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		if (bond->dev->flags & IFF_UP)
			bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		if (bond->dev->flags & IFF_UP) {
			netif_addr_lock_bh(bond->dev);
			dev_uc_sync(new_active->dev, bond->dev);
			dev_mc_sync(new_active->dev, bond->dev);
			netif_addr_unlock_bh(bond->dev);
		}
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
{
	int err;

	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	return 0;
}

static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
		}
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}
}

/**
 * bond_choose_primary_or_current - select the primary or high priority slave
 * @bond: our bonding struct
 *
 * - Check if there is a primary link. If the primary link was set and is up,
 *   go on and do link reselection.
 *
 * - If primary link is not set or down, find the highest priority link.
 *   If the highest priority link is not current slave, set it as primary
 *   link and do link reselection.
 */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
	struct slave *slave, *hprio = NULL;
	struct list_head *iter;

	if (!prim || prim->link != BOND_LINK_UP) {
		bond_for_each_slave(bond, slave, iter) {
			if (slave->link == BOND_LINK_UP) {
				hprio = hprio ?: slave;
				if (slave->prio > hprio->prio)
					hprio = slave;
			}
		}

		if (hprio && hprio != curr) {
			prim = hprio;
			goto link_reselect;
		}

		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

link_reselect:
	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}

/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	if (!slave || !bond->send_peer_notif ||
	    bond->send_peer_notif %
	    max(1, bond->params.peer_notif_delay) != 0 ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	return true;
}

/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
 * @new_active: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new_active's link state is %BOND_LINK_BACK we'll set it to
 * %BOND_LINK_UP, because it is apparently the best available slave we
 * have, even though its updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond))
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers) {
				bond->send_peer_notif--;
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
			}
		}
	}

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}
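
/* Usage note (illustrative, editor's addition): in active-backup mode this
 * failover path can also be driven manually from userspace, e.g.:
 *
 *	echo eth1 > /sys/class/net/bond0/bonding/active_slave
 * or:
 *	ip link set bond0 type bond active_slave eth1
 */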

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;

	__netpoll_free(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

#if IS_ENABLED(CONFIG_TLS_DEVICE)
	if (bond_sk_check(bond))
		features |= BOND_TLS_FEATURES;
	else
		features &= ~BOND_TLS_FEATURES;
#endif

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_GSO_SOFTWARE)

static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
	netdev_features_t xfrm_features  = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
	netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int tso_max_size = TSO_MAX_SIZE;
	u16 tso_max_segs = TSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;
	mpls_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);

		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
		tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
	bond_dev->mpls_features = mpls_features;
	netif_set_tso_max_segs(bond_dev, tso_max_segs);
	netif_set_tso_max_size(bond_dev, tso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}

static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops	    = slave_dev->header_ops;

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->needed_headroom   = slave_dev->needed_headroom;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
		slave_dev->addr_len);
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/*
	 * For packets determined by bond_should_deliver_exact_match() call to
	 * be suppressed we want to make an exception for link-local packets.
	 * This is necessary for e.g. LLDP daemons to be able to monitor
	 * inactive slave links without being forced to bind to them
	 * explicitly.
	 *
	 * At the same time, packets that are passed to the bonding master
	 * (including link-local ones) can have their originating interface
	 * determined via PACKET_ORIGDEV socket option.
	 */
	if (bond_should_deliver_exact_match(skb, slave, bond)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			return RX_HANDLER_PASS;
		return RX_HANDLER_EXACT;
	}

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    netif_is_bridge_port(bond->dev) &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}

static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	case BOND_XMIT_POLICY_VLAN_SRCMAC:
		return NETDEV_LAG_HASH_VLAN_SRCMAC;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					    &lag_upper_info, extack);
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

static void slave_kobj_release(struct kobject *kobj)
{
	struct slave *slave = to_slave(kobj);
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static struct kobj_type slave_ktype = {
	.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
	.sysfs_ops = &slave_sysfs_ops,
#endif
};

static int bond_kobj_init(struct slave *slave)
{
	int err;

	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
				   &(slave->dev->dev.kobj), "bonding_slave");
	if (err)
		kobject_put(&slave->kobj);

	return err;
}

static struct slave *bond_alloc_slave(struct bonding *bond,
				      struct net_device *slave_dev)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	slave->bond = bond;
	slave->dev = slave_dev;
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	if (bond_kobj_init(slave))
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kobject_put(&slave->kobj);
			return NULL;
		}
	}

	return slave;
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;

		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
}

void bond_queue_slave_event(struct slave *slave)
{
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

#define BOND_NL_ERR(bond_dev, extack, errmsg) do {		\
	if (extack)						\
		NL_SET_ERR_MSG(extack, errmsg);			\
	else							\
		netdev_err(bond_dev, "Error: %s\n", errmsg);	\
} while (0)

#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do {		\
	if (extack)							\
		NL_SET_ERR_MSG(extack, errmsg);				\
	else								\
		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
} while (0)

/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr_storage ss;
	int link_reporting;
	int res = 0, i;

	if (slave_dev->flags & IFF_MASTER &&
	    !netif_is_bond_master(slave_dev)) {
		BOND_NL_ERR(bond_dev, extack,
			    "Device type (master device) cannot be enslaved");
		return -EPERM;
	}

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_eth_ioctl == NULL) {
		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
	}

	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device is in use and cannot be enslaved");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
		return -EPERM;
	}

1813 	/* vlan challenged mutual exclusion */
1814 	/* no need to lock since we're protected by rtnl_lock */
1815 	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1816 		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
1817 		if (vlan_uses_dev(bond_dev)) {
1818 			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1819 				     "Can not enslave VLAN challenged device to VLAN enabled bond");
1820 			return -EPERM;
1821 		} else {
1822 			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
1823 		}
1824 	} else {
1825 		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
1826 	}
1827 
1828 	if (slave_dev->features & NETIF_F_HW_ESP)
1829 		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");
1830 
1831 	/* Old ifenslave binaries are no longer supported.  These can
1832 	 * be identified with moderate accuracy by the state of the slave:
1833 	 * the current ifenslave will set the interface down prior to
1834 	 * enslaving it; the old ifenslave will not.
1835 	 */
1836 	if (slave_dev->flags & IFF_UP) {
1837 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1838 			     "Device can not be enslaved while up");
1839 		return -EPERM;
1840 	}
1841 
1842 	/* set bonding device ether type by slave - bonding netdevices are
1843 	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
1844 	 * there is a need to override some of the type dependent attribs/funcs.
1845 	 *
1846 	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
1847  * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the same bond
1848 	 */
1849 	if (!bond_has_slaves(bond)) {
1850 		if (bond_dev->type != slave_dev->type) {
1851 			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
1852 				  bond_dev->type, slave_dev->type);
1853 
1854 			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
1855 						       bond_dev);
1856 			res = notifier_to_errno(res);
1857 			if (res) {
1858 				slave_err(bond_dev, slave_dev, "refused to change device type\n");
1859 				return -EBUSY;
1860 			}
1861 
1862 			/* Flush unicast and multicast addresses */
1863 			dev_uc_flush(bond_dev);
1864 			dev_mc_flush(bond_dev);
1865 
1866 			if (slave_dev->type != ARPHRD_ETHER)
1867 				bond_setup_by_slave(bond_dev, slave_dev);
1868 			else {
1869 				ether_setup(bond_dev);
1870 				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1871 			}
1872 
1873 			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
1874 						 bond_dev);
1875 		}
1876 	} else if (bond_dev->type != slave_dev->type) {
1877 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1878 			     "Device type is different from other slaves");
1879 		return -EINVAL;
1880 	}
1881 
1882 	if (slave_dev->type == ARPHRD_INFINIBAND &&
1883 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1884 		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1885 			     "Only active-backup mode is supported for infiniband slaves");
1886 		res = -EOPNOTSUPP;
1887 		goto err_undo_flags;
1888 	}
1889 
1890 	if (!slave_ops->ndo_set_mac_address ||
1891 	    slave_dev->type == ARPHRD_INFINIBAND) {
1892 		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
1893 		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
1894 		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1895 			if (!bond_has_slaves(bond)) {
1896 				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1897 				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
1898 			} else {
1899 				SLAVE_NL_ERR(bond_dev, slave_dev, extack,
1900 					     "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
1901 				res = -EOPNOTSUPP;
1902 				goto err_undo_flags;
1903 			}
1904 		}
1905 	}
1906 
1907 	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
1908 
1909 	/* If this is the first slave, then we need to set the master's hardware
1910 	 * address to be the same as the slave's.
1911 	 */
1912 	if (!bond_has_slaves(bond) &&
1913 	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
1914 		res = bond_set_dev_addr(bond->dev, slave_dev);
1915 		if (res)
1916 			goto err_undo_flags;
1917 	}
1918 
1919 	new_slave = bond_alloc_slave(bond, slave_dev);
1920 	if (!new_slave) {
1921 		res = -ENOMEM;
1922 		goto err_undo_flags;
1923 	}
1924 
1925 	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
1926 	 * is set via sysfs or module option if desired.
1927 	 */
1928 	new_slave->queue_id = 0;
1929 
1930 	/* Save slave's original mtu and then set it to match the bond */
1931 	new_slave->original_mtu = slave_dev->mtu;
1932 	res = dev_set_mtu(slave_dev, bond->dev->mtu);
1933 	if (res) {
1934 		slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
1935 		goto err_free;
1936 	}
1937 
1938 	/* Save slave's original ("permanent") mac address for modes
1939 	 * that need it, and for restoring it upon release, and then
1940 	 * set it to the master's address
1941 	 */
1942 	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
1943 			  slave_dev->addr_len);
1944 
1945 	if (!bond->params.fail_over_mac ||
1946 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1947 		/* Set slave to master's mac address.  The application already
1948 		 * set the master's mac address to that of the first slave
1949 		 */
1950 		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
1951 		ss.ss_family = slave_dev->type;
1952 		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
1953 					  extack);
1954 		if (res) {
1955 			slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
1956 			goto err_restore_mtu;
1957 		}
1958 	}
1959 
1960 	/* set slave flag before open to prevent IPv6 addrconf */
1961 	slave_dev->flags |= IFF_SLAVE;
1962 
1963 	/* open the slave since the application closed it */
1964 	res = dev_open(slave_dev, extack);
1965 	if (res) {
1966 		slave_err(bond_dev, slave_dev, "Opening slave failed\n");
1967 		goto err_restore_mac;
1968 	}
1969 
1970 	slave_dev->priv_flags |= IFF_BONDING;
1971 	/* initialize slave stats */
1972 	dev_get_stats(new_slave->dev, &new_slave->slave_stats);
1973 
1974 	if (bond_is_lb(bond)) {
1975 		/* bond_alb_init_slave() must be called before all other stages since
1976 		 * it might fail and we do not want to have to undo everything
1977 		 */
1978 		res = bond_alb_init_slave(bond, new_slave);
1979 		if (res)
1980 			goto err_close;
1981 	}
1982 
1983 	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
1984 	if (res) {
1985 		slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
1986 		goto err_close;
1987 	}
1988 
1989 	prev_slave = bond_last_slave(bond);
1990 
1991 	new_slave->delay = 0;
1992 	new_slave->link_failure_count = 0;
1993 
1994 	if (bond_update_speed_duplex(new_slave) &&
1995 	    bond_needs_speed_duplex(bond))
1996 		new_slave->link = BOND_LINK_DOWN;
1997 
1998 	new_slave->last_rx = jiffies -
1999 		(msecs_to_jiffies(bond->params.arp_interval) + 1);
2000 	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
2001 		new_slave->target_last_arp_rx[i] = new_slave->last_rx;
2002 
2003 	new_slave->last_tx = new_slave->last_rx;
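	/* last_rx was backdated above to just over one arp_interval ago, so
	 * a freshly enslaved interface sits at the edge of the ARP monitor's
	 * acceptance window instead of being credited with a brand-new
	 * receive.
	 */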
2004 
2005 	if (bond->params.miimon && !bond->params.use_carrier) {
2006 		link_reporting = bond_check_dev_link(bond, slave_dev, 1);
2007 
2008 		if ((link_reporting == -1) && !bond->params.arp_interval) {
2009 			/* miimon is set but a bonded network driver
2010 			 * does not support ETHTOOL/MII and
2011 			 * arp_interval is not set.  Note: if
2012 			 * use_carrier is enabled, we will never get
2013 			 * here (because netif_carrier is always
2014 			 * supported); thus, we don't need to change
2015 			 * the messages for netif_carrier.
2016 			 */
2017 			slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
2018 		} else if (link_reporting == -1) {
2019 			/* unable to get link status using mii/ethtool */
2020 			slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
2021 		}
2022 	}
2023 
2024 	/* check for initial state */
2025 	new_slave->link = BOND_LINK_NOCHANGE;
2026 	if (bond->params.miimon) {
2027 		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
2028 			if (bond->params.updelay) {
2029 				bond_set_slave_link_state(new_slave,
2030 							  BOND_LINK_BACK,
2031 							  BOND_SLAVE_NOTIFY_NOW);
2032 				new_slave->delay = bond->params.updelay;
2033 			} else {
2034 				bond_set_slave_link_state(new_slave,
2035 							  BOND_LINK_UP,
2036 							  BOND_SLAVE_NOTIFY_NOW);
2037 			}
2038 		} else {
2039 			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
2040 						  BOND_SLAVE_NOTIFY_NOW);
2041 		}
2042 	} else if (bond->params.arp_interval) {
2043 		bond_set_slave_link_state(new_slave,
2044 					  (netif_carrier_ok(slave_dev) ?
2045 					  BOND_LINK_UP : BOND_LINK_DOWN),
2046 					  BOND_SLAVE_NOTIFY_NOW);
2047 	} else {
2048 		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
2049 					  BOND_SLAVE_NOTIFY_NOW);
2050 	}
2051 
2052 	if (new_slave->link != BOND_LINK_DOWN)
2053 		new_slave->last_link_up = jiffies;
2054 	slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
2055 		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
2056 		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
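	/* Summary of the initial state logic above: with miimon the state
	 * comes from an immediate MII poll (updelay is honoured by starting
	 * in BOND_LINK_BACK); with only arp_interval it comes from
	 * netif_carrier_ok(); with no monitoring at all the link is assumed
	 * up.
	 */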
2057 
2058 	if (bond_uses_primary(bond) && bond->params.primary[0]) {
2059 		/* if there is a primary slave, remember it */
2060 		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
2061 			rcu_assign_pointer(bond->primary_slave, new_slave);
2062 			bond->force_primary = true;
2063 		}
2064 	}
2065 
2066 	switch (BOND_MODE(bond)) {
2067 	case BOND_MODE_ACTIVEBACKUP:
2068 		bond_set_slave_inactive_flags(new_slave,
2069 					      BOND_SLAVE_NOTIFY_NOW);
2070 		break;
2071 	case BOND_MODE_8023AD:
2072 		/* in 802.3ad mode, the internal mechanism
2073 		 * will activate the slaves in the selected
2074 		 * aggregator
2075 		 */
2076 		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2077 		/* if this is the first slave */
2078 		if (!prev_slave) {
2079 			SLAVE_AD_INFO(new_slave)->id = 1;
2080 			/* Initialize AD with the number of times that the AD timer is called in 1 second;
2081 			 * this can be done only after the mac address of the bond is set
2082 			 */
2083 			bond_3ad_initialize(bond);
2084 		} else {
2085 			SLAVE_AD_INFO(new_slave)->id =
2086 				SLAVE_AD_INFO(prev_slave)->id + 1;
2087 		}
2088 
2089 		bond_3ad_bind_slave(new_slave);
2090 		break;
2091 	case BOND_MODE_TLB:
2092 	case BOND_MODE_ALB:
2093 		bond_set_active_slave(new_slave);
2094 		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2095 		break;
2096 	default:
2097 		slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
2098 
2099 		/* always active in trunk mode */
2100 		bond_set_active_slave(new_slave);
2101 
2102 		/* In trunking mode there is little meaning to curr_active_slave
2103 		 * anyway (it holds no special properties of the bond device),
2104 		 * so we can change it without calling change_active_interface()
2105 		 */
2106 		if (!rcu_access_pointer(bond->curr_active_slave) &&
2107 		    new_slave->link == BOND_LINK_UP)
2108 			rcu_assign_pointer(bond->curr_active_slave, new_slave);
2109 
2110 		break;
2111 	} /* switch(bond_mode) */
2112 
2113 #ifdef CONFIG_NET_POLL_CONTROLLER
2114 	if (bond->dev->npinfo) {
2115 		if (slave_enable_netpoll(new_slave)) {
2116 			slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2117 			res = -EBUSY;
2118 			goto err_detach;
2119 		}
2120 	}
2121 #endif
2122 
2123 	if (!(bond_dev->features & NETIF_F_LRO))
2124 		dev_disable_lro(slave_dev);
2125 
2126 	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
2127 					 new_slave);
2128 	if (res) {
2129 		slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
2130 		goto err_detach;
2131 	}
2132 
2133 	res = bond_master_upper_dev_link(bond, new_slave, extack);
2134 	if (res) {
2135 		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
2136 		goto err_unregister;
2137 	}
2138 
2139 	bond_lower_state_changed(new_slave);
2140 
2141 	res = bond_sysfs_slave_add(new_slave);
2142 	if (res) {
2143 		slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
2144 		goto err_upper_unlink;
2145 	}
2146 
2147 	/* If the mode uses primary, then the following is handled by
2148 	 * bond_change_active_slave().
2149 	 */
2150 	if (!bond_uses_primary(bond)) {
2151 		/* set promiscuity level to new slave */
2152 		if (bond_dev->flags & IFF_PROMISC) {
2153 			res = dev_set_promiscuity(slave_dev, 1);
2154 			if (res)
2155 				goto err_sysfs_del;
2156 		}
2157 
2158 		/* set allmulti level to new slave */
2159 		if (bond_dev->flags & IFF_ALLMULTI) {
2160 			res = dev_set_allmulti(slave_dev, 1);
2161 			if (res) {
2162 				if (bond_dev->flags & IFF_PROMISC)
2163 					dev_set_promiscuity(slave_dev, -1);
2164 				goto err_sysfs_del;
2165 			}
2166 		}
2167 
2168 		if (bond_dev->flags & IFF_UP) {
2169 			netif_addr_lock_bh(bond_dev);
2170 			dev_mc_sync_multiple(slave_dev, bond_dev);
2171 			dev_uc_sync_multiple(slave_dev, bond_dev);
2172 			netif_addr_unlock_bh(bond_dev);
2173 
2174 			if (BOND_MODE(bond) == BOND_MODE_8023AD)
2175 				dev_mc_add(slave_dev, lacpdu_mcast_addr);
2176 		}
2177 	}
2178 
2179 	bond->slave_cnt++;
2180 	bond_compute_features(bond);
2181 	bond_set_carrier(bond);
2182 
2183 	if (bond_uses_primary(bond)) {
2184 		block_netpoll_tx();
2185 		bond_select_active_slave(bond);
2186 		unblock_netpoll_tx();
2187 	}
2188 
2189 	if (bond_mode_can_use_xmit_hash(bond))
2190 		bond_update_slave_arr(bond, NULL);
2191 
2193 	if (!slave_dev->netdev_ops->ndo_bpf ||
2194 	    !slave_dev->netdev_ops->ndo_xdp_xmit) {
2195 		if (bond->xdp_prog) {
2196 			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2197 				     "Slave does not support XDP");
2198 			res = -EOPNOTSUPP;
2199 			goto err_sysfs_del;
2200 		}
2201 	} else if (bond->xdp_prog) {
2202 		struct netdev_bpf xdp = {
2203 			.command = XDP_SETUP_PROG,
2204 			.flags   = 0,
2205 			.prog    = bond->xdp_prog,
2206 			.extack  = extack,
2207 		};
2208 
2209 		if (dev_xdp_prog_count(slave_dev) > 0) {
2210 			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2211 				     "Slave has XDP program loaded, please unload before enslaving");
2212 			res = -EOPNOTSUPP;
2213 			goto err_sysfs_del;
2214 		}
2215 
2216 		res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
2217 		if (res < 0) {
2218 			/* ndo_bpf() sets extack error message */
2219 			slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
2220 			goto err_sysfs_del;
2221 		}
2222 		if (bond->xdp_prog)
2223 			bpf_prog_inc(bond->xdp_prog);
2224 	}
2225 
2226 	slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
2227 		   bond_is_active_slave(new_slave) ? "an active" : "a backup",
2228 		   new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
2229 
2230 	/* enslave is successful */
2231 	bond_queue_slave_event(new_slave);
2232 	return 0;
2233 
2234 /* Undo stages on error */
2235 err_sysfs_del:
2236 	bond_sysfs_slave_del(new_slave);
2237 
2238 err_upper_unlink:
2239 	bond_upper_dev_unlink(bond, new_slave);
2240 
2241 err_unregister:
2242 	netdev_rx_handler_unregister(slave_dev);
2243 
2244 err_detach:
2245 	vlan_vids_del_by_dev(slave_dev, bond_dev);
2246 	if (rcu_access_pointer(bond->primary_slave) == new_slave)
2247 		RCU_INIT_POINTER(bond->primary_slave, NULL);
2248 	if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
2249 		block_netpoll_tx();
2250 		bond_change_active_slave(bond, NULL);
2251 		bond_select_active_slave(bond);
2252 		unblock_netpoll_tx();
2253 	}
2254 	/* either primary_slave or curr_active_slave might've changed */
2255 	synchronize_rcu();
2256 	slave_disable_netpoll(new_slave);
2257 
2258 err_close:
2259 	if (!netif_is_bond_master(slave_dev))
2260 		slave_dev->priv_flags &= ~IFF_BONDING;
2261 	dev_close(slave_dev);
2262 
2263 err_restore_mac:
2264 	slave_dev->flags &= ~IFF_SLAVE;
2265 	if (!bond->params.fail_over_mac ||
2266 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2267 		/* XXX TODO - fom follow mode needs to change master's
2268 		 * MAC if this slave's MAC is in use by the bond, or at
2269 		 * least print a warning.
2270 		 */
2271 		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
2272 				  new_slave->dev->addr_len);
2273 		ss.ss_family = slave_dev->type;
2274 		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2275 	}
2276 
2277 err_restore_mtu:
2278 	dev_set_mtu(slave_dev, new_slave->original_mtu);
2279 
2280 err_free:
2281 	kobject_put(&new_slave->kobj);
2282 
2283 err_undo_flags:
2284 	/* Enslaving the first slave failed and we need to fix the master's mac */
2285 	if (!bond_has_slaves(bond)) {
2286 		if (ether_addr_equal_64bits(bond_dev->dev_addr,
2287 					    slave_dev->dev_addr))
2288 			eth_hw_addr_random(bond_dev);
2289 		if (bond_dev->type != ARPHRD_ETHER) {
2290 			dev_close(bond_dev);
2291 			ether_setup(bond_dev);
2292 			bond_dev->flags |= IFF_MASTER;
2293 			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2294 		}
2295 	}
2296 
2297 	return res;
2298 }
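
/* For reference, a typical userspace sequence that ends up in
 * bond_enslave() via ndo_add_slave (illustrative, using iproute2):
 *
 *	ip link add bond0 type bond mode active-backup
 *	ip link set eth0 down		# required: see the IFF_UP check above
 *	ip link set eth0 master bond0
 */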
2299 
2300 /* Try to release the slave device <slave> from the bond device <master>
2301  * It is legal to access curr_active_slave without a lock because the entire
2302  * function is RTNL-locked. If "all" is true it means that the function is being called
2303  * while destroying a bond interface and all slaves are being released.
2304  *
2305  * The rules for slave state should be:
2306  *   for Active/Backup:
2307  *     Active stays on; all backups go down
2308  *   for Bonded connections:
2309  *     The first up interface should be left on and all others downed.
2310  */
2311 static int __bond_release_one(struct net_device *bond_dev,
2312 			      struct net_device *slave_dev,
2313 			      bool all, bool unregister)
2314 {
2315 	struct bonding *bond = netdev_priv(bond_dev);
2316 	struct slave *slave, *oldcurrent;
2317 	struct sockaddr_storage ss;
2318 	int old_flags = bond_dev->flags;
2319 	netdev_features_t old_features = bond_dev->features;
2320 
2321 	/* slave is not a slave or master is not master of this slave */
2322 	if (!(slave_dev->flags & IFF_SLAVE) ||
2323 	    !netdev_has_upper_dev(slave_dev, bond_dev)) {
2324 		slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
2325 		return -EINVAL;
2326 	}
2327 
2328 	block_netpoll_tx();
2329 
2330 	slave = bond_get_slave_by_dev(bond, slave_dev);
2331 	if (!slave) {
2332 		/* not a slave of this bond */
2333 		slave_info(bond_dev, slave_dev, "interface not enslaved\n");
2334 		unblock_netpoll_tx();
2335 		return -EINVAL;
2336 	}
2337 
2338 	bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
2339 
2340 	bond_sysfs_slave_del(slave);
2341 
2342 	/* recompute stats just before removing the slave */
2343 	bond_get_stats(bond->dev, &bond->bond_stats);
2344 
2345 	if (bond->xdp_prog) {
2346 		struct netdev_bpf xdp = {
2347 			.command = XDP_SETUP_PROG,
2348 			.flags   = 0,
2349 			.prog	 = NULL,
2350 			.extack  = NULL,
2351 		};
2352 		if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
2353 			slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
2354 	}
2355 
2356 	/* unregister rx_handler early so bond_handle_frame won't be called
2357 	 * for this slave anymore.
2358 	 */
2359 	netdev_rx_handler_unregister(slave_dev);
2360 
2361 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
2362 		bond_3ad_unbind_slave(slave);
2363 
2364 	bond_upper_dev_unlink(bond, slave);
2365 
2366 	if (bond_mode_can_use_xmit_hash(bond))
2367 		bond_update_slave_arr(bond, slave);
2368 
2369 	slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
2370 		    bond_is_active_slave(slave) ? "active" : "backup");
2371 
2372 	oldcurrent = rcu_access_pointer(bond->curr_active_slave);
2373 
2374 	RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2375 
2376 	if (!all && (!bond->params.fail_over_mac ||
2377 		     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
2378 		if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
2379 		    bond_has_slaves(bond))
2380 			slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
2381 				   slave->perm_hwaddr);
2382 	}
2383 
2384 	if (rtnl_dereference(bond->primary_slave) == slave)
2385 		RCU_INIT_POINTER(bond->primary_slave, NULL);
2386 
2387 	if (oldcurrent == slave)
2388 		bond_change_active_slave(bond, NULL);
2389 
2390 	if (bond_is_lb(bond)) {
2391 		/* Must be called only after the slave has been
2392 		 * detached from the list and the curr_active_slave
2393 		 * has been cleared (if slave == oldcurrent),
2394 		 * but before a new active slave is selected.
2395 		 */
2396 		bond_alb_deinit_slave(bond, slave);
2397 	}
2398 
2399 	if (all) {
2400 		RCU_INIT_POINTER(bond->curr_active_slave, NULL);
2401 	} else if (oldcurrent == slave) {
2402 		/* Note that we hold RTNL over this sequence, so there
2403 		 * is no concern that another slave add/remove event
2404 		 * will interfere.
2405 		 */
2406 		bond_select_active_slave(bond);
2407 	}
2408 
2409 	bond_set_carrier(bond);
2410 	if (!bond_has_slaves(bond))
2411 		eth_hw_addr_random(bond_dev);
2412 
2413 	unblock_netpoll_tx();
2414 	synchronize_rcu();
2415 	bond->slave_cnt--;
2416 
2417 	if (!bond_has_slaves(bond)) {
2418 		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2419 		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2420 	}
2421 
2422 	bond_compute_features(bond);
2423 	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2424 	    (old_features & NETIF_F_VLAN_CHALLENGED))
2425 		slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
2426 
2427 	vlan_vids_del_by_dev(slave_dev, bond_dev);
2428 
2429 	/* If the mode uses primary, then this case was handled above by
2430 	 * bond_change_active_slave(..., NULL)
2431 	 */
2432 	if (!bond_uses_primary(bond)) {
2433 		/* unset promiscuity level from slave
2434 		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
2435 		 * of the IFF_PROMISC flag in the bond_dev, but we need the
2436 		 * value of that flag before that change, as that was the value
2437 		 * when this slave was attached, so we cache at the start of the
2438 		 * function and use it here. Same goes for ALLMULTI below
2439 		 */
2440 		if (old_flags & IFF_PROMISC)
2441 			dev_set_promiscuity(slave_dev, -1);
2442 
2443 		/* unset allmulti level from slave */
2444 		if (old_flags & IFF_ALLMULTI)
2445 			dev_set_allmulti(slave_dev, -1);
2446 
2447 		if (old_flags & IFF_UP)
2448 			bond_hw_addr_flush(bond_dev, slave_dev);
2449 	}
2450 
2451 	slave_disable_netpoll(slave);
2452 
2453 	/* close slave before restoring its mac address */
2454 	dev_close(slave_dev);
2455 
2456 	if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2457 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2458 		/* restore original ("permanent") mac address */
2459 		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
2460 				  slave->dev->addr_len);
2461 		ss.ss_family = slave_dev->type;
2462 		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2463 	}
2464 
2465 	if (unregister)
2466 		__dev_set_mtu(slave_dev, slave->original_mtu);
2467 	else
2468 		dev_set_mtu(slave_dev, slave->original_mtu);
2469 
2470 	if (!netif_is_bond_master(slave_dev))
2471 		slave_dev->priv_flags &= ~IFF_BONDING;
2472 
2473 	kobject_put(&slave->kobj);
2474 
2475 	return 0;
2476 }
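
/* The matching userspace operation (illustrative, using iproute2) is
 *
 *	ip link set eth0 nomaster
 *
 * which reaches __bond_release_one() through bond_release()/ndo_del_slave.
 */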
2477 
2478 /* A wrapper used because of ndo_del_link */
2479 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2480 {
2481 	return __bond_release_one(bond_dev, slave_dev, false, false);
2482 }
2483 
2484 /* First release a slave and then destroy the bond if no more slaves are left.
2485  * Must be under rtnl_lock when this function is called.
2486  */
2487 static int bond_release_and_destroy(struct net_device *bond_dev,
2488 				    struct net_device *slave_dev)
2489 {
2490 	struct bonding *bond = netdev_priv(bond_dev);
2491 	int ret;
2492 
2493 	ret = __bond_release_one(bond_dev, slave_dev, false, true);
2494 	if (ret == 0 && !bond_has_slaves(bond) &&
2495 	    bond_dev->reg_state != NETREG_UNREGISTERING) {
2496 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2497 		netdev_info(bond_dev, "Destroying bond\n");
2498 		bond_remove_proc_entry(bond);
2499 		unregister_netdevice(bond_dev);
2500 	}
2501 	return ret;
2502 }
2503 
2504 static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2505 {
2506 	struct bonding *bond = netdev_priv(bond_dev);
2507 
2508 	bond_fill_ifbond(bond, info);
2509 }
2510 
2511 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2512 {
2513 	struct bonding *bond = netdev_priv(bond_dev);
2514 	struct list_head *iter;
2515 	int i = 0, res = -ENODEV;
2516 	struct slave *slave;
2517 
2518 	bond_for_each_slave(bond, slave, iter) {
2519 		if (i++ == (int)info->slave_id) {
2520 			res = 0;
2521 			bond_fill_ifslave(slave, info);
2522 			break;
2523 		}
2524 	}
2525 
2526 	return res;
2527 }
2528 
2529 /*-------------------------------- Monitoring -------------------------------*/
2530 
2531 /* called with rcu_read_lock() */
2532 static int bond_miimon_inspect(struct bonding *bond)
2533 {
2534 	int link_state, commit = 0;
2535 	struct list_head *iter;
2536 	struct slave *slave;
2537 	bool ignore_updelay;
2538 
2539 	ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2540 
2541 	bond_for_each_slave_rcu(bond, slave, iter) {
2542 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2543 
2544 		link_state = bond_check_dev_link(bond, slave->dev, 0);
2545 
2546 		switch (slave->link) {
2547 		case BOND_LINK_UP:
2548 			if (link_state)
2549 				continue;
2550 
2551 			bond_propose_link_state(slave, BOND_LINK_FAIL);
2552 			commit++;
2553 			slave->delay = bond->params.downdelay;
2554 			if (slave->delay) {
2555 				slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
2556 					   (BOND_MODE(bond) ==
2557 					    BOND_MODE_ACTIVEBACKUP) ?
2558 					    (bond_is_active_slave(slave) ?
2559 					     "active " : "backup ") : "",
2560 					   bond->params.downdelay * bond->params.miimon);
2561 			}
2562 			fallthrough;
2563 		case BOND_LINK_FAIL:
2564 			if (link_state) {
2565 				/* recovered before downdelay expired */
2566 				bond_propose_link_state(slave, BOND_LINK_UP);
2567 				slave->last_link_up = jiffies;
2568 				slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
2569 					   (bond->params.downdelay - slave->delay) *
2570 					   bond->params.miimon);
2571 				commit++;
2572 				continue;
2573 			}
2574 
2575 			if (slave->delay <= 0) {
2576 				bond_propose_link_state(slave, BOND_LINK_DOWN);
2577 				commit++;
2578 				continue;
2579 			}
2580 
2581 			slave->delay--;
2582 			break;
2583 
2584 		case BOND_LINK_DOWN:
2585 			if (!link_state)
2586 				continue;
2587 
2588 			bond_propose_link_state(slave, BOND_LINK_BACK);
2589 			commit++;
2590 			slave->delay = bond->params.updelay;
2591 
2592 			if (slave->delay) {
2593 				slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
2594 					   ignore_updelay ? 0 :
2595 					   bond->params.updelay *
2596 					   bond->params.miimon);
2597 			}
2598 			fallthrough;
2599 		case BOND_LINK_BACK:
2600 			if (!link_state) {
2601 				bond_propose_link_state(slave, BOND_LINK_DOWN);
2602 				slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
2603 					   (bond->params.updelay - slave->delay) *
2604 					   bond->params.miimon);
2605 				commit++;
2606 				continue;
2607 			}
2608 
2609 			if (ignore_updelay)
2610 				slave->delay = 0;
2611 
2612 			if (slave->delay <= 0) {
2613 				bond_propose_link_state(slave, BOND_LINK_UP);
2614 				commit++;
2615 				ignore_updelay = false;
2616 				continue;
2617 			}
2618 
2619 			slave->delay--;
2620 			break;
2621 		}
2622 	}
2623 
2624 	return commit;
2625 }
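
/* Illustrative timing for the state machine above, assuming miimon=100 and
 * updelay=400: updelay is stored as 4 (in units of miimon), so a slave in
 * BOND_LINK_BACK needs roughly four consecutive good polls (~400 ms) before
 * BOND_LINK_UP is proposed, and a single bad poll in between drops it
 * straight back to BOND_LINK_DOWN.  downdelay behaves symmetrically on the
 * UP -> FAIL -> DOWN path.
 */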
2626 
2627 static void bond_miimon_link_change(struct bonding *bond,
2628 				    struct slave *slave,
2629 				    char link)
2630 {
2631 	switch (BOND_MODE(bond)) {
2632 	case BOND_MODE_8023AD:
2633 		bond_3ad_handle_link_change(slave, link);
2634 		break;
2635 	case BOND_MODE_TLB:
2636 	case BOND_MODE_ALB:
2637 		bond_alb_handle_link_change(bond, slave, link);
2638 		break;
2639 	case BOND_MODE_XOR:
2640 		bond_update_slave_arr(bond, NULL);
2641 		break;
2642 	}
2643 }
2644 
2645 static void bond_miimon_commit(struct bonding *bond)
2646 {
2647 	struct list_head *iter;
2648 	struct slave *slave, *primary;
2649 
2650 	bond_for_each_slave(bond, slave, iter) {
2651 		switch (slave->link_new_state) {
2652 		case BOND_LINK_NOCHANGE:
2653 			/* For 802.3ad mode, check current slave speed and
2654 			 * duplex again in case its port was disabled after
2655 			 * invalid speed/duplex reporting but recovered before
2656 			 * link monitoring could make a decision on the actual
2657 			 * link status
2658 			 */
2659 			if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2660 			    slave->link == BOND_LINK_UP)
2661 				bond_3ad_adapter_speed_duplex_changed(slave);
2662 			continue;
2663 
2664 		case BOND_LINK_UP:
2665 			if (bond_update_speed_duplex(slave) &&
2666 			    bond_needs_speed_duplex(bond)) {
2667 				slave->link = BOND_LINK_DOWN;
2668 				if (net_ratelimit())
2669 					slave_warn(bond->dev, slave->dev,
2670 						   "failed to get link speed/duplex\n");
2671 				continue;
2672 			}
2673 			bond_set_slave_link_state(slave, BOND_LINK_UP,
2674 						  BOND_SLAVE_NOTIFY_NOW);
2675 			slave->last_link_up = jiffies;
2676 
2677 			primary = rtnl_dereference(bond->primary_slave);
2678 			if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2679 				/* prevent it from being the active one */
2680 				bond_set_backup_slave(slave);
2681 			} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2682 				/* make it immediately active */
2683 				bond_set_active_slave(slave);
2684 			}
2685 
2686 			slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2687 				   slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2688 				   slave->duplex ? "full" : "half");
2689 
2690 			bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2691 
2692 			if (!bond->curr_active_slave || slave == primary)
2693 				goto do_failover;
2694 
2695 			continue;
2696 
2697 		case BOND_LINK_DOWN:
2698 			if (slave->link_failure_count < UINT_MAX)
2699 				slave->link_failure_count++;
2700 
2701 			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2702 						  BOND_SLAVE_NOTIFY_NOW);
2703 
2704 			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2705 			    BOND_MODE(bond) == BOND_MODE_8023AD)
2706 				bond_set_slave_inactive_flags(slave,
2707 							      BOND_SLAVE_NOTIFY_NOW);
2708 
2709 			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2710 
2711 			bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2712 
2713 			if (slave == rcu_access_pointer(bond->curr_active_slave))
2714 				goto do_failover;
2715 
2716 			continue;
2717 
2718 		default:
2719 			slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2720 				  slave->link_new_state);
2721 			bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2722 
2723 			continue;
2724 		}
2725 
2726 do_failover:
2727 		block_netpoll_tx();
2728 		bond_select_active_slave(bond);
2729 		unblock_netpoll_tx();
2730 	}
2731 
2732 	bond_set_carrier(bond);
2733 }
2734 
2735 /* bond_mii_monitor
2736  *
2737  * Really a wrapper that splits the mii monitor into two phases: an
2738  * inspection, then (if inspection indicates something needs to be done)
2739  * an acquisition of appropriate locks followed by a commit phase to
2740  * implement whatever link state changes are indicated.
2741  */
2742 static void bond_mii_monitor(struct work_struct *work)
2743 {
2744 	struct bonding *bond = container_of(work, struct bonding,
2745 					    mii_work.work);
2746 	bool should_notify_peers = false;
2747 	bool commit;
2748 	unsigned long delay;
2749 	struct slave *slave;
2750 	struct list_head *iter;
2751 
2752 	delay = msecs_to_jiffies(bond->params.miimon);
2753 
2754 	if (!bond_has_slaves(bond))
2755 		goto re_arm;
2756 
2757 	rcu_read_lock();
2758 	should_notify_peers = bond_should_notify_peers(bond);
2759 	commit = !!bond_miimon_inspect(bond);
2760 	if (bond->send_peer_notif) {
2761 		rcu_read_unlock();
2762 		if (rtnl_trylock()) {
2763 			bond->send_peer_notif--;
2764 			rtnl_unlock();
2765 		}
2766 	} else {
2767 		rcu_read_unlock();
2768 	}
2769 
2770 	if (commit) {
2771 		/* Race avoidance with bond_close cancel of workqueue */
2772 		if (!rtnl_trylock()) {
2773 			delay = 1;
2774 			should_notify_peers = false;
2775 			goto re_arm;
2776 		}
2777 
2778 		bond_for_each_slave(bond, slave, iter) {
2779 			bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
2780 		}
2781 		bond_miimon_commit(bond);
2782 
2783 		rtnl_unlock();	/* might sleep, hold no other locks */
2784 	}
2785 
2786 re_arm:
2787 	if (bond->params.miimon)
2788 		queue_delayed_work(bond->wq, &bond->mii_work, delay);
2789 
2790 	if (should_notify_peers) {
2791 		if (!rtnl_trylock())
2792 			return;
2793 		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2794 		rtnl_unlock();
2795 	}
2796 }
2797 
2798 static int bond_upper_dev_walk(struct net_device *upper,
2799 			       struct netdev_nested_priv *priv)
2800 {
2801 	__be32 ip = *(__be32 *)priv->data;
2802 
2803 	return ip == bond_confirm_addr(upper, 0, ip);
2804 }
2805 
2806 static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2807 {
2808 	struct netdev_nested_priv priv = {
2809 		.data = (void *)&ip,
2810 	};
2811 	bool ret = false;
2812 
2813 	if (ip == bond_confirm_addr(bond->dev, 0, ip))
2814 		return true;
2815 
2816 	rcu_read_lock();
2817 	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
2818 		ret = true;
2819 	rcu_read_unlock();
2820 
2821 	return ret;
2822 }
2823 
2824 static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
2825 			     struct sk_buff *skb)
2826 {
2827 	struct net_device *bond_dev = slave->bond->dev;
2828 	struct net_device *slave_dev = slave->dev;
2829 	struct bond_vlan_tag *outer_tag = tags;
2830 
2831 	if (!tags || tags->vlan_proto == VLAN_N_VID)
2832 		return true;
2833 
2834 	tags++;
2835 
2836 	/* Go through all the tags backwards and add them to the packet */
2837 	while (tags->vlan_proto != VLAN_N_VID) {
2838 		if (!tags->vlan_id) {
2839 			tags++;
2840 			continue;
2841 		}
2842 
2843 		slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
2844 			  ntohs(tags->vlan_proto), tags->vlan_id);
2845 		skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2846 						tags->vlan_id);
2847 		if (!skb) {
2848 			net_err_ratelimited("failed to insert inner VLAN tag\n");
2849 			return false;
2850 		}
2851 
2852 		tags++;
2853 	}
2854 	/* Set the outer tag */
2855 	if (outer_tag->vlan_id) {
2856 		slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
2857 			  ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2858 		__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2859 				       outer_tag->vlan_id);
2860 	}
2861 
2862 	return true;
2863 }
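
/* The tags array consumed above is built by bond_verify_device_path()
 * below: entry 0 is the outermost tag (set via the VLAN hwaccel field),
 * the remaining tags are inserted into the packet itself, and the array
 * is terminated by an entry whose vlan_proto is VLAN_N_VID.
 */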
2864 
2865 /* We go to the (large) trouble of VLAN tagging ARP frames because
2866  * switches in VLAN mode (especially if ports are configured as
2867  * "native" to a VLAN) might not pass non-tagged frames.
2868  */
2869 static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
2870 			  __be32 src_ip, struct bond_vlan_tag *tags)
2871 {
2872 	struct net_device *bond_dev = slave->bond->dev;
2873 	struct net_device *slave_dev = slave->dev;
2874 	struct sk_buff *skb;
2875 
2876 	slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
2877 		  arp_op, &dest_ip, &src_ip);
2878 
2879 	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2880 			 NULL, slave_dev->dev_addr, NULL);
2881 
2882 	if (!skb) {
2883 		net_err_ratelimited("ARP packet allocation failed\n");
2884 		return;
2885 	}
2886 
2887 	if (bond_handle_vlan(slave, tags, skb)) {
2888 		slave_update_last_tx(slave);
2889 		arp_xmit(skb);
2890 	}
2893 }
2894 
2895 /* Validate the device path between the @start_dev and the @end_dev.
2896  * The path is valid if the @end_dev is reachable through device
2897  * stacking.
2898  * When the path is validated, collect any vlan information in the
2899  * path.
2900  */
2901 struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2902 					      struct net_device *end_dev,
2903 					      int level)
2904 {
2905 	struct bond_vlan_tag *tags;
2906 	struct net_device *upper;
2907 	struct list_head  *iter;
2908 
2909 	if (start_dev == end_dev) {
2910 		tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2911 		if (!tags)
2912 			return ERR_PTR(-ENOMEM);
2913 		tags[level].vlan_proto = VLAN_N_VID;
2914 		return tags;
2915 	}
2916 
2917 	netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2918 		tags = bond_verify_device_path(upper, end_dev, level + 1);
2919 		if (IS_ERR_OR_NULL(tags)) {
2920 			if (IS_ERR(tags))
2921 				return tags;
2922 			continue;
2923 		}
2924 		if (is_vlan_dev(upper)) {
2925 			tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
2926 			tags[level].vlan_id = vlan_dev_vlan_id(upper);
2927 		}
2928 
2929 		return tags;
2930 	}
2931 
2932 	return NULL;
2933 }
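
/* Worked example (illustrative): with VLAN device bond0.100 (id 100)
 * stacked on bond0, bond_verify_device_path(bond0, bond0.100, 0) returns
 * a two-entry array: tags[0] = { ETH_P_8021Q, 100 } and tags[1] acting
 * as the VLAN_N_VID terminator.
 */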
2934 
2935 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2936 {
2937 	struct rtable *rt;
2938 	struct bond_vlan_tag *tags;
2939 	__be32 *targets = bond->params.arp_targets, addr;
2940 	int i;
2941 
2942 	for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2943 		slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
2944 			  __func__, &targets[i]);
2945 		tags = NULL;
2946 
2947 		/* Find out which dev the packet should go through */
2948 		rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2949 				     RTO_ONLINK, 0);
2950 		if (IS_ERR(rt)) {
2951 			/* there's no route to target - try to send an arp
2952 			 * probe to generate any traffic (arp_validate=0)
2953 			 */
2954 			if (bond->params.arp_validate)
2955 				pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2956 					     bond->dev->name,
2957 					     &targets[i]);
2958 			bond_arp_send(slave, ARPOP_REQUEST, targets[i],
2959 				      0, tags);
2960 			continue;
2961 		}
2962 
2963 		/* bond device itself */
2964 		if (rt->dst.dev == bond->dev)
2965 			goto found;
2966 
2967 		rcu_read_lock();
2968 		tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
2969 		rcu_read_unlock();
2970 
2971 		if (!IS_ERR_OR_NULL(tags))
2972 			goto found;
2973 
2974 		/* Not our device - skip */
2975 		slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
2976 			   &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
2977 
2978 		ip_rt_put(rt);
2979 		continue;
2980 
2981 found:
2982 		addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2983 		ip_rt_put(rt);
2984 		bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
2985 		kfree(tags);
2986 	}
2987 }
2988 
2989 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2990 {
2991 	int i;
2992 
2993 	if (!sip || !bond_has_this_ip(bond, tip)) {
2994 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
2995 			   __func__, &sip, &tip);
2996 		return;
2997 	}
2998 
2999 	i = bond_get_targets_ip(bond->params.arp_targets, sip);
3000 	if (i == -1) {
3001 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
3002 			   __func__, &sip);
3003 		return;
3004 	}
3005 	slave->last_rx = jiffies;
3006 	slave->target_last_arp_rx[i] = jiffies;
3007 }
3008 
3009 static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
3010 			struct slave *slave)
3011 {
3012 	struct arphdr *arp = (struct arphdr *)skb->data;
3013 	struct slave *curr_active_slave, *curr_arp_slave;
3014 	unsigned char *arp_ptr;
3015 	__be32 sip, tip;
3016 	unsigned int alen;
3017 
3018 	alen = arp_hdr_len(bond->dev);
3019 
3020 	if (alen > skb_headlen(skb)) {
3021 		arp = kmalloc(alen, GFP_ATOMIC);
3022 		if (!arp)
3023 			goto out_unlock;
3024 		if (skb_copy_bits(skb, 0, arp, alen) < 0)
3025 			goto out_unlock;
3026 	}
3027 
3028 	if (arp->ar_hln != bond->dev->addr_len ||
3029 	    skb->pkt_type == PACKET_OTHERHOST ||
3030 	    skb->pkt_type == PACKET_LOOPBACK ||
3031 	    arp->ar_hrd != htons(ARPHRD_ETHER) ||
3032 	    arp->ar_pro != htons(ETH_P_IP) ||
3033 	    arp->ar_pln != 4)
3034 		goto out_unlock;
3035 
3036 	arp_ptr = (unsigned char *)(arp + 1);
3037 	arp_ptr += bond->dev->addr_len;
3038 	memcpy(&sip, arp_ptr, 4);
3039 	arp_ptr += 4 + bond->dev->addr_len;
3040 	memcpy(&tip, arp_ptr, 4);
3041 
3042 	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
3043 		  __func__, slave->dev->name, bond_slave_state(slave),
3044 		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3045 		  &sip, &tip);
3046 
3047 	curr_active_slave = rcu_dereference(bond->curr_active_slave);
3048 	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3049 
3050 	/* We 'trust' the received ARP enough to validate it if:
3051 	 *
3052 	 * (a) the slave receiving the ARP is active (which includes the
3053 	 * current ARP slave, if any), or
3054 	 *
3055 	 * (b) the receiving slave isn't active, but there is a currently
3056 	 * active slave and it received valid arp reply(s) after it became
3057 	 * the currently active slave, or
3058 	 *
3059 	 * (c) there is an ARP slave that sent an ARP during the prior ARP
3060 	 * interval, and we receive an ARP reply on any slave.  We accept
3061 	 * these because switch FDB update delays may deliver the ARP
3062 	 * reply to a slave other than the sender of the ARP request.
3063 	 *
3064 	 * Note: for (b), backup slaves are receiving the broadcast ARP
3065 	 * request, not a reply.  This request passes from the sending
3066 	 * slave through the L2 switch(es) to the receiving slave.  Since
3067 	 * this is checking the request, sip/tip are swapped for
3068 	 * validation.
3069 	 *
3070 	 * This is done to avoid endless looping when we can't reach the
3071 	 * arp_ip_target and fool ourselves with our own arp requests.
3072 	 */
3073 	if (bond_is_active_slave(slave))
3074 		bond_validate_arp(bond, slave, sip, tip);
3075 	else if (curr_active_slave &&
3076 		 time_after(slave_last_rx(bond, curr_active_slave),
3077 			    curr_active_slave->last_link_up))
3078 		bond_validate_arp(bond, slave, tip, sip);
3079 	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
3080 		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3081 		bond_validate_arp(bond, slave, sip, tip);
3082 
3083 out_unlock:
3084 	if (arp != (struct arphdr *)skb->data)
3085 		kfree(arp);
3086 	return RX_HANDLER_ANOTHER;
3087 }
3088 
3089 #if IS_ENABLED(CONFIG_IPV6)
3090 static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
3091 			 const struct in6_addr *saddr, struct bond_vlan_tag *tags)
3092 {
3093 	struct net_device *bond_dev = slave->bond->dev;
3094 	struct net_device *slave_dev = slave->dev;
3095 	struct in6_addr mcaddr;
3096 	struct sk_buff *skb;
3097 
3098 	slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
3099 		  daddr, saddr);
3100 
3101 	skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
3102 	if (!skb) {
3103 		net_err_ratelimited("NS packet allocation failed\n");
3104 		return;
3105 	}
3106 
3107 	addrconf_addr_solict_mult(daddr, &mcaddr);
3108 	if (bond_handle_vlan(slave, tags, skb)) {
3109 		slave_update_last_tx(slave);
3110 		ndisc_send_skb(skb, &mcaddr, saddr);
3111 	}
3112 }
3113 
3114 static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
3115 {
3116 	struct in6_addr *targets = bond->params.ns_targets;
3117 	struct bond_vlan_tag *tags;
3118 	struct dst_entry *dst;
3119 	struct in6_addr saddr;
3120 	struct flowi6 fl6;
3121 	int i;
3122 
3123 	for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
3124 		slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
3125 			  __func__, &targets[i]);
3126 		tags = NULL;
3127 
3128 		/* Find out which dev the packet should go through */
3129 		memset(&fl6, 0, sizeof(struct flowi6));
3130 		fl6.daddr = targets[i];
3131 		fl6.flowi6_oif = bond->dev->ifindex;
3132 
3133 		dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
3134 		if (dst->error) {
3135 			dst_release(dst);
3136 			/* there's no route to target - try to send an NS
3137 			 * probe to generate any traffic (arp_validate=0)
3138 			 */
3139 			if (bond->params.arp_validate)
3140 				pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
3141 					     bond->dev->name,
3142 					     &targets[i]);
3143 			bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3144 			continue;
3145 		}
3146 
3147 		/* bond device itself */
3148 		if (dst->dev == bond->dev)
3149 			goto found;
3150 
3151 		rcu_read_lock();
3152 		tags = bond_verify_device_path(bond->dev, dst->dev, 0);
3153 		rcu_read_unlock();
3154 
3155 		if (!IS_ERR_OR_NULL(tags))
3156 			goto found;
3157 
3158 		/* Not our device - skip */
3159 		slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
3160 			  &targets[i], dst->dev ? dst->dev->name : "NULL");
3161 
3162 		dst_release(dst);
3163 		continue;
3164 
3165 found:
3166 		if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
3167 			bond_ns_send(slave, &targets[i], &saddr, tags);
3168 		else
3169 			bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3170 
3171 		dst_release(dst);
3172 		kfree(tags);
3173 	}
3174 }
3175 
3176 static int bond_confirm_addr6(struct net_device *dev,
3177 			      struct netdev_nested_priv *priv)
3178 {
3179 	struct in6_addr *addr = (struct in6_addr *)priv->data;
3180 
3181 	return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
3182 }
3183 
3184 static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
3185 {
3186 	struct netdev_nested_priv priv = {
3187 		.data = addr,
3188 	};
3189 	bool ret = false;
3190 
3191 	if (bond_confirm_addr6(bond->dev, &priv))
3192 		return true;
3193 
3194 	rcu_read_lock();
3195 	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
3196 		ret = true;
3197 	rcu_read_unlock();
3198 
3199 	return ret;
3200 }
3201 
3202 static void bond_validate_na(struct bonding *bond, struct slave *slave,
3203 			     struct in6_addr *saddr, struct in6_addr *daddr)
3204 {
3205 	int i;
3206 
3207 	/* Ignore NAs that:
3208 	 * 1. Source address is the unspecified address.
3209 	 * 2. Dest address is neither the all-nodes multicast address nor
3210 	 *    an address configured on the bond interface.
3211 	 */
3212 	if (ipv6_addr_any(saddr) ||
3213 	    (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
3214 	     !bond_has_this_ip6(bond, daddr))) {
3215 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
3216 			  __func__, saddr, daddr);
3217 		return;
3218 	}
3219 
3220 	i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
3221 	if (i == -1) {
3222 		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
3223 			  __func__, saddr);
3224 		return;
3225 	}
3226 	slave->last_rx = jiffies;
3227 	slave->target_last_arp_rx[i] = jiffies;
3228 }
3229 
3230 static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
3231 		       struct slave *slave)
3232 {
3233 	struct slave *curr_active_slave, *curr_arp_slave;
3234 	struct icmp6hdr *hdr = icmp6_hdr(skb);
3235 	struct in6_addr *saddr, *daddr;
3236 
3237 	if (skb->pkt_type == PACKET_OTHERHOST ||
3238 	    skb->pkt_type == PACKET_LOOPBACK ||
3239 	    hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
3240 		goto out;
3241 
3242 	saddr = &ipv6_hdr(skb)->saddr;
3243 	daddr = &ipv6_hdr(skb)->daddr;
3244 
3245 	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
3246 		  __func__, slave->dev->name, bond_slave_state(slave),
3247 		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3248 		  saddr, daddr);
3249 
3250 	curr_active_slave = rcu_dereference(bond->curr_active_slave);
3251 	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3252 
3253 	/* We 'trust' the received ARP enough to validate it if:
3254 	 * see bond_arp_rcv().
3255 	 */
3256 	if (bond_is_active_slave(slave))
3257 		bond_validate_na(bond, slave, saddr, daddr);
3258 	else if (curr_active_slave &&
3259 		 time_after(slave_last_rx(bond, curr_active_slave),
3260 			    curr_active_slave->last_link_up))
3261 		bond_validate_na(bond, slave, saddr, daddr);
3262 	else if (curr_arp_slave &&
3263 		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3264 		bond_validate_na(bond, slave, saddr, daddr);
3265 
3266 out:
3267 	return RX_HANDLER_ANOTHER;
3268 }
3269 #endif
3270 
3271 int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
3272 		      struct slave *slave)
3273 {
3274 #if IS_ENABLED(CONFIG_IPV6)
3275 	bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
3276 #endif
3277 	bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
3278 
3279 	slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
3280 		  __func__, skb->dev->name);
3281 
3282 	/* Use arp validate logic for both ARP and NS */
3283 	if (!slave_do_arp_validate(bond, slave)) {
3284 		if ((slave_do_arp_validate_only(bond) && is_arp) ||
3285 #if IS_ENABLED(CONFIG_IPV6)
3286 		    (slave_do_arp_validate_only(bond) && is_ipv6) ||
3287 #endif
3288 		    !slave_do_arp_validate_only(bond))
3289 			slave->last_rx = jiffies;
3290 		return RX_HANDLER_ANOTHER;
3291 	} else if (is_arp) {
3292 		return bond_arp_rcv(skb, bond, slave);
3293 #if IS_ENABLED(CONFIG_IPV6)
3294 	} else if (is_ipv6) {
3295 		return bond_na_rcv(skb, bond, slave);
3296 #endif
3297 	} else {
3298 		return RX_HANDLER_ANOTHER;
3299 	}
3300 }
3301 
3302 static void bond_send_validate(struct bonding *bond, struct slave *slave)
3303 {
3304 	bond_arp_send_all(bond, slave);
3305 #if IS_ENABLED(CONFIG_IPV6)
3306 	bond_ns_send_all(bond, slave);
3307 #endif
3308 }
3309 
3310 /* function to verify if we're in the arp_interval timeslice; returns true if
3311  * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
3312  * arp_interval/2).  The arp_interval/2 slack is needed for really fast networks.
3313  */
3314 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
3315 				  int mod)
3316 {
3317 	int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3318 
3319 	return time_in_range(jiffies,
3320 			     last_act - delta_in_ticks,
3321 			     last_act + mod * delta_in_ticks + delta_in_ticks/2);
3322 }
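
/* Worked example: with arp_interval=1000 and mod=1 the accepted range is
 * [last_act - 1000 ms, last_act + 1500 ms] in jiffies, i.e. "now" still
 * falls inside it as long as last_act is no more than about one and a
 * half intervals in the past.
 */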
3323 
3324 /* This function is called regularly to monitor each slave's link
3325  * ensuring that traffic is being sent and received when arp monitoring
3326  * is used in load-balancing mode. if the adapter has been dormant, then an
3327  * arp is transmitted to generate traffic. see activebackup_arp_monitor for
3328  * arp monitoring in active backup mode.
3329  */
3330 static void bond_loadbalance_arp_mon(struct bonding *bond)
3331 {
3332 	struct slave *slave, *oldcurrent;
3333 	struct list_head *iter;
3334 	int do_failover = 0, slave_state_changed = 0;
3335 
3336 	if (!bond_has_slaves(bond))
3337 		goto re_arm;
3338 
3339 	rcu_read_lock();
3340 
3341 	oldcurrent = rcu_dereference(bond->curr_active_slave);
3342 	/* see if any of the previous devices are up now (i.e. they have
3343 	 * tx and rx traffic). the curr_active_slave does not come into
3344 	 * the picture unless it is null. also, slave->last_link_up is not
3345 	 * needed here because we send an arp on each slave and give a slave
3346 	 * as long as it needs to get the tx/rx within the delta.
3347 	 * TODO: what about up/down delay in arp mode? it wasn't here before
3348 	 *       so it can wait
3349 	 */
3350 	bond_for_each_slave_rcu(bond, slave, iter) {
3351 		unsigned long last_tx = slave_last_tx(slave);
3352 
3353 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3354 
3355 		if (slave->link != BOND_LINK_UP) {
3356 			if (bond_time_in_interval(bond, last_tx, 1) &&
3357 			    bond_time_in_interval(bond, slave->last_rx, 1)) {
3358 
3359 				bond_propose_link_state(slave, BOND_LINK_UP);
3360 				slave_state_changed = 1;
3361 
3362 				/* primary_slave has no meaning in round-robin
3363 				 * mode. the window of a slave being up and
3364 				 * curr_active_slave being null after enslaving
3365 				 * is closed.
3366 				 */
3367 				if (!oldcurrent) {
3368 					slave_info(bond->dev, slave->dev, "link status definitely up\n");
3369 					do_failover = 1;
3370 				} else {
3371 					slave_info(bond->dev, slave->dev, "interface is now up\n");
3372 				}
3373 			}
3374 		} else {
3375 			/* slave->link == BOND_LINK_UP */
3376 
3377 			/* not all switches will respond to an arp request
3378 			 * when the source ip is 0, so don't take the link down
3379 			 * if we don't know our ip yet
3380 			 */
3381 			if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3382 			    !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
3383 
3384 				bond_propose_link_state(slave, BOND_LINK_DOWN);
3385 				slave_state_changed = 1;
3386 
3387 				if (slave->link_failure_count < UINT_MAX)
3388 					slave->link_failure_count++;
3389 
3390 				slave_info(bond->dev, slave->dev, "interface is now down\n");
3391 
3392 				if (slave == oldcurrent)
3393 					do_failover = 1;
3394 			}
3395 		}
3396 
3397 		/* note: if switch is in round-robin mode, all links
3398 		 * must tx arp to ensure all links rx an arp - otherwise
3399 		 * links may oscillate or not come up at all; if switch is
3400 		 * in something like xor mode, there is nothing we can
3401 		 * do - all replies will be rx'ed on same link causing slaves
3402 		 * to be unstable during low/no traffic periods
3403 		 */
3404 		if (bond_slave_is_up(slave))
3405 			bond_send_validate(bond, slave);
3406 	}
3407 
3408 	rcu_read_unlock();
3409 
3410 	if (do_failover || slave_state_changed) {
3411 		if (!rtnl_trylock())
3412 			goto re_arm;
3413 
3414 		bond_for_each_slave(bond, slave, iter) {
3415 			if (slave->link_new_state != BOND_LINK_NOCHANGE)
3416 				slave->link = slave->link_new_state;
3417 		}
3418 
3419 		if (slave_state_changed) {
3420 			bond_slave_state_change(bond);
3421 			if (BOND_MODE(bond) == BOND_MODE_XOR)
3422 				bond_update_slave_arr(bond, NULL);
3423 		}
3424 		if (do_failover) {
3425 			block_netpoll_tx();
3426 			bond_select_active_slave(bond);
3427 			unblock_netpoll_tx();
3428 		}
3429 		rtnl_unlock();
3430 	}
3431 
3432 re_arm:
3433 	if (bond->params.arp_interval)
3434 		queue_delayed_work(bond->wq, &bond->arp_work,
3435 				   msecs_to_jiffies(bond->params.arp_interval));
3436 }
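
/* Illustrative thresholds for the monitor above, assuming arp_interval=1000
 * and missed_max=2: a down slave is brought up once both tx and rx have been
 * seen within the last interval, and an up slave is taken down once either
 * tx or rx has been missing for more than about two intervals.
 */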
3437 
3438 /* Called to inspect slaves for active-backup mode ARP monitor link state
3439  * changes.  Sets proposed link state in slaves to specify what action
3440  * should take place for the slave.  Returns 0 if no changes are found, >0
3441  * if changes to link states must be committed.
3442  *
3443  * Called with rcu_read_lock held.
3444  */
3445 static int bond_ab_arp_inspect(struct bonding *bond)
3446 {
3447 	unsigned long last_tx, last_rx;
3448 	struct list_head *iter;
3449 	struct slave *slave;
3450 	int commit = 0;
3451 
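	/* Illustrative thresholds, assuming arp_interval=1000 and
	 * missed_max=2 (hypothetical values): a backup slave is proposed
	 * down after more than 3 intervals (~3s) without rx, the active
	 * slave after more than 2 intervals (~2s) without tx or rx, as
	 * implemented case by case below.
	 */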
3452 	bond_for_each_slave_rcu(bond, slave, iter) {
3453 		bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3454 		last_rx = slave_last_rx(bond, slave);
3455 
3456 		if (slave->link != BOND_LINK_UP) {
3457 			if (bond_time_in_interval(bond, last_rx, 1)) {
3458 				bond_propose_link_state(slave, BOND_LINK_UP);
3459 				commit++;
3460 			} else if (slave->link == BOND_LINK_BACK) {
3461 				bond_propose_link_state(slave, BOND_LINK_FAIL);
3462 				commit++;
3463 			}
3464 			continue;
3465 		}
3466 
3467 		/* Give slaves 2*delta after being enslaved or made
3468 		 * active.  This avoids bouncing, as the last receive
3469 		 * times need a full ARP monitor cycle to be updated.
3470 		 */
3471 		if (bond_time_in_interval(bond, slave->last_link_up, 2))
3472 			continue;
3473 
3474 		/* Backup slave is down if:
3475 		 * - No current_arp_slave AND
3476 		 * - more than (missed_max+1)*delta since last receive AND
3477 		 * - the bond has an IP address
3478 		 *
3479 		 * Note: a non-null current_arp_slave indicates
3480 		 * the curr_active_slave went down and we are
3481 		 * searching for a new one; under this condition
3482 		 * we only take the curr_active_slave down - this
3483 		 * gives each slave a chance to tx/rx traffic
3484 		 * before being taken out
3485 		 */
3486 		if (!bond_is_active_slave(slave) &&
3487 		    !rcu_access_pointer(bond->current_arp_slave) &&
3488 		    !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
3489 			bond_propose_link_state(slave, BOND_LINK_DOWN);
3490 			commit++;
3491 		}
3492 
3493 		/* Active slave is down if:
3494 		 * - more than missed_max*delta since transmitting OR
3495 		 * - (more than missed_max*delta since receive AND
3496 		 *    the bond has an IP address)
3497 		 */
3498 		last_tx = slave_last_tx(slave);
3499 		if (bond_is_active_slave(slave) &&
3500 		    (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3501 		     !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
3502 			bond_propose_link_state(slave, BOND_LINK_DOWN);
3503 			commit++;
3504 		}
3505 	}
3506 
3507 	return commit;
3508 }
3509 
3510 /* Called to commit link state changes noted by inspection step of
3511  * active-backup mode ARP monitor.
3512  *
3513  * Called with RTNL held.
3514  */
3515 static void bond_ab_arp_commit(struct bonding *bond)
3516 {
3517 	struct list_head *iter;
3518 	unsigned long last_tx;
3519 	struct slave *slave;
3520 
3521 	bond_for_each_slave(bond, slave, iter) {
3522 		switch (slave->link_new_state) {
3523 		case BOND_LINK_NOCHANGE:
3524 			continue;
3525 
3526 		case BOND_LINK_UP:
3527 			last_tx = slave_last_tx(slave);
3528 			if (rtnl_dereference(bond->curr_active_slave) != slave ||
3529 			    (!rtnl_dereference(bond->curr_active_slave) &&
3530 			     bond_time_in_interval(bond, last_tx, 1))) {
3531 				struct slave *current_arp_slave;
3532 
3533 				current_arp_slave = rtnl_dereference(bond->current_arp_slave);
3534 				bond_set_slave_link_state(slave, BOND_LINK_UP,
3535 							  BOND_SLAVE_NOTIFY_NOW);
3536 				if (current_arp_slave) {
3537 					bond_set_slave_inactive_flags(
3538 						current_arp_slave,
3539 						BOND_SLAVE_NOTIFY_NOW);
3540 					RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3541 				}
3542 
3543 				slave_info(bond->dev, slave->dev, "link status definitely up\n");
3544 
3545 				if (!rtnl_dereference(bond->curr_active_slave) ||
3546 				    slave == rtnl_dereference(bond->primary_slave))
3547 					goto do_failover;
3548 
3549 			}
3550 
3551 			continue;
3552 
3553 		case BOND_LINK_DOWN:
3554 			if (slave->link_failure_count < UINT_MAX)
3555 				slave->link_failure_count++;
3556 
3557 			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3558 						  BOND_SLAVE_NOTIFY_NOW);
3559 			bond_set_slave_inactive_flags(slave,
3560 						      BOND_SLAVE_NOTIFY_NOW);
3561 
3562 			slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
3563 
3564 			if (slave == rtnl_dereference(bond->curr_active_slave)) {
3565 				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3566 				goto do_failover;
3567 			}
3568 
3569 			continue;
3570 
3571 		case BOND_LINK_FAIL:
3572 			bond_set_slave_link_state(slave, BOND_LINK_FAIL,
3573 						  BOND_SLAVE_NOTIFY_NOW);
3574 			bond_set_slave_inactive_flags(slave,
3575 						      BOND_SLAVE_NOTIFY_NOW);
3576 
3577 			/* A slave has just been enslaved and has become
3578 			 * the current active slave.
3579 			 */
3580 			if (rtnl_dereference(bond->curr_active_slave))
3581 				RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3582 			continue;
3583 
3584 		default:
3585 			slave_err(bond->dev, slave->dev,
3586 				  "impossible: link_new_state %d on slave\n",
3587 				  slave->link_new_state);
3588 			continue;
3589 		}
3590 
3591 do_failover:
3592 		block_netpoll_tx();
3593 		bond_select_active_slave(bond);
3594 		unblock_netpoll_tx();
3595 	}
3596 
3597 	bond_set_carrier(bond);
3598 }
3599 
3600 /* Send ARP probes for active-backup mode ARP monitor.
3601  *
3602  * Called with rcu_read_lock held.
3603  */
3604 static bool bond_ab_arp_probe(struct bonding *bond)
3605 {
3606 	struct slave *slave, *before = NULL, *new_slave = NULL,
3607 		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
3608 		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
3609 	struct list_head *iter;
3610 	bool found = false;
3611 	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
3612 
3613 	if (curr_arp_slave && curr_active_slave)
3614 		netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
3615 			    curr_arp_slave->dev->name,
3616 			    curr_active_slave->dev->name);
3617 
3618 	if (curr_active_slave) {
3619 		bond_send_validate(bond, curr_active_slave);
3620 		return should_notify_rtnl;
3621 	}
3622 
3623 	/* if we don't have a curr_active_slave, search for the next available
3624 	 * backup slave from the current_arp_slave and make it the candidate
3625 	 * for becoming the curr_active_slave
3626 	 */
3627 
3628 	if (!curr_arp_slave) {
3629 		curr_arp_slave = bond_first_slave_rcu(bond);
3630 		if (!curr_arp_slave)
3631 			return should_notify_rtnl;
3632 	}
3633 
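	/* Single pass over the slaves: remember the first usable slave
	 * seen before curr_arp_slave ("before", the wrap-around fallback)
	 * and take the first usable slave after it ("new_slave") as the
	 * next probe candidate.
	 */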
3634 	bond_for_each_slave_rcu(bond, slave, iter) {
3635 		if (!found && !before && bond_slave_is_up(slave))
3636 			before = slave;
3637 
3638 		if (found && !new_slave && bond_slave_is_up(slave))
3639 			new_slave = slave;
3640 		/* if the link state is up at this point, we
3641 		 * mark it down - this can happen if we have
3642 		 * simultaneous link failures and
3643 		 * reselect_active_interface doesn't make this
3644 		 * one the current slave, so it is still marked
3645 		 * up even though it is actually down
3646 		 */
3647 		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3648 			bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3649 						  BOND_SLAVE_NOTIFY_LATER);
3650 			if (slave->link_failure_count < UINT_MAX)
3651 				slave->link_failure_count++;
3652 
3653 			bond_set_slave_inactive_flags(slave,
3654 						      BOND_SLAVE_NOTIFY_LATER);
3655 
3656 			slave_info(bond->dev, slave->dev, "backup interface is now down\n");
3657 		}
3658 		if (slave == curr_arp_slave)
3659 			found = true;
3660 	}
3661 
3662 	if (!new_slave && before)
3663 		new_slave = before;
3664 
3665 	if (!new_slave)
3666 		goto check_state;
3667 
3668 	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
3669 				  BOND_SLAVE_NOTIFY_LATER);
3670 	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
3671 	bond_send_validate(bond, new_slave);
3672 	new_slave->last_link_up = jiffies;
3673 	rcu_assign_pointer(bond->current_arp_slave, new_slave);
3674 
3675 check_state:
3676 	bond_for_each_slave_rcu(bond, slave, iter) {
3677 		if (slave->should_notify || slave->should_notify_link) {
3678 			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
3679 			break;
3680 		}
3681 	}
3682 	return should_notify_rtnl;
3683 }
3684 
3685 static void bond_activebackup_arp_mon(struct bonding *bond)
3686 {
3687 	bool should_notify_peers = false;
3688 	bool should_notify_rtnl = false;
3689 	int delta_in_ticks;
3690 
3691 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3692 
3693 	if (!bond_has_slaves(bond))
3694 		goto re_arm;
3695 
3696 	rcu_read_lock();
3697 
3698 	should_notify_peers = bond_should_notify_peers(bond);
3699 
3700 	if (bond_ab_arp_inspect(bond)) {
3701 		rcu_read_unlock();
3702 
3703 		/* Race avoidance with bond_close flush of workqueue */
3704 		if (!rtnl_trylock()) {
3705 			delta_in_ticks = 1;
3706 			should_notify_peers = false;
3707 			goto re_arm;
3708 		}
3709 
3710 		bond_ab_arp_commit(bond);
3711 
3712 		rtnl_unlock();
3713 		rcu_read_lock();
3714 	}
3715 
3716 	should_notify_rtnl = bond_ab_arp_probe(bond);
3717 	rcu_read_unlock();
3718 
3719 re_arm:
3720 	if (bond->params.arp_interval)
3721 		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3722 
3723 	if (should_notify_peers || should_notify_rtnl) {
3724 		if (!rtnl_trylock())
3725 			return;
3726 
3727 		if (should_notify_peers) {
3728 			bond->send_peer_notif--;
3729 			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
3730 						 bond->dev);
3731 		}
3732 		if (should_notify_rtnl) {
3733 			bond_slave_state_notify(bond);
3734 			bond_slave_link_notify(bond);
3735 		}
3736 
3737 		rtnl_unlock();
3738 	}
3739 }
3740 
3741 static void bond_arp_monitor(struct work_struct *work)
3742 {
3743 	struct bonding *bond = container_of(work, struct bonding,
3744 					    arp_work.work);
3745 
3746 	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3747 		bond_activebackup_arp_mon(bond);
3748 	else
3749 		bond_loadbalance_arp_mon(bond);
3750 }
3751 
3752 /*-------------------------- netdev event handling --------------------------*/
3753 
3754 /* Change device name */
3755 static int bond_event_changename(struct bonding *bond)
3756 {
3757 	bond_remove_proc_entry(bond);
3758 	bond_create_proc_entry(bond);
3759 
3760 	bond_debug_reregister(bond);
3761 
3762 	return NOTIFY_DONE;
3763 }
3764 
3765 static int bond_master_netdev_event(unsigned long event,
3766 				    struct net_device *bond_dev)
3767 {
3768 	struct bonding *event_bond = netdev_priv(bond_dev);
3769 
3770 	netdev_dbg(bond_dev, "%s called\n", __func__);
3771 
3772 	switch (event) {
3773 	case NETDEV_CHANGENAME:
3774 		return bond_event_changename(event_bond);
3775 	case NETDEV_UNREGISTER:
3776 		bond_remove_proc_entry(event_bond);
3777 #ifdef CONFIG_XFRM_OFFLOAD
3778 		xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
3779 #endif /* CONFIG_XFRM_OFFLOAD */
3780 		break;
3781 	case NETDEV_REGISTER:
3782 		bond_create_proc_entry(event_bond);
3783 		break;
3784 	default:
3785 		break;
3786 	}
3787 
3788 	return NOTIFY_DONE;
3789 }
3790 
3791 static int bond_slave_netdev_event(unsigned long event,
3792 				   struct net_device *slave_dev)
3793 {
3794 	struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3795 	struct bonding *bond;
3796 	struct net_device *bond_dev;
3797 
3798 	/* A netdev event can be generated while enslaving a device
3799 	 * before netdev_rx_handler_register is called in which case
3800 	 * slave will be NULL
3801 	 */
3802 	if (!slave) {
3803 		netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3804 		return NOTIFY_DONE;
3805 	}
3806 
3807 	bond_dev = slave->bond->dev;
3808 	bond = slave->bond;
3809 	primary = rtnl_dereference(bond->primary_slave);
3810 
3811 	slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
3812 
3813 	switch (event) {
3814 	case NETDEV_UNREGISTER:
3815 		if (bond_dev->type != ARPHRD_ETHER)
3816 			bond_release_and_destroy(bond_dev, slave_dev);
3817 		else
3818 			__bond_release_one(bond_dev, slave_dev, false, true);
3819 		break;
3820 	case NETDEV_UP:
3821 	case NETDEV_CHANGE:
3822 		/* For 802.3ad mode only:
3823 		 * Getting invalid Speed/Duplex values here will put the
3824 		 * slave in a weird state. Mark it as link-fail if the link was
3825 		 * previously up or link-down if it hasn't yet come up, and
3826 		 * let link-monitoring (miimon) set it right when correct
3827 		 * speeds/duplex are available.
3828 		 */
3829 		if (bond_update_speed_duplex(slave) &&
3830 		    BOND_MODE(bond) == BOND_MODE_8023AD) {
3831 			if (slave->last_link_up)
3832 				slave->link = BOND_LINK_FAIL;
3833 			else
3834 				slave->link = BOND_LINK_DOWN;
3835 		}
3836 
3837 		if (BOND_MODE(bond) == BOND_MODE_8023AD)
3838 			bond_3ad_adapter_speed_duplex_changed(slave);
3839 		fallthrough;
3840 	case NETDEV_DOWN:
3841 		/* Refresh slave-array if applicable!
3842 		 * If the setup does not use miimon or arpmon (mode-specific!),
3843 		 * then these events will not cause the slave-array to be
3844 		 * refreshed. This will cause xmit to use a slave that is not
3845 		 * usable. Avoid such a situation by refreshing the array at
3846 		 * these events. If these (miimon/arpmon) parameters are
3847 		 * configured, the array gets refreshed twice; that is fine!
3848 		 */
3849 		if (bond_mode_can_use_xmit_hash(bond))
3850 			bond_update_slave_arr(bond, NULL);
3851 		break;
3852 	case NETDEV_CHANGEMTU:
3853 		/* TODO: Should slaves be allowed to
3854 		 * independently alter their MTU?  For
3855 		 * an active-backup bond, slaves need
3856 		 * not be the same type of device, so
3857 		 * MTUs may vary.  For other modes,
3858 		 * slaves arguably should have the
3859 		 * same MTUs. To do this, we'd need to
3860 		 * take over the slave's change_mtu
3861 		 * function for the duration of their
3862 		 * servitude.
3863 		 */
3864 		break;
3865 	case NETDEV_CHANGENAME:
3866 		/* we don't care if we don't have primary set */
3867 		if (!bond_uses_primary(bond) ||
3868 		    !bond->params.primary[0])
3869 			break;
3870 
3871 		if (slave == primary) {
3872 			/* slave's name changed - it's no longer the primary */
3873 			RCU_INIT_POINTER(bond->primary_slave, NULL);
3874 		} else if (!strcmp(slave_dev->name, bond->params.primary)) {
3875 			/* we have a new primary slave */
3876 			rcu_assign_pointer(bond->primary_slave, slave);
3877 		} else { /* we didn't change primary - exit */
3878 			break;
3879 		}
3880 
3881 		netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3882 			    slave == primary ? "none" : slave_dev->name);
3883 
3884 		block_netpoll_tx();
3885 		bond_select_active_slave(bond);
3886 		unblock_netpoll_tx();
3887 		break;
3888 	case NETDEV_FEAT_CHANGE:
3889 		bond_compute_features(bond);
3890 		break;
3891 	case NETDEV_RESEND_IGMP:
3892 		/* Propagate to master device */
3893 		call_netdevice_notifiers(event, slave->bond->dev);
3894 		break;
3895 	default:
3896 		break;
3897 	}
3898 
3899 	return NOTIFY_DONE;
3900 }
3901 
3902 /* bond_netdev_event: handle netdev notifier chain events.
3903  *
3904  * This function receives events for the netdev chain.  The caller (an
3905  * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3906  * locks for us to safely manipulate the slave devices (RTNL lock,
3907  * dev_probe_lock).
3908  */
3909 static int bond_netdev_event(struct notifier_block *this,
3910 			     unsigned long event, void *ptr)
3911 {
3912 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3913 
3914 	netdev_dbg(event_dev, "%s received %s\n",
3915 		   __func__, netdev_cmd_to_name(event));
3916 
3917 	if (!(event_dev->priv_flags & IFF_BONDING))
3918 		return NOTIFY_DONE;
3919 
3920 	if (event_dev->flags & IFF_MASTER) {
3921 		int ret;
3922 
3923 		ret = bond_master_netdev_event(event, event_dev);
3924 		if (ret != NOTIFY_DONE)
3925 			return ret;
3926 	}
3927 
3928 	if (event_dev->flags & IFF_SLAVE)
3929 		return bond_slave_netdev_event(event, event_dev);
3930 
3931 	return NOTIFY_DONE;
3932 }
3933 
3934 static struct notifier_block bond_netdev_notifier = {
3935 	.notifier_call = bond_netdev_event,
3936 };
3937 
3938 /*---------------------------- Hashing Policies -----------------------------*/
3939 
3940 /* Helper to access data in a packet, with or without a backing skb.
3941  * If skb is given, the data is linearized if necessary via pskb_may_pull.
3942  */
3943 static inline const void *bond_pull_data(struct sk_buff *skb,
3944 					 const void *data, int hlen, int n)
3945 {
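	/* When more than hlen bytes are needed, pskb_may_pull() may
	 * reallocate the skb data, so callers must use the returned
	 * pointer (skb->head) rather than the stale @data.
	 */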
3946 	if (likely(n <= hlen))
3947 		return data;
3948 	else if (skb && likely(pskb_may_pull(skb, n)))
3949 		return skb->head;
3950 
3951 	return NULL;
3952 }
3953 
3954 /* L2 hash helper */
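/* Worked example (illustrative addresses): dst ending :05, src ending
 * :0a and h_proto 0x0800 (IPv4) hash to 0x05 ^ 0x0a ^ 0x0800 = 0x080f.
 */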
3955 static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
3956 {
3957 	struct ethhdr *ep;
3958 
3959 	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
3960 	if (!data)
3961 		return 0;
3962 
3963 	ep = (struct ethhdr *)(data + mhoff);
3964 	return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
3965 }
3966 
3967 static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
3968 			 int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
3969 {
3970 	const struct ipv6hdr *iph6;
3971 	const struct iphdr *iph;
3972 
3973 	if (l2_proto == htons(ETH_P_IP)) {
3974 		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
3975 		if (!data)
3976 			return false;
3977 
3978 		iph = (const struct iphdr *)(data + *nhoff);
3979 		iph_to_flow_copy_v4addrs(fk, iph);
3980 		*nhoff += iph->ihl << 2;
3981 		if (!ip_is_fragment(iph))
3982 			*ip_proto = iph->protocol;
3983 	} else if (l2_proto == htons(ETH_P_IPV6)) {
3984 		data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
3985 		if (!data)
3986 			return false;
3987 
3988 		iph6 = (const struct ipv6hdr *)(data + *nhoff);
3989 		iph_to_flow_copy_v6addrs(fk, iph6);
3990 		*nhoff += sizeof(*iph6);
3991 		*ip_proto = iph6->nexthdr;
3992 	} else {
3993 		return false;
3994 	}
3995 
3996 	if (l34 && *ip_proto >= 0)
3997 		fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
3998 
3999 	return true;
4000 }
4001 
4002 static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
4003 {
4004 	u32 srcmac_vendor = 0, srcmac_dev = 0;
4005 	struct ethhdr *mac_hdr;
4006 	u16 vlan = 0;
4007 	int i;
4008 
4009 	data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4010 	if (!data)
4011 		return 0;
4012 	mac_hdr = (struct ethhdr *)(data + mhoff);
4013 
4014 	for (i = 0; i < 3; i++)
4015 		srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
4016 
4017 	for (i = 3; i < ETH_ALEN; i++)
4018 		srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
4019 
4020 	if (skb && skb_vlan_tag_present(skb))
4021 		vlan = skb_vlan_tag_get(skb);
4022 
4023 	return vlan ^ srcmac_vendor ^ srcmac_dev;
4024 }
4025 
4026 /* Extract the appropriate headers based on bond's xmit policy */
4027 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
4028 			      __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
4029 {
4030 	bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
4031 	int ip_proto = -1;
4032 
4033 	switch (bond->params.xmit_policy) {
4034 	case BOND_XMIT_POLICY_ENCAP23:
4035 	case BOND_XMIT_POLICY_ENCAP34:
4036 		memset(fk, 0, sizeof(*fk));
4037 		return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
4038 					  fk, data, l2_proto, nhoff, hlen, 0);
4039 	default:
4040 		break;
4041 	}
4042 
4043 	fk->ports.ports = 0;
4044 	memset(&fk->icmp, 0, sizeof(fk->icmp));
4045 	if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
4046 		return false;
4047 
4048 	/* ICMP error packets contain at least 8 bytes of the header of
4049 	 * the packet which generated the error. Use this information to
4050 	 * correlate an ICMP error packet with the flow that generated
4051 	 * the error.
4052 	 */
4053 	if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
4054 		skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
4055 		if (ip_proto == IPPROTO_ICMP) {
4056 			if (!icmp_is_err(fk->icmp.type))
4057 				return true;
4058 
4059 			nhoff += sizeof(struct icmphdr);
4060 		} else if (ip_proto == IPPROTO_ICMPV6) {
4061 			if (!icmpv6_is_err(fk->icmp.type))
4062 				return true;
4063 
4064 			nhoff += sizeof(struct icmp6hdr);
4065 		}
4066 		return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
4067 	}
4068 
4069 	return true;
4070 }
4071 
4072 static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
4073 {
4074 	hash ^= (__force u32)flow_get_u32_dst(flow) ^
4075 		(__force u32)flow_get_u32_src(flow);
4076 	hash ^= (hash >> 16);
4077 	hash ^= (hash >> 8);
4078 
4079 	/* discard lowest hash bit to deal with the common even ports pattern */
4080 	if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
4081 		xmit_policy == BOND_XMIT_POLICY_ENCAP34)
4082 		return hash >> 1;
4083 
4084 	return hash;
4085 }
4086 
4087 /* Generate hash based on xmit policy. If @skb is given it is used to linearize
4088  * the data as required, but this function can be used without it if the data is
4089  * known to be linear (e.g. with xdp_buff).
4090  */
4091 static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
4092 			    __be16 l2_proto, int mhoff, int nhoff, int hlen)
4093 {
4094 	struct flow_keys flow;
4095 	u32 hash;
4096 
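	/* Dispatch summary: vlan+srcmac uses its own hash; layer2 (or a
	 * failed dissect) uses the L2 hash alone; layer2+3/encap2+3 fold
	 * the L2 hash with the L3 addresses; layer3+4/encap3+4 fold the
	 * ports (or the ICMP id) with the L3 addresses instead.
	 */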
4097 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
4098 		return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
4099 
4100 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
4101 	    !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
4102 		return bond_eth_hash(skb, data, mhoff, hlen);
4103 
4104 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
4105 	    bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
4106 		hash = bond_eth_hash(skb, data, mhoff, hlen);
4107 	} else {
4108 		if (flow.icmp.id)
4109 			memcpy(&hash, &flow.icmp, sizeof(hash));
4110 		else
4111 			memcpy(&hash, &flow.ports.ports, sizeof(hash));
4112 	}
4113 
4114 	return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
4115 }
4116 
4117 /**
4118  * bond_xmit_hash - generate a hash value based on the xmit policy
4119  * @bond: bonding device
4120  * @skb: buffer to use for headers
4121  *
4122  * This function will extract the necessary headers from the skb buffer and use
4123  * them to generate a hash based on the xmit_policy set in the bonding device
4124  */
4125 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
4126 {
4127 	if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
4128 	    skb->l4_hash)
4129 		return skb->hash;
4130 
4131 	return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
4132 				skb_mac_offset(skb), skb_network_offset(skb),
4133 				skb_headlen(skb));
4134 }
4135 
4136 /**
4137  * bond_xmit_hash_xdp - generate a hash value based on the xmit policy
4138  * @bond: bonding device
4139  * @xdp: buffer to use for headers
4140  *
4141  * The XDP variant of bond_xmit_hash.
4142  */
4143 static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
4144 {
4145 	struct ethhdr *eth;
4146 
4147 	if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
4148 		return 0;
4149 
4150 	eth = (struct ethhdr *)xdp->data;
4151 
4152 	return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
4153 				sizeof(struct ethhdr), xdp->data_end - xdp->data);
4154 }
4155 
4156 /*-------------------------- Device entry points ----------------------------*/
4157 
4158 void bond_work_init_all(struct bonding *bond)
4159 {
4160 	INIT_DELAYED_WORK(&bond->mcast_work,
4161 			  bond_resend_igmp_join_requests_delayed);
4162 	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
4163 	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
4164 	INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
4165 	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
4166 	INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
4167 }
4168 
4169 static void bond_work_cancel_all(struct bonding *bond)
4170 {
4171 	cancel_delayed_work_sync(&bond->mii_work);
4172 	cancel_delayed_work_sync(&bond->arp_work);
4173 	cancel_delayed_work_sync(&bond->alb_work);
4174 	cancel_delayed_work_sync(&bond->ad_work);
4175 	cancel_delayed_work_sync(&bond->mcast_work);
4176 	cancel_delayed_work_sync(&bond->slave_arr_work);
4177 }
4178 
4179 static int bond_open(struct net_device *bond_dev)
4180 {
4181 	struct bonding *bond = netdev_priv(bond_dev);
4182 	struct list_head *iter;
4183 	struct slave *slave;
4184 
4185 	if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
4186 		bond->rr_tx_counter = alloc_percpu(u32);
4187 		if (!bond->rr_tx_counter)
4188 			return -ENOMEM;
4189 	}
4190 
4191 	/* reset slave->backup and slave->inactive */
4192 	if (bond_has_slaves(bond)) {
4193 		bond_for_each_slave(bond, slave, iter) {
4194 			if (bond_uses_primary(bond) &&
4195 			    slave != rcu_access_pointer(bond->curr_active_slave)) {
4196 				bond_set_slave_inactive_flags(slave,
4197 							      BOND_SLAVE_NOTIFY_NOW);
4198 			} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
4199 				bond_set_slave_active_flags(slave,
4200 							    BOND_SLAVE_NOTIFY_NOW);
4201 			}
4202 		}
4203 	}
4204 
4205 	if (bond_is_lb(bond)) {
4206 		/* bond_alb_initialize must be called before the timer
4207 		 * is started.
4208 		 */
4209 		if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
4210 			return -ENOMEM;
4211 		if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
4212 			queue_delayed_work(bond->wq, &bond->alb_work, 0);
4213 	}
4214 
4215 	if (bond->params.miimon)  /* link check interval, in milliseconds. */
4216 		queue_delayed_work(bond->wq, &bond->mii_work, 0);
4217 
4218 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
4219 		queue_delayed_work(bond->wq, &bond->arp_work, 0);
4220 		bond->recv_probe = bond_rcv_validate;
4221 	}
4222 
4223 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4224 		queue_delayed_work(bond->wq, &bond->ad_work, 0);
4225 		/* register to receive LACPDUs */
4226 		bond->recv_probe = bond_3ad_lacpdu_recv;
4227 		bond_3ad_initiate_agg_selection(bond, 1);
4228 
4229 		bond_for_each_slave(bond, slave, iter)
4230 			dev_mc_add(slave->dev, lacpdu_mcast_addr);
4231 	}
4232 
4233 	if (bond_mode_can_use_xmit_hash(bond))
4234 		bond_update_slave_arr(bond, NULL);
4235 
4236 	return 0;
4237 }
4238 
4239 static int bond_close(struct net_device *bond_dev)
4240 {
4241 	struct bonding *bond = netdev_priv(bond_dev);
4242 	struct slave *slave;
4243 
4244 	bond_work_cancel_all(bond);
4245 	bond->send_peer_notif = 0;
4246 	if (bond_is_lb(bond))
4247 		bond_alb_deinitialize(bond);
4248 	bond->recv_probe = NULL;
4249 
4250 	if (bond_uses_primary(bond)) {
4251 		rcu_read_lock();
4252 		slave = rcu_dereference(bond->curr_active_slave);
4253 		if (slave)
4254 			bond_hw_addr_flush(bond_dev, slave->dev);
4255 		rcu_read_unlock();
4256 	} else {
4257 		struct list_head *iter;
4258 
4259 		bond_for_each_slave(bond, slave, iter)
4260 			bond_hw_addr_flush(bond_dev, slave->dev);
4261 	}
4262 
4263 	return 0;
4264 }
4265 
4266 /* Fold stats, assuming all rtnl_link_stats64 fields are u64, while
4267  * allowing that some drivers provide 32bit values only.
4268  */
4269 static void bond_fold_stats(struct rtnl_link_stats64 *_res,
4270 			    const struct rtnl_link_stats64 *_new,
4271 			    const struct rtnl_link_stats64 *_old)
4272 {
4273 	const u64 *new = (const u64 *)_new;
4274 	const u64 *old = (const u64 *)_old;
4275 	u64 *res = (u64 *)_res;
4276 	int i;
4277 
4278 	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
4279 		u64 nv = new[i];
4280 		u64 ov = old[i];
4281 		s64 delta = nv - ov;
4282 
4283 		/* detects if this particular field is 32bit only */
4284 		if (((nv | ov) >> 32) == 0)
4285 			delta = (s64)(s32)((u32)nv - (u32)ov);
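			/* e.g. a 32bit counter wrapping from 0xfffffff0 to
			 * 0x00000012 gives delta = 0x22 (34), not a bogus
			 * huge 64bit difference (illustrative values).
			 */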
4286 
4287 		/* filter anomalies, some drivers reset their stats
4288 		 * at down/up events.
4289 		 */
4290 		if (delta > 0)
4291 			res[i] += delta;
4292 	}
4293 }
4294 
4295 #ifdef CONFIG_LOCKDEP
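/* Depth-first walk of the lower-device graph using an explicit stack
 * (no recursion); returns the maximum nesting depth found, used below
 * as the lockdep subclass when taking stats_lock.
 */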
4296 static int bond_get_lowest_level_rcu(struct net_device *dev)
4297 {
4298 	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
4299 	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
4300 	int cur = 0, max = 0;
4301 
4302 	now = dev;
4303 	iter = &dev->adj_list.lower;
4304 
4305 	while (1) {
4306 		next = NULL;
4307 		while (1) {
4308 			ldev = netdev_next_lower_dev_rcu(now, &iter);
4309 			if (!ldev)
4310 				break;
4311 
4312 			next = ldev;
4313 			niter = &ldev->adj_list.lower;
4314 			dev_stack[cur] = now;
4315 			iter_stack[cur++] = iter;
4316 			if (max <= cur)
4317 				max = cur;
4318 			break;
4319 		}
4320 
4321 		if (!next) {
4322 			if (!cur)
4323 				return max;
4324 			next = dev_stack[--cur];
4325 			niter = iter_stack[cur];
4326 		}
4327 
4328 		now = next;
4329 		iter = niter;
4330 	}
4331 
4332 	return max;
4333 }
4334 #endif
4335 
4336 static void bond_get_stats(struct net_device *bond_dev,
4337 			   struct rtnl_link_stats64 *stats)
4338 {
4339 	struct bonding *bond = netdev_priv(bond_dev);
4340 	struct rtnl_link_stats64 temp;
4341 	struct list_head *iter;
4342 	struct slave *slave;
4343 	int nest_level = 0;
4344 
4345 
4346 	rcu_read_lock();
4347 #ifdef CONFIG_LOCKDEP
4348 	nest_level = bond_get_lowest_level_rcu(bond_dev);
4349 #endif
4350 
4351 	spin_lock_nested(&bond->stats_lock, nest_level);
4352 	memcpy(stats, &bond->bond_stats, sizeof(*stats));
4353 
4354 	bond_for_each_slave_rcu(bond, slave, iter) {
4355 		const struct rtnl_link_stats64 *new =
4356 			dev_get_stats(slave->dev, &temp);
4357 
4358 		bond_fold_stats(stats, new, &slave->slave_stats);
4359 
4360 		/* save off the slave stats for the next run */
4361 		memcpy(&slave->slave_stats, new, sizeof(*new));
4362 	}
4363 
4364 	memcpy(&bond->bond_stats, stats, sizeof(*stats));
4365 	spin_unlock(&bond->stats_lock);
4366 	rcu_read_unlock();
4367 }
4368 
4369 static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4370 {
4371 	struct bonding *bond = netdev_priv(bond_dev);
4372 	struct mii_ioctl_data *mii = NULL;
4373 	const struct net_device_ops *ops;
4374 	struct net_device *real_dev;
4375 	struct hwtstamp_config cfg;
4376 	struct ifreq ifrr;
4377 	int res = 0;
4378 
4379 	netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
4380 
4381 	switch (cmd) {
4382 	case SIOCGMIIPHY:
4383 		mii = if_mii(ifr);
4384 		if (!mii)
4385 			return -EINVAL;
4386 
4387 		mii->phy_id = 0;
4388 		fallthrough;
4389 	case SIOCGMIIREG:
4390 		/* We do this again just in case we were called by SIOCGMIIREG
4391 		 * instead of SIOCGMIIPHY.
4392 		 */
4393 		mii = if_mii(ifr);
4394 		if (!mii)
4395 			return -EINVAL;
4396 
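		/* MII register 1 is the BMSR; report the bond's carrier
		 * state via BMSR_LSTATUS so MII-based tools see the link.
		 */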
4397 		if (mii->reg_num == 1) {
4398 			mii->val_out = 0;
4399 			if (netif_carrier_ok(bond->dev))
4400 				mii->val_out = BMSR_LSTATUS;
4401 		}
4402 
4403 		break;
4404 	case SIOCSHWTSTAMP:
4405 		if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4406 			return -EFAULT;
4407 
4408 		if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
4409 			return -EOPNOTSUPP;
4410 
4411 		fallthrough;
4412 	case SIOCGHWTSTAMP:
4413 		real_dev = bond_option_active_slave_get_rcu(bond);
4414 		if (!real_dev)
4415 			return -EOPNOTSUPP;
4416 
4417 		strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
4418 		ifrr.ifr_ifru = ifr->ifr_ifru;
4419 
4420 		ops = real_dev->netdev_ops;
4421 		if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) {
4422 			res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
4423 			if (res)
4424 				return res;
4425 
4426 			ifr->ifr_ifru = ifrr.ifr_ifru;
4427 			if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4428 				return -EFAULT;
4429 
4430 			/* Set the BOND_PHC_INDEX flag to notify user space */
4431 			cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
4432 
4433 			return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
4434 				-EFAULT : 0;
4435 		}
4436 		fallthrough;
4437 	default:
4438 		res = -EOPNOTSUPP;
4439 	}
4440 
4441 	return res;
4442 }
4443 
4444 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4445 {
4446 	struct bonding *bond = netdev_priv(bond_dev);
4447 	struct net_device *slave_dev = NULL;
4448 	struct ifbond k_binfo;
4449 	struct ifbond __user *u_binfo = NULL;
4450 	struct ifslave k_sinfo;
4451 	struct ifslave __user *u_sinfo = NULL;
4452 	struct bond_opt_value newval;
4453 	struct net *net;
4454 	int res = 0;
4455 
4456 	netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
4457 
4458 	switch (cmd) {
4459 	case SIOCBONDINFOQUERY:
4460 		u_binfo = (struct ifbond __user *)ifr->ifr_data;
4461 
4462 		if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
4463 			return -EFAULT;
4464 
4465 		bond_info_query(bond_dev, &k_binfo);
4466 		if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
4467 			return -EFAULT;
4468 
4469 		return 0;
4470 	case SIOCBONDSLAVEINFOQUERY:
4471 		u_sinfo = (struct ifslave __user *)ifr->ifr_data;
4472 
4473 		if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
4474 			return -EFAULT;
4475 
4476 		res = bond_slave_info_query(bond_dev, &k_sinfo);
4477 		if (res == 0 &&
4478 		    copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
4479 			return -EFAULT;
4480 
4481 		return res;
4482 	default:
4483 		break;
4484 	}
4485 
4486 	net = dev_net(bond_dev);
4487 
4488 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4489 		return -EPERM;
4490 
4491 	slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
4492 
4493 	slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
4494 
4495 	if (!slave_dev)
4496 		return -ENODEV;
4497 
4498 	switch (cmd) {
4499 	case SIOCBONDENSLAVE:
4500 		res = bond_enslave(bond_dev, slave_dev, NULL);
4501 		break;
4502 	case SIOCBONDRELEASE:
4503 		res = bond_release(bond_dev, slave_dev);
4504 		break;
4505 	case SIOCBONDSETHWADDR:
4506 		res = bond_set_dev_addr(bond_dev, slave_dev);
4507 		break;
4508 	case SIOCBONDCHANGEACTIVE:
4509 		bond_opt_initstr(&newval, slave_dev->name);
4510 		res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
4511 					    &newval);
4512 		break;
4513 	default:
4514 		res = -EOPNOTSUPP;
4515 	}
4516 
4517 	return res;
4518 }
4519 
4520 static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
4521 			       void __user *data, int cmd)
4522 {
4523 	struct ifreq ifrdata = { .ifr_data = data };
4524 
4525 	switch (cmd) {
4526 	case BOND_INFO_QUERY_OLD:
4527 		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
4528 	case BOND_SLAVE_INFO_QUERY_OLD:
4529 		return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
4530 	case BOND_ENSLAVE_OLD:
4531 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
4532 	case BOND_RELEASE_OLD:
4533 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
4534 	case BOND_SETHWADDR_OLD:
4535 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
4536 	case BOND_CHANGE_ACTIVE_OLD:
4537 		return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
4538 	}
4539 
4540 	return -EOPNOTSUPP;
4541 }
4542 
4543 static void bond_change_rx_flags(struct net_device *bond_dev, int change)
4544 {
4545 	struct bonding *bond = netdev_priv(bond_dev);
4546 
4547 	if (change & IFF_PROMISC)
4548 		bond_set_promiscuity(bond,
4549 				     bond_dev->flags & IFF_PROMISC ? 1 : -1);
4550 
4551 	if (change & IFF_ALLMULTI)
4552 		bond_set_allmulti(bond,
4553 				  bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
4554 }
4555 
4556 static void bond_set_rx_mode(struct net_device *bond_dev)
4557 {
4558 	struct bonding *bond = netdev_priv(bond_dev);
4559 	struct list_head *iter;
4560 	struct slave *slave;
4561 
4562 	rcu_read_lock();
4563 	if (bond_uses_primary(bond)) {
4564 		slave = rcu_dereference(bond->curr_active_slave);
4565 		if (slave) {
4566 			dev_uc_sync(slave->dev, bond_dev);
4567 			dev_mc_sync(slave->dev, bond_dev);
4568 		}
4569 	} else {
4570 		bond_for_each_slave_rcu(bond, slave, iter) {
4571 			dev_uc_sync_multiple(slave->dev, bond_dev);
4572 			dev_mc_sync_multiple(slave->dev, bond_dev);
4573 		}
4574 	}
4575 	rcu_read_unlock();
4576 }
4577 
4578 static int bond_neigh_init(struct neighbour *n)
4579 {
4580 	struct bonding *bond = netdev_priv(n->dev);
4581 	const struct net_device_ops *slave_ops;
4582 	struct neigh_parms parms;
4583 	struct slave *slave;
4584 	int ret = 0;
4585 
4586 	rcu_read_lock();
4587 	slave = bond_first_slave_rcu(bond);
4588 	if (!slave)
4589 		goto out;
4590 	slave_ops = slave->dev->netdev_ops;
4591 	if (!slave_ops->ndo_neigh_setup)
4592 		goto out;
4593 
4594 	/* TODO: find another way [1] to implement this.
4595 	 * Passing a zeroed structure is fragile,
4596 	 * but at least we do not pass garbage.
4597 	 *
4598 	 * [1] One way would be that ndo_neigh_setup() never touch
4599 	 *     struct neigh_parms, but propagate the new neigh_setup()
4600 	 *     back to ___neigh_create() / neigh_parms_alloc()
4601 	 */
4602 	memset(&parms, 0, sizeof(parms));
4603 	ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
4604 
4605 	if (ret)
4606 		goto out;
4607 
4608 	if (parms.neigh_setup)
4609 		ret = parms.neigh_setup(n);
4610 out:
4611 	rcu_read_unlock();
4612 	return ret;
4613 }
4614 
4615 /* The bonding ndo_neigh_setup is called at init time before any
4616  * slave exists, so we must declare a proxy setup function which will
4617  * be used at run time to resolve the actual slave neigh param setup.
4618  *
4619  * It's also called by master devices (such as vlans) to setup their
4620  * underlying devices. In that case - do nothing, we're already set up from
4621  * our init.
4622  */
4623 static int bond_neigh_setup(struct net_device *dev,
4624 			    struct neigh_parms *parms)
4625 {
4626 	/* modify only our neigh_parms */
4627 	if (parms->dev == dev)
4628 		parms->neigh_setup = bond_neigh_init;
4629 
4630 	return 0;
4631 }
4632 
4633 /* Change the MTU of all of a master's slaves to match the master */
4634 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4635 {
4636 	struct bonding *bond = netdev_priv(bond_dev);
4637 	struct slave *slave, *rollback_slave;
4638 	struct list_head *iter;
4639 	int res = 0;
4640 
4641 	netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
4642 
4643 	bond_for_each_slave(bond, slave, iter) {
4644 		slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
4645 			   slave, slave->dev->netdev_ops->ndo_change_mtu);
4646 
4647 		res = dev_set_mtu(slave->dev, new_mtu);
4648 
4649 		if (res) {
4650 			/* If we failed to set the slave's mtu to the new value
4651 			 * we must abort the operation even in ACTIVE_BACKUP
4652 			 * mode, because if we allow the backup slaves to have
4653 			 * different mtu values than the active slave we'll
4654 			 * need to change their mtu when doing a failover. That
4655 			 * means changing their mtu from timer context, which
4656 			 * is probably not a good idea.
4657 			 */
4658 			slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
4659 				  res, new_mtu);
4660 			goto unwind;
4661 		}
4662 	}
4663 
4664 	bond_dev->mtu = new_mtu;
4665 
4666 	return 0;
4667 
4668 unwind:
4669 	/* unwind from head to the slave that failed */
4670 	bond_for_each_slave(bond, rollback_slave, iter) {
4671 		int tmp_res;
4672 
4673 		if (rollback_slave == slave)
4674 			break;
4675 
4676 		tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
4677 		if (tmp_res)
4678 			slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
4679 				  tmp_res);
4680 	}
4681 
4682 	return res;
4683 }
4684 
4685 /* Change HW address
4686  *
4687  * Note that many devices must be down to change the HW address, and
4688  * downing the master releases all slaves.  We can make bonds full of
4689  * bonding devices to test this, however.
4690  */
4691 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4692 {
4693 	struct bonding *bond = netdev_priv(bond_dev);
4694 	struct slave *slave, *rollback_slave;
4695 	struct sockaddr_storage *ss = addr, tmp_ss;
4696 	struct list_head *iter;
4697 	int res = 0;
4698 
4699 	if (BOND_MODE(bond) == BOND_MODE_ALB)
4700 		return bond_alb_set_mac_address(bond_dev, addr);
4701 
4702 
4703 	netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
4704 
4705 	/* If fail_over_mac is enabled, do nothing and return success.
4706 	 * Returning an error causes ifenslave to fail.
4707 	 */
4708 	if (bond->params.fail_over_mac &&
4709 	    BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
4710 		return 0;
4711 
4712 	if (!is_valid_ether_addr(ss->__data))
4713 		return -EADDRNOTAVAIL;
4714 
4715 	bond_for_each_slave(bond, slave, iter) {
4716 		slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
4717 			  __func__, slave);
4718 		res = dev_set_mac_address(slave->dev, addr, NULL);
4719 		if (res) {
4720 			/* TODO: consider downing the slave
4721 			 * and retry ?
4722 			 * User should expect communications
4723 			 * breakage anyway until ARP finishes
4724 			 * updating, so...
4725 			 */
4726 			slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
4727 				  __func__, res);
4728 			goto unwind;
4729 		}
4730 	}
4731 
4732 	/* success */
4733 	dev_addr_set(bond_dev, ss->__data);
4734 	return 0;
4735 
4736 unwind:
4737 	memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
4738 	tmp_ss.ss_family = bond_dev->type;
4739 
4740 	/* unwind from head to the slave that failed */
4741 	bond_for_each_slave(bond, rollback_slave, iter) {
4742 		int tmp_res;
4743 
4744 		if (rollback_slave == slave)
4745 			break;
4746 
4747 		tmp_res = dev_set_mac_address(rollback_slave->dev,
4748 					      (struct sockaddr *)&tmp_ss, NULL);
4749 		if (tmp_res) {
4750 			slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
4751 				   __func__, tmp_res);
4752 		}
4753 	}
4754 
4755 	return res;
4756 }
4757 
4758 /**
4759  * bond_get_slave_by_id - get xmit slave with slave_id
4760  * @bond: bonding device that is transmitting
4761  * @slave_id: slave id up to slave_cnt-1 through which to transmit
4762  *
4763  * This function tries to get slave with slave_id but in case
4764  * it fails, it tries to find the first available slave for transmission.
4765  */
4766 static struct slave *bond_get_slave_by_id(struct bonding *bond,
4767 					  int slave_id)
4768 {
4769 	struct list_head *iter;
4770 	struct slave *slave;
4771 	int i = slave_id;
4772 
4773 	/* Here we start from the slave with slave_id */
4774 	bond_for_each_slave_rcu(bond, slave, iter) {
4775 		if (--i < 0) {
4776 			if (bond_slave_can_tx(slave))
4777 				return slave;
4778 		}
4779 	}
4780 
4781 	/* Here we start from the first slave up to slave_id */
4782 	i = slave_id;
4783 	bond_for_each_slave_rcu(bond, slave, iter) {
4784 		if (--i < 0)
4785 			break;
4786 		if (bond_slave_can_tx(slave))
4787 			return slave;
4788 	}
4789 	/* no slave that can tx has been found */
4790 	return NULL;
4791 }
4792 
4793 /**
4794  * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
4795  * @bond: bonding device to use
4796  *
4797  * Based on the value of the bonding device's packets_per_slave parameter
4798  * this function generates a slave id, which is usually used as the next
4799  * slave to transmit through.
4800  */
4801 static u32 bond_rr_gen_slave_id(struct bonding *bond)
4802 {
4803 	u32 slave_id;
4804 	struct reciprocal_value reciprocal_packets_per_slave;
4805 	int packets_per_slave = bond->params.packets_per_slave;
4806 
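	/* packets_per_slave == 0: pick a random slave for each packet;
	 * packets_per_slave == 1: plain per-CPU counter, strict
	 * round-robin; packets_per_slave >= 2 (e.g. 3, illustrative):
	 * reciprocal_divide() scales the counter so each slave sends 3
	 * packets in a row before moving on.
	 */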
4807 	switch (packets_per_slave) {
4808 	case 0:
4809 		slave_id = get_random_u32();
4810 		break;
4811 	case 1:
4812 		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4813 		break;
4814 	default:
4815 		reciprocal_packets_per_slave =
4816 			bond->params.reciprocal_packets_per_slave;
4817 		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4818 		slave_id = reciprocal_divide(slave_id,
4819 					     reciprocal_packets_per_slave);
4820 		break;
4821 	}
4822 
4823 	return slave_id;
4824 }
4825 
4826 static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
4827 						    struct sk_buff *skb)
4828 {
4829 	struct slave *slave;
4830 	int slave_cnt;
4831 	u32 slave_id;
4832 
4833 	/* Start with the curr_active_slave that joined the bond as the
4834 	 * default for sending IGMP traffic.  For failover purposes one
4835 	 * needs to maintain some consistency for the interface that will
4836 	 * send the join/membership reports.  The curr_active_slave found
4837 	 * will send all of this type of traffic.
4838 	 */
4839 	if (skb->protocol == htons(ETH_P_IP)) {
4840 		int noff = skb_network_offset(skb);
4841 		struct iphdr *iph;
4842 
4843 		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
4844 			goto non_igmp;
4845 
4846 		iph = ip_hdr(skb);
4847 		if (iph->protocol == IPPROTO_IGMP) {
4848 			slave = rcu_dereference(bond->curr_active_slave);
4849 			if (slave)
4850 				return slave;
4851 			return bond_get_slave_by_id(bond, 0);
4852 		}
4853 	}
4854 
4855 non_igmp:
4856 	slave_cnt = READ_ONCE(bond->slave_cnt);
4857 	if (likely(slave_cnt)) {
4858 		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4859 		return bond_get_slave_by_id(bond, slave_id);
4860 	}
4861 	return NULL;
4862 }
4863 
4864 static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
4865 							struct xdp_buff *xdp)
4866 {
4867 	struct slave *slave;
4868 	int slave_cnt;
4869 	u32 slave_id;
4870 	const struct ethhdr *eth;
4871 	void *data = xdp->data;
4872 
4873 	if (data + sizeof(struct ethhdr) > xdp->data_end)
4874 		goto non_igmp;
4875 
4876 	eth = (struct ethhdr *)data;
4877 	data += sizeof(struct ethhdr);
4878 
4879 	/* See comment on IGMP in bond_xmit_roundrobin_slave_get() */
4880 	if (eth->h_proto == htons(ETH_P_IP)) {
4881 		const struct iphdr *iph;
4882 
4883 		if (data + sizeof(struct iphdr) > xdp->data_end)
4884 			goto non_igmp;
4885 
4886 		iph = (struct iphdr *)data;
4887 
4888 		if (iph->protocol == IPPROTO_IGMP) {
4889 			slave = rcu_dereference(bond->curr_active_slave);
4890 			if (slave)
4891 				return slave;
4892 			return bond_get_slave_by_id(bond, 0);
4893 		}
4894 	}
4895 
4896 non_igmp:
4897 	slave_cnt = READ_ONCE(bond->slave_cnt);
4898 	if (likely(slave_cnt)) {
4899 		slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4900 		return bond_get_slave_by_id(bond, slave_id);
4901 	}
4902 	return NULL;
4903 }
4904 
4905 static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
4906 					struct net_device *bond_dev)
4907 {
4908 	struct bonding *bond = netdev_priv(bond_dev);
4909 	struct slave *slave;
4910 
4911 	slave = bond_xmit_roundrobin_slave_get(bond, skb);
4912 	if (likely(slave))
4913 		return bond_dev_queue_xmit(bond, skb, slave->dev);
4914 
4915 	return bond_tx_drop(bond_dev, skb);
4916 }
4917 
4918 static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
4919 {
4920 	return rcu_dereference(bond->curr_active_slave);
4921 }
4922 
4923 /* In active-backup mode, we know that bond->curr_active_slave is always valid if
4924  * the bond has a usable interface.
4925  */
4926 static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
4927 					  struct net_device *bond_dev)
4928 {
4929 	struct bonding *bond = netdev_priv(bond_dev);
4930 	struct slave *slave;
4931 
4932 	slave = bond_xmit_activebackup_slave_get(bond);
4933 	if (slave)
4934 		return bond_dev_queue_xmit(bond, skb, slave->dev);
4935 
4936 	return bond_tx_drop(bond_dev, skb);
4937 }
4938 
4939 /* Use this to update the slave array when (a) it's not appropriate to
4940  * update it right away (note that bond_update_slave_arr() may sleep)
4941  * and/or (b) RTNL is not held.
4942  */
4943 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
4944 {
4945 	queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
4946 }
4947 
4948 /* Slave array work handler. Holds only RTNL */
4949 static void bond_slave_arr_handler(struct work_struct *work)
4950 {
4951 	struct bonding *bond = container_of(work, struct bonding,
4952 					    slave_arr_work.work);
4953 	int ret;
4954 
4955 	if (!rtnl_trylock())
4956 		goto err;
4957 
4958 	ret = bond_update_slave_arr(bond, NULL);
4959 	rtnl_unlock();
4960 	if (ret) {
4961 		pr_warn_ratelimited("Failed to update slave array from WT\n");
4962 		goto err;
4963 	}
4964 	return;
4965 
4966 err:
4967 	bond_slave_arr_work_rearm(bond, 1);
4968 }
4969 
4970 static void bond_skip_slave(struct bond_up_slave *slaves,
4971 			    struct slave *skipslave)
4972 {
4973 	int idx;
4974 
4975 	/* Rare situation where caller has asked to skip a specific
4976 	 * slave but allocation failed (most likely!). BTW this is
4977 	 * only possible when the call is initiated from
4978 	 * __bond_release_one(). In this situation, overwrite the
4979 	 * skipslave entry in the array with the last entry from the
4980 	 * array to avoid a situation where the xmit path may choose
4981 	 * this to-be-skipped slave to send a packet out.
4982 	 */
4983 	for (idx = 0; slaves && idx < slaves->count; idx++) {
4984 		if (skipslave == slaves->arr[idx]) {
4985 			slaves->arr[idx] =
4986 				slaves->arr[slaves->count - 1];
4987 			slaves->count--;
4988 			break;
4989 		}
4990 	}
4991 }
4992 
4993 static void bond_set_slave_arr(struct bonding *bond,
4994 			       struct bond_up_slave *usable_slaves,
4995 			       struct bond_up_slave *all_slaves)
4996 {
4997 	struct bond_up_slave *usable, *all;
4998 
4999 	usable = rtnl_dereference(bond->usable_slaves);
5000 	rcu_assign_pointer(bond->usable_slaves, usable_slaves);
5001 	kfree_rcu(usable, rcu);
5002 
5003 	all = rtnl_dereference(bond->all_slaves);
5004 	rcu_assign_pointer(bond->all_slaves, all_slaves);
5005 	kfree_rcu(all, rcu);
5006 }
5007 
5008 static void bond_reset_slave_arr(struct bonding *bond)
5009 {
5010 	struct bond_up_slave *usable, *all;
5011 
5012 	usable = rtnl_dereference(bond->usable_slaves);
5013 	if (usable) {
5014 		RCU_INIT_POINTER(bond->usable_slaves, NULL);
5015 		kfree_rcu(usable, rcu);
5016 	}
5017 
5018 	all = rtnl_dereference(bond->all_slaves);
5019 	if (all) {
5020 		RCU_INIT_POINTER(bond->all_slaves, NULL);
5021 		kfree_rcu(all, rcu);
5022 	}
5023 }
5024 
5025 /* Build the usable slaves array in control path for modes that use xmit-hash
5026  * to determine the slave interface -
5027  * (a) BOND_MODE_8023AD
5028  * (b) BOND_MODE_XOR
5029  * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
5030  *
5031  * The caller is expected to hold RTNL only and NO other lock!
5032  */
5033 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
5034 {
5035 	struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
5036 	struct slave *slave;
5037 	struct list_head *iter;
5038 	int agg_id = 0;
5039 	int ret = 0;
5040 
5041 	might_sleep();
5042 
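	/* Build fresh arrays sized for the current slave count; on
	 * success they replace the old ones under RCU in
	 * bond_set_slave_arr().
	 */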
5043 	usable_slaves = kzalloc(struct_size(usable_slaves, arr,
5044 					    bond->slave_cnt), GFP_KERNEL);
5045 	all_slaves = kzalloc(struct_size(all_slaves, arr,
5046 					 bond->slave_cnt), GFP_KERNEL);
5047 	if (!usable_slaves || !all_slaves) {
5048 		ret = -ENOMEM;
5049 		goto out;
5050 	}
5051 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5052 		struct ad_info ad_info;
5053 
5054 		spin_lock_bh(&bond->mode_lock);
5055 		if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
5056 			spin_unlock_bh(&bond->mode_lock);
5057 			pr_debug("bond_3ad_get_active_agg_info failed\n");
5058 			/* No active aggregator means it's not safe to use
5059 			 * the previous array.
5060 			 */
5061 			bond_reset_slave_arr(bond);
5062 			goto out;
5063 		}
5064 		spin_unlock_bh(&bond->mode_lock);
5065 		agg_id = ad_info.aggregator_id;
5066 	}
5067 	bond_for_each_slave(bond, slave, iter) {
5068 		if (skipslave == slave)
5069 			continue;
5070 
5071 		all_slaves->arr[all_slaves->count++] = slave;
5072 		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5073 			struct aggregator *agg;
5074 
5075 			agg = SLAVE_AD_INFO(slave)->port.aggregator;
5076 			if (!agg || agg->aggregator_identifier != agg_id)
5077 				continue;
5078 		}
5079 		if (!bond_slave_can_tx(slave))
5080 			continue;
5081 
5082 		slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
5083 			  usable_slaves->count);
5084 
5085 		usable_slaves->arr[usable_slaves->count++] = slave;
5086 	}
5087 
5088 	bond_set_slave_arr(bond, usable_slaves, all_slaves);
5089 	return ret;
5090 out:
5091 	if (ret != 0 && skipslave) {
5092 		bond_skip_slave(rtnl_dereference(bond->all_slaves),
5093 				skipslave);
5094 		bond_skip_slave(rtnl_dereference(bond->usable_slaves),
5095 				skipslave);
5096 	}
5097 	kfree_rcu(all_slaves, rcu);
5098 	kfree_rcu(usable_slaves, rcu);
5099 
5100 	return ret;
5101 }
5102 
5103 static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
5104 						 struct sk_buff *skb,
5105 						 struct bond_up_slave *slaves)
5106 {
5107 	struct slave *slave;
5108 	unsigned int count;
5109 	u32 hash;
5110 
5111 	hash = bond_xmit_hash(bond, skb);
5112 	count = slaves ? READ_ONCE(slaves->count) : 0;
5113 	if (unlikely(!count))
5114 		return NULL;
5115 
5116 	slave = slaves->arr[hash % count];
5117 	return slave;
5118 }
5119 
5120 static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
5121 						     struct xdp_buff *xdp)
5122 {
5123 	struct bond_up_slave *slaves;
5124 	unsigned int count;
5125 	u32 hash;
5126 
5127 	hash = bond_xmit_hash_xdp(bond, xdp);
5128 	slaves = rcu_dereference(bond->usable_slaves);
5129 	count = slaves ? READ_ONCE(slaves->count) : 0;
5130 	if (unlikely(!count))
5131 		return NULL;
5132 
5133 	return slaves->arr[hash % count];
5134 }
5135 
5136 /* Use this Xmit function for 3AD as well as XOR modes. The current
5137  * usable slave array is formed in the control path. The xmit function
5138  * just calculates hash and sends the packet out.
5139  */
5140 static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
5141 				     struct net_device *dev)
5142 {
5143 	struct bonding *bond = netdev_priv(dev);
5144 	struct bond_up_slave *slaves;
5145 	struct slave *slave;
5146 
5147 	slaves = rcu_dereference(bond->usable_slaves);
5148 	slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5149 	if (likely(slave))
5150 		return bond_dev_queue_xmit(bond, skb, slave->dev);
5151 
5152 	return bond_tx_drop(dev, skb);
5153 }
5154 
5155 /* in broadcast mode, we send everything to all usable interfaces. */
5156 static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
5157 				       struct net_device *bond_dev)
5158 {
5159 	struct bonding *bond = netdev_priv(bond_dev);
5160 	struct slave *slave = NULL;
5161 	struct list_head *iter;
5162 	bool xmit_suc = false;
5163 	bool skb_used = false;
5164 
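	/* Clone the skb for every slave but the last, which consumes the
	 * original; if no up slave took the original, free it below.
	 */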
5165 	bond_for_each_slave_rcu(bond, slave, iter) {
5166 		struct sk_buff *skb2;
5167 
5168 		if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
5169 			continue;
5170 
5171 		if (bond_is_last_slave(bond, slave)) {
5172 			skb2 = skb;
5173 			skb_used = true;
5174 		} else {
5175 			skb2 = skb_clone(skb, GFP_ATOMIC);
5176 			if (!skb2) {
5177 				net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
5178 						    bond_dev->name, __func__);
5179 				continue;
5180 			}
5181 		}
5182 
5183 		if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
5184 			xmit_suc = true;
5185 	}
5186 
5187 	if (!skb_used)
5188 		dev_kfree_skb_any(skb);
5189 
5190 	if (xmit_suc)
5191 		return NETDEV_TX_OK;
5192 
5193 	dev_core_stats_tx_dropped_inc(bond_dev);
5194 	return NET_XMIT_DROP;
5195 }
5196 
5197 /*------------------------- Device initialization ---------------------------*/
5198 
5199 /* Lookup the slave that corresponds to a qid */
5200 static inline int bond_slave_override(struct bonding *bond,
5201 				      struct sk_buff *skb)
5202 {
5203 	struct slave *slave = NULL;
5204 	struct list_head *iter;
5205 
5206 	if (!skb_rx_queue_recorded(skb))
5207 		return 1;
5208 
5209 	/* Find out if any slaves have the same mapping as this skb. */
5210 	bond_for_each_slave_rcu(bond, slave, iter) {
5211 		if (slave->queue_id == skb_get_queue_mapping(skb)) {
5212 			if (bond_slave_is_up(slave) &&
5213 			    slave->link == BOND_LINK_UP) {
5214 				bond_dev_queue_xmit(bond, skb, slave->dev);
5215 				return 0;
5216 			}
5217 			/* If the slave isn't UP, use default transmit policy. */
5218 			break;
5219 		}
5220 	}
5221 
5222 	return 1;
5223 }
5224 
5225 
5226 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
5227 			     struct net_device *sb_dev)
5228 {
5229 	/* This helper function exists to help dev_pick_tx get the correct
5230 	 * destination queue.  Using a helper function skips a call to
5231 	 * skb_tx_hash and will put the skbs in the queue we expect on their
5232 	 * way down to the bonding driver.
5233 	 */
5234 	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
5235 
5236 	/* Save the original txq to restore before passing to the driver */
5237 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
5238 
5239 	if (unlikely(txq >= dev->real_num_tx_queues)) {
5240 		do {
5241 			txq -= dev->real_num_tx_queues;
5242 		} while (txq >= dev->real_num_tx_queues);
5243 	}
5244 	return txq;
5245 }
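/* [Editorial example] The loop above folds an out-of-range txq into range by
 * repeated subtraction; it computes the same value as
 * "txq % real_num_tx_queues" but skips the division entirely in the common
 * in-range case. Standalone sketch:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static uint16_t fold_txq(uint16_t txq, uint16_t nqueues)
 *	{
 *		if (txq >= nqueues) {
 *			do {
 *				txq -= nqueues;	// same result as txq % nqueues
 *			} while (txq >= nqueues);
 *		}
 *		return txq;
 *	}
 *
 *	int main(void)
 *	{
 *		assert(fold_txq(3, 8) == 3);	// in range: untouched
 *		assert(fold_txq(13, 8) == 5);	// folded once
 *		assert(fold_txq(29, 8) == 5);	// folded repeatedly
 *		return 0;
 *	}
 */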
5246 
5247 static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
5248 					      struct sk_buff *skb,
5249 					      bool all_slaves)
5250 {
5251 	struct bonding *bond = netdev_priv(master_dev);
5252 	struct bond_up_slave *slaves;
5253 	struct slave *slave = NULL;
5254 
5255 	switch (BOND_MODE(bond)) {
5256 	case BOND_MODE_ROUNDROBIN:
5257 		slave = bond_xmit_roundrobin_slave_get(bond, skb);
5258 		break;
5259 	case BOND_MODE_ACTIVEBACKUP:
5260 		slave = bond_xmit_activebackup_slave_get(bond);
5261 		break;
5262 	case BOND_MODE_8023AD:
5263 	case BOND_MODE_XOR:
5264 		if (all_slaves)
5265 			slaves = rcu_dereference(bond->all_slaves);
5266 		else
5267 			slaves = rcu_dereference(bond->usable_slaves);
5268 		slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5269 		break;
5270 	case BOND_MODE_BROADCAST:
5271 		break;
5272 	case BOND_MODE_ALB:
5273 		slave = bond_xmit_alb_slave_get(bond, skb);
5274 		break;
5275 	case BOND_MODE_TLB:
5276 		slave = bond_xmit_tlb_slave_get(bond, skb);
5277 		break;
5278 	default:
5279 		/* Should never happen, mode already checked */
5280 		WARN_ONCE(true, "Unknown bonding mode");
5281 		break;
5282 	}
5283 
5284 	if (slave)
5285 		return slave->dev;
5286 	return NULL;
5287 }
5288 
5289 static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
5290 {
5291 	switch (sk->sk_family) {
5292 #if IS_ENABLED(CONFIG_IPV6)
5293 	case AF_INET6:
5294 		if (ipv6_only_sock(sk) ||
5295 		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
5296 			flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
5297 			flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
5298 			flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
5299 			break;
5300 		}
5301 		fallthrough;
5302 #endif
5303 	default: /* AF_INET */
5304 		flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
5305 		flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
5306 		flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
5307 		break;
5308 	}
5309 
5310 	flow->ports.src = inet_sk(sk)->inet_sport;
5311 	flow->ports.dst = inet_sk(sk)->inet_dport;
5312 }
5313 
5314 /**
5315  * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
5316  * @sk: socket to use for headers
5317  *
5318  * This function will extract the necessary field from the socket and use
5319  * them to generate a hash based on the LAYER34 xmit_policy.
5320  * Assumes that sk is a TCP or UDP socket.
5321  */
5322 static u32 bond_sk_hash_l34(struct sock *sk)
5323 {
5324 	struct flow_keys flow;
5325 	u32 hash;
5326 
5327 	bond_sk_to_flow(sk, &flow);
5328 
5329 	/* L4 */
5330 	memcpy(&hash, &flow.ports.ports, sizeof(hash));
5331 	/* L3 */
5332 	return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
5333 }
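/* [Editorial example] The LAYER34 idea in miniature: seed the hash with the
 * L4 port pair, then mix in the L3 addresses, so a flow's whole 5-tuple
 * steers it to one stable slave. The mixing below is illustrative only;
 * the driver's real mixing lives in bond_ip_hash().
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static uint32_t ex_l34_hash(uint32_t saddr, uint32_t daddr,
 *				    uint16_t sport, uint16_t dport)
 *	{
 *		uint32_t hash = ((uint32_t)sport << 16) | dport;  // L4 seed
 *
 *		hash ^= saddr ^ daddr;	// fold in L3
 *		hash ^= hash >> 16;	// spread entropy into low bits
 *		hash ^= hash >> 8;
 *		return hash;
 *	}
 *
 *	int main(void)
 *	{
 *		// same flow -> same hash -> same slave after "hash % count"
 *		printf("%u\n", ex_l34_hash(0x0a000001, 0x0a000002,
 *					   40000, 443) % 2);
 *		return 0;
 *	}
 */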
5334 
5335 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
5336 						  struct sock *sk)
5337 {
5338 	struct bond_up_slave *slaves;
5339 	struct slave *slave;
5340 	unsigned int count;
5341 	u32 hash;
5342 
5343 	slaves = rcu_dereference(bond->usable_slaves);
5344 	count = slaves ? READ_ONCE(slaves->count) : 0;
5345 	if (unlikely(!count))
5346 		return NULL;
5347 
5348 	hash = bond_sk_hash_l34(sk);
5349 	slave = slaves->arr[hash % count];
5350 
5351 	return slave->dev;
5352 }
5353 
5354 static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
5355 						struct sock *sk)
5356 {
5357 	struct bonding *bond = netdev_priv(dev);
5358 	struct net_device *lower = NULL;
5359 
5360 	rcu_read_lock();
5361 	if (bond_sk_check(bond))
5362 		lower = __bond_sk_get_lower_dev(bond, sk);
5363 	rcu_read_unlock();
5364 
5365 	return lower;
5366 }
5367 
5368 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5369 static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
5370 					struct net_device *dev)
5371 {
5372 	struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
5373 
5374 	/* tls_netdev can become NULL even after tls_is_sk_tx_device_offloaded
5375 	 * returned true if tls_device_down is running in parallel. That is
5376 	 * fine: bond_get_slave_by_dev checks for NULL.
5377 	 */
5378 	if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
5379 		return bond_dev_queue_xmit(bond, skb, tls_netdev);
5380 	return bond_tx_drop(dev, skb);
5381 }
5382 #endif
5383 
5384 static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5385 {
5386 	struct bonding *bond = netdev_priv(dev);
5387 
5388 	if (bond_should_override_tx_queue(bond) &&
5389 	    !bond_slave_override(bond, skb))
5390 		return NETDEV_TX_OK;
5391 
5392 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5393 	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
5394 		return bond_tls_device_xmit(bond, skb, dev);
5395 #endif
5396 
5397 	switch (BOND_MODE(bond)) {
5398 	case BOND_MODE_ROUNDROBIN:
5399 		return bond_xmit_roundrobin(skb, dev);
5400 	case BOND_MODE_ACTIVEBACKUP:
5401 		return bond_xmit_activebackup(skb, dev);
5402 	case BOND_MODE_8023AD:
5403 	case BOND_MODE_XOR:
5404 		return bond_3ad_xor_xmit(skb, dev);
5405 	case BOND_MODE_BROADCAST:
5406 		return bond_xmit_broadcast(skb, dev);
5407 	case BOND_MODE_ALB:
5408 		return bond_alb_xmit(skb, dev);
5409 	case BOND_MODE_TLB:
5410 		return bond_tlb_xmit(skb, dev);
5411 	default:
5412 		/* Should never happen, mode already checked */
5413 		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
5414 		WARN_ON_ONCE(1);
5415 		return bond_tx_drop(dev, skb);
5416 	}
5417 }
5418 
5419 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5420 {
5421 	struct bonding *bond = netdev_priv(dev);
5422 	netdev_tx_t ret = NETDEV_TX_OK;
5423 
5424 	/* If we risk deadlock from transmitting this in the
5425 	 * netpoll path, tell netpoll to queue the frame for later tx
5426 	 */
5427 	if (unlikely(is_netpoll_tx_blocked(dev)))
5428 		return NETDEV_TX_BUSY;
5429 
5430 	rcu_read_lock();
5431 	if (bond_has_slaves(bond))
5432 		ret = __bond_start_xmit(skb, dev);
5433 	else
5434 		ret = bond_tx_drop(dev, skb);
5435 	rcu_read_unlock();
5436 
5437 	return ret;
5438 }
5439 
5440 static struct net_device *
5441 bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
5442 {
5443 	struct bonding *bond = netdev_priv(bond_dev);
5444 	struct slave *slave;
5445 
5446 	/* Caller needs to hold rcu_read_lock() */
5447 
5448 	switch (BOND_MODE(bond)) {
5449 	case BOND_MODE_ROUNDROBIN:
5450 		slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
5451 		break;
5452 
5453 	case BOND_MODE_ACTIVEBACKUP:
5454 		slave = bond_xmit_activebackup_slave_get(bond);
5455 		break;
5456 
5457 	case BOND_MODE_8023AD:
5458 	case BOND_MODE_XOR:
5459 		slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
5460 		break;
5461 
5462 	default:
5463 		/* Should never happen. Mode guarded by bond_xdp_check() */
5464 		netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
5465 		WARN_ON_ONCE(1);
5466 		return NULL;
5467 	}
5468 
5469 	if (slave)
5470 		return slave->dev;
5471 
5472 	return NULL;
5473 }
5474 
5475 static int bond_xdp_xmit(struct net_device *bond_dev,
5476 			 int n, struct xdp_frame **frames, u32 flags)
5477 {
5478 	int nxmit, err = -ENXIO;
5479 
5480 	rcu_read_lock();
5481 
5482 	for (nxmit = 0; nxmit < n; nxmit++) {
5483 		struct xdp_frame *frame = frames[nxmit];
5484 		struct xdp_frame *frames1[] = {frame};
5485 		struct net_device *slave_dev;
5486 		struct xdp_buff xdp;
5487 
5488 		xdp_convert_frame_to_buff(frame, &xdp);
5489 
5490 		slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
5491 		if (!slave_dev) {
5492 			err = -ENXIO;
5493 			break;
5494 		}
5495 
5496 		err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
5497 		if (err < 1)
5498 			break;
5499 	}
5500 
5501 	rcu_read_unlock();
5502 
5503 	/* If an error happened on the first frame, pass the error up;
5504 	 * otherwise report the number of frames that were transmitted.
5505 	 */
5506 	if (err < 0)
5507 		return (nxmit == 0 ? err : nxmit);
5508 
5509 	return nxmit;
5510 }
5511 
5512 static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
5513 			struct netlink_ext_ack *extack)
5514 {
5515 	struct bonding *bond = netdev_priv(dev);
5516 	struct list_head *iter;
5517 	struct slave *slave, *rollback_slave;
5518 	struct bpf_prog *old_prog;
5519 	struct netdev_bpf xdp = {
5520 		.command = XDP_SETUP_PROG,
5521 		.flags   = 0,
5522 		.prog    = prog,
5523 		.extack  = extack,
5524 	};
5525 	int err;
5526 
5527 	ASSERT_RTNL();
5528 
5529 	if (!bond_xdp_check(bond))
5530 		return -EOPNOTSUPP;
5531 
5532 	old_prog = bond->xdp_prog;
5533 	bond->xdp_prog = prog;
5534 
5535 	bond_for_each_slave(bond, slave, iter) {
5536 		struct net_device *slave_dev = slave->dev;
5537 
5538 		if (!slave_dev->netdev_ops->ndo_bpf ||
5539 		    !slave_dev->netdev_ops->ndo_xdp_xmit) {
5540 			SLAVE_NL_ERR(dev, slave_dev, extack,
5541 				     "Slave device does not support XDP");
5542 			err = -EOPNOTSUPP;
5543 			goto err;
5544 		}
5545 
5546 		if (dev_xdp_prog_count(slave_dev) > 0) {
5547 			SLAVE_NL_ERR(dev, slave_dev, extack,
5548 				     "Slave has XDP program loaded, please unload before enslaving");
5549 			err = -EOPNOTSUPP;
5550 			goto err;
5551 		}
5552 
5553 		err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5554 		if (err < 0) {
5555 			/* ndo_bpf() sets extack error message */
5556 			slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
5557 			goto err;
5558 		}
5559 		if (prog)
5560 			bpf_prog_inc(prog);
5561 	}
5562 
5563 	if (prog) {
5564 		static_branch_inc(&bpf_master_redirect_enabled_key);
5565 	} else if (old_prog) {
5566 		bpf_prog_put(old_prog);
5567 		static_branch_dec(&bpf_master_redirect_enabled_key);
5568 	}
5569 
5570 	return 0;
5571 
5572 err:
5573 	/* unwind the program changes */
5574 	bond->xdp_prog = old_prog;
5575 	xdp.prog = old_prog;
5576 	xdp.extack = NULL; /* do not overwrite original error */
5577 
5578 	bond_for_each_slave(bond, rollback_slave, iter) {
5579 		struct net_device *slave_dev = rollback_slave->dev;
5580 		int err_unwind;
5581 
5582 		if (slave == rollback_slave)
5583 			break;
5584 
5585 		err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5586 		if (err_unwind < 0)
5587 			slave_err(dev, slave_dev,
5588 				  "Error %d when unwinding XDP program change\n", err_unwind);
5589 		else if (xdp.prog)
5590 			bpf_prog_inc(xdp.prog);
5591 	}
5592 	return err;
5593 }
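/* [Editorial example] The err: path above is the classic apply-or-unwind
 * pattern: on failure, walk the members again, restoring the old state to
 * everyone already switched, and stop at the member that failed. Minimal
 * sketch with hypothetical state:
 *
 *	#include <assert.h>
 *	#define N 4
 *
 *	static int apply(int *state, int idx, int val, int fail_at)
 *	{
 *		if (idx == fail_at)
 *			return -1;		// simulated ndo_bpf() failure
 *		state[idx] = val;
 *		return 0;
 *	}
 *
 *	static int set_all(int *state, int newval, int oldval, int fail_at)
 *	{
 *		int i;
 *
 *		for (i = 0; i < N; i++)
 *			if (apply(state, i, newval, fail_at) < 0)
 *				goto unwind;
 *		return 0;
 *	unwind:
 *		while (--i >= 0)		// only members already changed
 *			apply(state, i, oldval, -1);
 *		return -1;
 *	}
 *
 *	int main(void)
 *	{
 *		int i, state[N] = { 7, 7, 7, 7 };
 *
 *		assert(set_all(state, 9, 7, 2) < 0);	// fails on member 2
 *		for (i = 0; i < N; i++)
 *			assert(state[i] == 7);		// fully rolled back
 *		return 0;
 *	}
 */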
5594 
5595 static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5596 {
5597 	switch (xdp->command) {
5598 	case XDP_SETUP_PROG:
5599 		return bond_xdp_set(dev, xdp->prog, xdp->extack);
5600 	default:
5601 		return -EINVAL;
5602 	}
5603 }
5604 
5605 static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
5606 {
5607 	if (speed == 0 || speed == SPEED_UNKNOWN)
5608 		speed = slave->speed;
5609 	else
5610 		speed = min(speed, slave->speed);
5611 
5612 	return speed;
5613 }
5614 
5615 static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
5616 					   struct ethtool_link_ksettings *cmd)
5617 {
5618 	struct bonding *bond = netdev_priv(bond_dev);
5619 	struct list_head *iter;
5620 	struct slave *slave;
5621 	u32 speed = 0;
5622 
5623 	cmd->base.duplex = DUPLEX_UNKNOWN;
5624 	cmd->base.port = PORT_OTHER;
5625 
5626 	/* Since bond_slave_can_tx returns false for all inactive or down slaves, we
5627 	 * do not need to check mode.  Though link speed might not represent
5628 	 * the true receive or transmit bandwidth (not all modes are symmetric),
5629 	 * this is an accurate maximum.
5630 	 */
5631 	bond_for_each_slave(bond, slave, iter) {
5632 		if (bond_slave_can_tx(slave)) {
5633 			if (slave->speed != SPEED_UNKNOWN) {
5634 				if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
5635 					speed = bond_mode_bcast_speed(slave,
5636 								      speed);
5637 				else
5638 					speed += slave->speed;
5639 			}
5640 			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
5641 			    slave->duplex != DUPLEX_UNKNOWN)
5642 				cmd->base.duplex = slave->duplex;
5643 		}
5644 	}
5645 	cmd->base.speed = speed ? : SPEED_UNKNOWN;
5646 
5647 	return 0;
5648 }
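/* [Editorial example] The aggregation rule above: most modes report the sum
 * of the slave speeds (traffic can be striped across links), while broadcast
 * reports the minimum (every packet must traverse the slowest link). Sketch:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static uint32_t bcast_speed(uint32_t agg, uint32_t slave)
 *	{
 *		if (agg == 0)
 *			return slave;
 *		return agg < slave ? agg : slave;	// running minimum
 *	}
 *
 *	int main(void)
 *	{
 *		uint32_t slaves[] = { 1000, 100, 1000 };
 *		uint32_t sum = 0, bcast = 0;
 *		int i;
 *
 *		for (i = 0; i < 3; i++) {
 *			sum += slaves[i];
 *			bcast = bcast_speed(bcast, slaves[i]);
 *		}
 *		assert(sum == 2100);	// e.g. balance-rr, 802.3ad
 *		assert(bcast == 100);	// broadcast: slowest slave wins
 *		return 0;
 *	}
 */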
5649 
5650 static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
5651 				     struct ethtool_drvinfo *drvinfo)
5652 {
5653 	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
5654 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
5655 		 BOND_ABI_VERSION);
5656 }
5657 
5658 static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
5659 				    struct ethtool_ts_info *info)
5660 {
5661 	struct bonding *bond = netdev_priv(bond_dev);
5662 	const struct ethtool_ops *ops;
5663 	struct net_device *real_dev;
5664 	struct phy_device *phydev;
5665 	int ret = 0;
5666 
5667 	rcu_read_lock();
5668 	real_dev = bond_option_active_slave_get_rcu(bond);
5669 	dev_hold(real_dev);
5670 	rcu_read_unlock();
5671 
5672 	if (real_dev) {
5673 		ops = real_dev->ethtool_ops;
5674 		phydev = real_dev->phydev;
5675 
5676 		if (phy_has_tsinfo(phydev)) {
5677 			ret = phy_ts_info(phydev, info);
5678 			goto out;
5679 		} else if (ops->get_ts_info) {
5680 			ret = ops->get_ts_info(real_dev, info);
5681 			goto out;
5682 		}
5683 	}
5684 
5685 	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
5686 				SOF_TIMESTAMPING_SOFTWARE;
5687 	info->phc_index = -1;
5688 
5689 out:
5690 	dev_put(real_dev);
5691 	return ret;
5692 }
5693 
5694 static const struct ethtool_ops bond_ethtool_ops = {
5695 	.get_drvinfo		= bond_ethtool_get_drvinfo,
5696 	.get_link		= ethtool_op_get_link,
5697 	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
5698 	.get_ts_info		= bond_ethtool_get_ts_info,
5699 };
5700 
5701 static const struct net_device_ops bond_netdev_ops = {
5702 	.ndo_init		= bond_init,
5703 	.ndo_uninit		= bond_uninit,
5704 	.ndo_open		= bond_open,
5705 	.ndo_stop		= bond_close,
5706 	.ndo_start_xmit		= bond_start_xmit,
5707 	.ndo_select_queue	= bond_select_queue,
5708 	.ndo_get_stats64	= bond_get_stats,
5709 	.ndo_eth_ioctl		= bond_eth_ioctl,
5710 	.ndo_siocbond		= bond_do_ioctl,
5711 	.ndo_siocdevprivate	= bond_siocdevprivate,
5712 	.ndo_change_rx_flags	= bond_change_rx_flags,
5713 	.ndo_set_rx_mode	= bond_set_rx_mode,
5714 	.ndo_change_mtu		= bond_change_mtu,
5715 	.ndo_set_mac_address	= bond_set_mac_address,
5716 	.ndo_neigh_setup	= bond_neigh_setup,
5717 	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
5718 	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
5719 #ifdef CONFIG_NET_POLL_CONTROLLER
5720 	.ndo_netpoll_setup	= bond_netpoll_setup,
5721 	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
5722 	.ndo_poll_controller	= bond_poll_controller,
5723 #endif
5724 	.ndo_add_slave		= bond_enslave,
5725 	.ndo_del_slave		= bond_release,
5726 	.ndo_fix_features	= bond_fix_features,
5727 	.ndo_features_check	= passthru_features_check,
5728 	.ndo_get_xmit_slave	= bond_xmit_get_slave,
5729 	.ndo_sk_get_lower_dev	= bond_sk_get_lower_dev,
5730 	.ndo_bpf		= bond_xdp,
5731 	.ndo_xdp_xmit           = bond_xdp_xmit,
5732 	.ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
5733 };
5734 
5735 static const struct device_type bond_type = {
5736 	.name = "bond",
5737 };
5738 
5739 static void bond_destructor(struct net_device *bond_dev)
5740 {
5741 	struct bonding *bond = netdev_priv(bond_dev);
5742 
5743 	if (bond->wq)
5744 		destroy_workqueue(bond->wq);
5745 
5746 	if (bond->rr_tx_counter)
5747 		free_percpu(bond->rr_tx_counter);
5748 }
5749 
5750 void bond_setup(struct net_device *bond_dev)
5751 {
5752 	struct bonding *bond = netdev_priv(bond_dev);
5753 
5754 	spin_lock_init(&bond->mode_lock);
5755 	bond->params = bonding_defaults;
5756 
5757 	/* Initialize pointers */
5758 	bond->dev = bond_dev;
5759 
5760 	/* Initialize the device entry points */
5761 	ether_setup(bond_dev);
5762 	bond_dev->max_mtu = ETH_MAX_MTU;
5763 	bond_dev->netdev_ops = &bond_netdev_ops;
5764 	bond_dev->ethtool_ops = &bond_ethtool_ops;
5765 
5766 	bond_dev->needs_free_netdev = true;
5767 	bond_dev->priv_destructor = bond_destructor;
5768 
5769 	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
5770 
5771 	/* Initialize the device options */
5772 	bond_dev->flags |= IFF_MASTER;
5773 	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
5774 	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
5775 
5776 #ifdef CONFIG_XFRM_OFFLOAD
5777 	/* set up xfrm device ops (only supported in active-backup right now) */
5778 	bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
5779 	INIT_LIST_HEAD(&bond->ipsec_list);
5780 	spin_lock_init(&bond->ipsec_lock);
5781 #endif /* CONFIG_XFRM_OFFLOAD */
5782 
5783 	/* don't acquire bond device's netif_tx_lock when transmitting */
5784 	bond_dev->features |= NETIF_F_LLTX;
5785 
5786 	/* By default, we declare the bond to be fully
5787 	 * capable of VLAN hardware acceleration. Special
5788 	 * care is taken in the various xmit functions
5789 	 * when there are slaves that are not hw-accel
5790 	 * capable.
5791 	 */
5792 
5793 	/* Don't allow bond devices to change network namespaces. */
5794 	bond_dev->features |= NETIF_F_NETNS_LOCAL;
5795 
5796 	bond_dev->hw_features = BOND_VLAN_FEATURES |
5797 				NETIF_F_HW_VLAN_CTAG_RX |
5798 				NETIF_F_HW_VLAN_CTAG_FILTER;
5799 
5800 	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
5801 	bond_dev->features |= bond_dev->hw_features;
5802 	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
5803 #ifdef CONFIG_XFRM_OFFLOAD
5804 	bond_dev->hw_features |= BOND_XFRM_FEATURES;
5805 	/* Only enable XFRM features if this is an active-backup config */
5806 	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
5807 		bond_dev->features |= BOND_XFRM_FEATURES;
5808 #endif /* CONFIG_XFRM_OFFLOAD */
5809 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5810 	if (bond_sk_check(bond))
5811 		bond_dev->features |= BOND_TLS_FEATURES;
5812 #endif
5813 }
5814 
5815 /* Destroy a bonding device.
5816  * Must be under rtnl_lock when this function is called.
5817  */
5818 static void bond_uninit(struct net_device *bond_dev)
5819 {
5820 	struct bonding *bond = netdev_priv(bond_dev);
5821 	struct bond_up_slave *usable, *all;
5822 	struct list_head *iter;
5823 	struct slave *slave;
5824 
5825 	bond_netpoll_cleanup(bond_dev);
5826 
5827 	/* Release the bonded slaves */
5828 	bond_for_each_slave(bond, slave, iter)
5829 		__bond_release_one(bond_dev, slave->dev, true, true);
5830 	netdev_info(bond_dev, "Released all slaves\n");
5831 
5832 	usable = rtnl_dereference(bond->usable_slaves);
5833 	if (usable) {
5834 		RCU_INIT_POINTER(bond->usable_slaves, NULL);
5835 		kfree_rcu(usable, rcu);
5836 	}
5837 
5838 	all = rtnl_dereference(bond->all_slaves);
5839 	if (all) {
5840 		RCU_INIT_POINTER(bond->all_slaves, NULL);
5841 		kfree_rcu(all, rcu);
5842 	}
5843 
5844 	list_del(&bond->bond_list);
5845 
5846 	bond_debug_unregister(bond);
5847 }
5848 
5849 /*------------------------- Module initialization ---------------------------*/
5850 
5851 static int bond_check_params(struct bond_params *params)
5852 {
5853 	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
5854 	struct bond_opt_value newval;
5855 	const struct bond_opt_value *valptr;
5856 	int arp_all_targets_value = 0;
5857 	u16 ad_actor_sys_prio = 0;
5858 	u16 ad_user_port_key = 0;
5859 	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
5860 	int arp_ip_count;
5861 	int bond_mode	= BOND_MODE_ROUNDROBIN;
5862 	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
5863 	int lacp_fast = 0;
5864 	int tlb_dynamic_lb;
5865 
5866 	/* Convert string parameters. */
5867 	if (mode) {
5868 		bond_opt_initstr(&newval, mode);
5869 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
5870 		if (!valptr) {
5871 			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
5872 			return -EINVAL;
5873 		}
5874 		bond_mode = valptr->value;
5875 	}
5876 
5877 	if (xmit_hash_policy) {
5878 		if (bond_mode == BOND_MODE_ROUNDROBIN ||
5879 		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
5880 		    bond_mode == BOND_MODE_BROADCAST) {
5881 			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
5882 				bond_mode_name(bond_mode));
5883 		} else {
5884 			bond_opt_initstr(&newval, xmit_hash_policy);
5885 			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
5886 						&newval);
5887 			if (!valptr) {
5888 				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
5889 				       xmit_hash_policy);
5890 				return -EINVAL;
5891 			}
5892 			xmit_hashtype = valptr->value;
5893 		}
5894 	}
5895 
5896 	if (lacp_rate) {
5897 		if (bond_mode != BOND_MODE_8023AD) {
5898 			pr_info("lacp_rate param is irrelevant in mode %s\n",
5899 				bond_mode_name(bond_mode));
5900 		} else {
5901 			bond_opt_initstr(&newval, lacp_rate);
5902 			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
5903 						&newval);
5904 			if (!valptr) {
5905 				pr_err("Error: Invalid lacp rate \"%s\"\n",
5906 				       lacp_rate);
5907 				return -EINVAL;
5908 			}
5909 			lacp_fast = valptr->value;
5910 		}
5911 	}
5912 
5913 	if (ad_select) {
5914 		bond_opt_initstr(&newval, ad_select);
5915 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
5916 					&newval);
5917 		if (!valptr) {
5918 			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
5919 			return -EINVAL;
5920 		}
5921 		params->ad_select = valptr->value;
5922 		if (bond_mode != BOND_MODE_8023AD)
5923 			pr_warn("ad_select param only affects 802.3ad mode\n");
5924 	} else {
5925 		params->ad_select = BOND_AD_STABLE;
5926 	}
5927 
5928 	if (max_bonds < 0) {
5929 		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
5930 			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
5931 		max_bonds = BOND_DEFAULT_MAX_BONDS;
5932 	}
5933 
5934 	if (miimon < 0) {
5935 		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5936 			miimon, INT_MAX);
5937 		miimon = 0;
5938 	}
5939 
5940 	if (updelay < 0) {
5941 		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5942 			updelay, INT_MAX);
5943 		updelay = 0;
5944 	}
5945 
5946 	if (downdelay < 0) {
5947 		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5948 			downdelay, INT_MAX);
5949 		downdelay = 0;
5950 	}
5951 
5952 	if ((use_carrier != 0) && (use_carrier != 1)) {
5953 		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
5954 			use_carrier);
5955 		use_carrier = 1;
5956 	}
5957 
5958 	if (num_peer_notif < 0 || num_peer_notif > 255) {
5959 		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255, so it was reset to 1\n",
5960 			num_peer_notif);
5961 		num_peer_notif = 1;
5962 	}
5963 
5964 	/* reset values for 802.3ad/TLB/ALB */
5965 	if (!bond_mode_uses_arp(bond_mode)) {
5966 		if (!miimon) {
5967 			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed, or duplex, which are essential for 802.3ad operation\n");
5968 			pr_warn("Forcing miimon to 100msec\n");
5969 			miimon = BOND_DEFAULT_MIIMON;
5970 		}
5971 	}
5972 
5973 	if (tx_queues < 1 || tx_queues > 255) {
5974 		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
5975 			tx_queues, BOND_DEFAULT_TX_QUEUES);
5976 		tx_queues = BOND_DEFAULT_TX_QUEUES;
5977 	}
5978 
5979 	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
5980 		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
5981 			all_slaves_active);
5982 		all_slaves_active = 0;
5983 	}
5984 
5985 	if (resend_igmp < 0 || resend_igmp > 255) {
5986 		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
5987 			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
5988 		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
5989 	}
5990 
5991 	bond_opt_initval(&newval, packets_per_slave);
5992 	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
5993 		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u, resetting to 1\n",
5994 			packets_per_slave, USHRT_MAX);
5995 		packets_per_slave = 1;
5996 	}
5997 
5998 	if (bond_mode == BOND_MODE_ALB) {
5999 		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
6000 			  updelay);
6001 	}
6002 
6003 	if (!miimon) {
6004 		if (updelay || downdelay) {
6005 			/* just warn the user that the up/down delays will have
6006 			 * no effect since miimon is zero...
6007 			 */
6008 			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
6009 				updelay, downdelay);
6010 		}
6011 	} else {
6012 		/* don't allow arp monitoring */
6013 		if (arp_interval) {
6014 			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
6015 				miimon, arp_interval);
6016 			arp_interval = 0;
6017 		}
6018 
6019 		if ((updelay % miimon) != 0) {
6020 			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
6021 				updelay, miimon, (updelay / miimon) * miimon);
6022 		}
6023 
6024 		updelay /= miimon;
6025 
6026 		if ((downdelay % miimon) != 0) {
6027 			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
6028 				downdelay, miimon,
6029 				(downdelay / miimon) * miimon);
6030 		}
6031 
6032 		downdelay /= miimon;
6033 	}
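	/* [Editorial example] Worked numbers for the rounding above: the
	 * delays are stored in units of miimon ticks, so integer division
	 * floors them to a multiple of miimon.
	 *
	 *	#include <assert.h>
	 *
	 *	int main(void)
	 *	{
	 *		int miimon = 100, updelay = 250;
	 *
	 *		// warning path: 250 ms is not a multiple of 100 ms,
	 *		// so it is effectively rounded down to 200 ms
	 *		assert((updelay / miimon) * miimon == 200);
	 *		updelay /= miimon;	// stored as 2 miimon ticks
	 *		assert(updelay == 2);
	 *		return 0;
	 *	}
	 */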
6034 
6035 	if (arp_interval < 0) {
6036 		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6037 			arp_interval, INT_MAX);
6038 		arp_interval = 0;
6039 	}
6040 
6041 	for (arp_ip_count = 0, i = 0;
6042 	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
6043 		__be32 ip;
6044 
6045 		/* not a complete check, but good enough to catch mistakes */
6046 		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
6047 		    !bond_is_ip_target_ok(ip)) {
6048 			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
6049 				arp_ip_target[i]);
6050 			arp_interval = 0;
6051 		} else {
6052 			if (bond_get_targets_ip(arp_target, ip) == -1)
6053 				arp_target[arp_ip_count++] = ip;
6054 			else
6055 				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
6056 					&ip);
6057 		}
6058 	}
6059 
6060 	if (arp_interval && !arp_ip_count) {
6061 		/* don't allow arping if no arp_ip_target given... */
6062 		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
6063 			arp_interval);
6064 		arp_interval = 0;
6065 	}
6066 
6067 	if (arp_validate) {
6068 		if (!arp_interval) {
6069 			pr_err("arp_validate requires arp_interval\n");
6070 			return -EINVAL;
6071 		}
6072 
6073 		bond_opt_initstr(&newval, arp_validate);
6074 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
6075 					&newval);
6076 		if (!valptr) {
6077 			pr_err("Error: invalid arp_validate \"%s\"\n",
6078 			       arp_validate);
6079 			return -EINVAL;
6080 		}
6081 		arp_validate_value = valptr->value;
6082 	} else {
6083 		arp_validate_value = 0;
6084 	}
6085 
6086 	if (arp_all_targets) {
6087 		bond_opt_initstr(&newval, arp_all_targets);
6088 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
6089 					&newval);
6090 		if (!valptr) {
6091 			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
6092 			       arp_all_targets);
6093 			arp_all_targets_value = 0;
6094 		} else {
6095 			arp_all_targets_value = valptr->value;
6096 		}
6097 	}
6098 
6099 	if (miimon) {
6100 		pr_info("MII link monitoring set to %d ms\n", miimon);
6101 	} else if (arp_interval) {
6102 		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
6103 					  arp_validate_value);
6104 		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
6105 			arp_interval, valptr->string, arp_ip_count);
6106 
6107 		for (i = 0; i < arp_ip_count; i++)
6108 			pr_cont(" %s", arp_ip_target[i]);
6109 
6110 		pr_cont("\n");
6111 
6112 	} else if (max_bonds) {
6113 		/* neither miimon nor arp_interval is set; we need one of them
6114 		 * for link monitoring to work, see bonding.txt for details
6115 		 */
6116 		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! See bonding.txt for details\n");
6117 	}
6118 
6119 	if (primary && !bond_mode_uses_primary(bond_mode)) {
6120 		/* currently, using a primary only makes sense
6121 		 * in active backup, TLB or ALB modes
6122 		 */
6123 		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
6124 			primary, bond_mode_name(bond_mode));
6125 		primary = NULL;
6126 	}
6127 
6128 	if (primary && primary_reselect) {
6129 		bond_opt_initstr(&newval, primary_reselect);
6130 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
6131 					&newval);
6132 		if (!valptr) {
6133 			pr_err("Error: Invalid primary_reselect \"%s\"\n",
6134 			       primary_reselect);
6135 			return -EINVAL;
6136 		}
6137 		primary_reselect_value = valptr->value;
6138 	} else {
6139 		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
6140 	}
6141 
6142 	if (fail_over_mac) {
6143 		bond_opt_initstr(&newval, fail_over_mac);
6144 		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
6145 					&newval);
6146 		if (!valptr) {
6147 			pr_err("Error: invalid fail_over_mac \"%s\"\n",
6148 			       fail_over_mac);
6149 			return -EINVAL;
6150 		}
6151 		fail_over_mac_value = valptr->value;
6152 		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
6153 			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
6154 	} else {
6155 		fail_over_mac_value = BOND_FOM_NONE;
6156 	}
6157 
6158 	bond_opt_initstr(&newval, "default");
6159 	valptr = bond_opt_parse(
6160 			bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
6161 			&newval);
6162 	if (!valptr) {
6163 		pr_err("Error: No ad_actor_sys_prio default value\n");
6164 		return -EINVAL;
6165 	}
6166 	ad_actor_sys_prio = valptr->value;
6167 
6168 	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
6169 				&newval);
6170 	if (!valptr) {
6171 		pr_err("Error: No ad_user_port_key default value\n");
6172 		return -EINVAL;
6173 	}
6174 	ad_user_port_key = valptr->value;
6175 
6176 	bond_opt_initstr(&newval, "default");
6177 	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
6178 	if (!valptr) {
6179 		pr_err("Error: No tlb_dynamic_lb default value\n");
6180 		return -EINVAL;
6181 	}
6182 	tlb_dynamic_lb = valptr->value;
6183 
6184 	if (lp_interval == 0) {
6185 		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
6186 			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
6187 		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
6188 	}
6189 
6190 	/* fill params struct with the proper values */
6191 	params->mode = bond_mode;
6192 	params->xmit_policy = xmit_hashtype;
6193 	params->miimon = miimon;
6194 	params->num_peer_notif = num_peer_notif;
6195 	params->arp_interval = arp_interval;
6196 	params->arp_validate = arp_validate_value;
6197 	params->arp_all_targets = arp_all_targets_value;
6198 	params->missed_max = 2;
6199 	params->updelay = updelay;
6200 	params->downdelay = downdelay;
6201 	params->peer_notif_delay = 0;
6202 	params->use_carrier = use_carrier;
6203 	params->lacp_active = 1;
6204 	params->lacp_fast = lacp_fast;
6205 	params->primary[0] = 0;
6206 	params->primary_reselect = primary_reselect_value;
6207 	params->fail_over_mac = fail_over_mac_value;
6208 	params->tx_queues = tx_queues;
6209 	params->all_slaves_active = all_slaves_active;
6210 	params->resend_igmp = resend_igmp;
6211 	params->min_links = min_links;
6212 	params->lp_interval = lp_interval;
6213 	params->packets_per_slave = packets_per_slave;
6214 	params->tlb_dynamic_lb = tlb_dynamic_lb;
6215 	params->ad_actor_sys_prio = ad_actor_sys_prio;
6216 	eth_zero_addr(params->ad_actor_system);
6217 	params->ad_user_port_key = ad_user_port_key;
6218 	if (packets_per_slave > 0) {
6219 		params->reciprocal_packets_per_slave =
6220 			reciprocal_value(packets_per_slave);
6221 	} else {
6222 		/* reciprocal_packets_per_slave is unused if
6223 		 * packets_per_slave is 0 or 1, just initialize it
6224 		 */
6225 		params->reciprocal_packets_per_slave =
6226 			(struct reciprocal_value) { 0 };
6227 	}
6228 
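	/* [Editorial example] Why reciprocal_value() above: round-robin needs
	 * a per-packet "counter / packets_per_slave", and the kernel's
	 * reciprocal_divide() turns that runtime division into a precomputed
	 * multiply-and-shift. Simplified userspace analog of the idea (the
	 * kernel version in <linux/reciprocal_div.h> handles rounding more
	 * carefully):
	 *
	 *	#include <assert.h>
	 *	#include <stdint.h>
	 *
	 *	int main(void)
	 *	{
	 *		uint32_t x, d = 3;	// d = packets_per_slave
	 *		// precompute once: m = ceil(2^32 / d)
	 *		uint64_t m = (0xffffffffULL + d) / d;
	 *
	 *		for (x = 0; x < 1000; x++)
	 *			assert((uint32_t)((x * m) >> 32) == x / d);
	 *		return 0;
	 *	}
	 */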
6229 	if (primary)
6230 		strscpy_pad(params->primary, primary, sizeof(params->primary));
6231 
6232 	memcpy(params->arp_targets, arp_target, sizeof(arp_target));
6233 #if IS_ENABLED(CONFIG_IPV6)
6234 	memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
6235 #endif
6236 
6237 	return 0;
6238 }
6239 
6240 /* Called from registration process */
6241 static int bond_init(struct net_device *bond_dev)
6242 {
6243 	struct bonding *bond = netdev_priv(bond_dev);
6244 	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
6245 
6246 	netdev_dbg(bond_dev, "Begin bond_init\n");
6247 
6248 	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
6249 	if (!bond->wq)
6250 		return -ENOMEM;
6251 
6252 	spin_lock_init(&bond->stats_lock);
6253 	netdev_lockdep_set_classes(bond_dev);
6254 
6255 	list_add_tail(&bond->bond_list, &bn->dev_list);
6256 
6257 	bond_prepare_sysfs_group(bond);
6258 
6259 	bond_debug_register(bond);
6260 
6261 	/* Ensure valid dev_addr */
6262 	if (is_zero_ether_addr(bond_dev->dev_addr) &&
6263 	    bond_dev->addr_assign_type == NET_ADDR_PERM)
6264 		eth_hw_addr_random(bond_dev);
6265 
6266 	return 0;
6267 }
6268 
6269 unsigned int bond_get_num_tx_queues(void)
6270 {
6271 	return tx_queues;
6272 }
6273 
6274 /* Create a new bond based on the specified name and bonding parameters.
6275  * If name is NULL, obtain a suitable "bond%d" name for us.
6276  * Caller must NOT hold rtnl_lock; we need to release it here before we
6277  * set up our sysfs entries.
6278  */
6279 int bond_create(struct net *net, const char *name)
6280 {
6281 	struct net_device *bond_dev;
6282 	struct bonding *bond;
6283 	int res = -ENOMEM;
6284 
6285 	rtnl_lock();
6286 
6287 	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
6288 				   name ? name : "bond%d", NET_NAME_UNKNOWN,
6289 				   bond_setup, tx_queues);
6290 	if (!bond_dev)
6291 		goto out;
6292 
6293 	bond = netdev_priv(bond_dev);
6294 	dev_net_set(bond_dev, net);
6295 	bond_dev->rtnl_link_ops = &bond_link_ops;
6296 
6297 	res = register_netdevice(bond_dev);
6298 	if (res < 0) {
6299 		free_netdev(bond_dev);
6300 		goto out;
6301 	}
6302 
6303 	netif_carrier_off(bond_dev);
6304 
6305 	bond_work_init_all(bond);
6306 
6307 out:
6308 	rtnl_unlock();
6309 	return res;
6310 }
6311 
6312 static int __net_init bond_net_init(struct net *net)
6313 {
6314 	struct bond_net *bn = net_generic(net, bond_net_id);
6315 
6316 	bn->net = net;
6317 	INIT_LIST_HEAD(&bn->dev_list);
6318 
6319 	bond_create_proc_dir(bn);
6320 	bond_create_sysfs(bn);
6321 
6322 	return 0;
6323 }
6324 
6325 static void __net_exit bond_net_exit_batch(struct list_head *net_list)
6326 {
6327 	struct bond_net *bn;
6328 	struct net *net;
6329 	LIST_HEAD(list);
6330 
6331 	list_for_each_entry(net, net_list, exit_list) {
6332 		bn = net_generic(net, bond_net_id);
6333 		bond_destroy_sysfs(bn);
6334 	}
6335 
6336 	/* Kill off any bonds created after unregistering bond rtnl ops */
6337 	rtnl_lock();
6338 	list_for_each_entry(net, net_list, exit_list) {
6339 		struct bonding *bond, *tmp_bond;
6340 
6341 		bn = net_generic(net, bond_net_id);
6342 		list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
6343 			unregister_netdevice_queue(bond->dev, &list);
6344 	}
6345 	unregister_netdevice_many(&list);
6346 	rtnl_unlock();
6347 
6348 	list_for_each_entry(net, net_list, exit_list) {
6349 		bn = net_generic(net, bond_net_id);
6350 		bond_destroy_proc_dir(bn);
6351 	}
6352 }
6353 
6354 static struct pernet_operations bond_net_ops = {
6355 	.init = bond_net_init,
6356 	.exit_batch = bond_net_exit_batch,
6357 	.id   = &bond_net_id,
6358 	.size = sizeof(struct bond_net),
6359 };
6360 
6361 static int __init bonding_init(void)
6362 {
6363 	int i;
6364 	int res;
6365 
6366 	res = bond_check_params(&bonding_defaults);
6367 	if (res)
6368 		goto out;
6369 
6370 	res = register_pernet_subsys(&bond_net_ops);
6371 	if (res)
6372 		goto out;
6373 
6374 	res = bond_netlink_init();
6375 	if (res)
6376 		goto err_link;
6377 
6378 	bond_create_debugfs();
6379 
6380 	for (i = 0; i < max_bonds; i++) {
6381 		res = bond_create(&init_net, NULL);
6382 		if (res)
6383 			goto err;
6384 	}
6385 
6386 	skb_flow_dissector_init(&flow_keys_bonding,
6387 				flow_keys_bonding_keys,
6388 				ARRAY_SIZE(flow_keys_bonding_keys));
6389 
6390 	register_netdevice_notifier(&bond_netdev_notifier);
6391 out:
6392 	return res;
6393 err:
6394 	bond_destroy_debugfs();
6395 	bond_netlink_fini();
6396 err_link:
6397 	unregister_pernet_subsys(&bond_net_ops);
6398 	goto out;
6399 
6400 }
6401 
6402 static void __exit bonding_exit(void)
6403 {
6404 	unregister_netdevice_notifier(&bond_netdev_notifier);
6405 
6406 	bond_destroy_debugfs();
6407 
6408 	bond_netlink_fini();
6409 	unregister_pernet_subsys(&bond_net_ops);
6410 
6411 #ifdef CONFIG_NET_POLL_CONTROLLER
6412 	/* Make sure we don't have an imbalance on our netpoll blocking */
6413 	WARN_ON(atomic_read(&netpoll_block_tx));
6414 #endif
6415 }
6416 
6417 module_init(bonding_init);
6418 module_exit(bonding_exit);
6419 MODULE_LICENSE("GPL");
6420 MODULE_DESCRIPTION(DRV_DESCRIPTION);
6421 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
6422