/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"
#include "lib/mlx5.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"

#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;
	struct mlx5e_rep_priv *rpriv;

	struct list_head list;
};

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev);

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%04d (%.16s)",
		 fw_rev_maj(mdev), fw_rev_min(mdev),
		 fw_rev_sub(mdev), mdev->board_id);
}

static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
					 struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_rep_get_drvinfo(dev, drvinfo);
	strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
		sizeof(drvinfo->bus_info));
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, u8 *data)
{
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       vport_rep_stats_desc[j].format);
		break;
	}
}

static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes   = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes   = vf_stats.rx_bytes;
}

static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct rtnl_link_stats64 *vport_stats;

	mlx5e_grp_802_3_update_stats(priv);

	vport_stats = &priv->stats.vf_vport;

	vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
	vport_stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
	vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
	vport_stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes   = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes   = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i, j;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	mlx5e_rep_update_sw_counters(priv);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);

	for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
					       vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}

static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	u16 curr_channels_amount = priv->channels.params.num_channels;
	u32 new_channels_amount = ch->combined_count;
	struct mlx5_flow_destination new_dest;
	int err = 0;

	err = mlx5e_ethtool_set_channels(priv, ch);
	if (err)
		return err;

	if (curr_channels_amount == 1 && new_channels_amount > 1) {
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		new_dest.ft = priv->fs.ttc.ft.t;
	} else if (new_channels_amount == 1 && curr_channels_amount > 1) {
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		new_dest.tir_num = priv->direct_tir[0].tirn;
	} else {
		return 0;
	}

	err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
	if (err) {
		netdev_warn(priv->netdev, "Failed to update vport rx rule when going from (%d) channels to (%d) channels\n",
			    curr_channels_amount, new_channels_amount);
		return err;
	}

	return 0;
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
					    struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
					       struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
					       const struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_uplink_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
	.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
	.get_pauseparam    = mlx5e_uplink_rep_get_pauseparam,
	.set_pauseparam    = mlx5e_uplink_rep_set_pauseparam,
};

static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
					 struct netdev_phys_item_id *ppid)
{
	struct mlx5e_priv *priv;
	u64 parent_id;

	priv = netdev_priv(dev);

	parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
	ppid->id_len = sizeof(parent_id);
	memcpy(ppid->id, &parent_id, sizeof(parent_id));
}

static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}

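/* For each representor SQ, install an e-switch "send to vport" rule so
 * that traffic transmitted on the representor netdev is re-injected
 * towards the vport it represents. Only relevant in offloads mode.
 */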
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

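/* The neigh update interval is derived from the smallest DELAY_PROBE_TIME
 * across the IPv4 and IPv6 neighbour tables; it bounds how often the flow
 * counters are sampled for neighbour "used" tracking.
 */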
static unsigned long mlx5e_rep_ipv6_interval(void)
{
	if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
		return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);

	return ~0UL;
}

static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
	unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	mlx5_fc_queue_stats_work(priv->mdev,
				 &neigh_update->neigh_stats_work,
				 neigh_update->min_interval);
}

static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	return refcount_inc_not_zero(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt)) {
		mlx5e_rep_neigh_entry_remove(nhe);
		kfree_rcu(nhe, rcu);
	}
}

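/* Iterate the neigh hash entry list under RCU: return the next entry after
 * @nhe for which a reference could be taken (skipping entries that are
 * being destroyed), and drop the caller's reference on @nhe. This lets the
 * stats work below walk the list without holding a lock across iterations.
 */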
static struct mlx5e_neigh_hash_entry *
mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
		   struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh_hash_entry *next = NULL;

	rcu_read_lock();

	for (next = nhe ?
		     list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
					   &nhe->neigh_list,
					   struct mlx5e_neigh_hash_entry,
					   neigh_list) :
		     list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
					    struct mlx5e_neigh_hash_entry,
					    neigh_list);
	     next;
	     next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
					  &next->neigh_list,
					  struct mlx5e_neigh_hash_entry,
					  neigh_list))
		if (mlx5e_rep_neigh_entry_hold(next))
			break;

	rcu_read_unlock();

	if (nhe)
		mlx5e_rep_neigh_entry_release(nhe);

	return next;
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;

	rtnl_lock();
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}

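/* Reconcile the offloaded encap state with the new neighbour state: remove
 * offloaded flows when the neighbour became invalid or its MAC changed,
 * and re-offload them once a valid neighbour is known.
 */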
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool encap_connected;
	LIST_HEAD(flow_list);

	ASSERT_RTNL();

	/* wait for encap to be fully initialized */
	wait_for_completion(&e->res_ready);

	mutex_lock(&esw->offloads.encap_tbl_lock);
	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
	if (e->compl_result < 0 || (encap_connected == neigh_connected &&
				    ether_addr_equal(e->h_dest, ha)))
		goto unlock;

	mlx5e_take_all_encap_flows(e, &flow_list);

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e, &flow_list);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac, in case the flows were
		 * deleted because the encap source mac changed.
		 */
		ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e, &flow_list);
	}
unlock:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	mlx5e_put_encap_flow_list(priv, &flow_list);
}

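/* Neigh update work: snapshot the neighbour state under its lock, then
 * propagate the new state to every encap entry hanging off this hash entry.
 */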
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and its hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!mlx5e_encap_take(e))
			continue;

		priv = netdev_priv(e->out_dev);
		mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
		mlx5e_encap_put(priv, e);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}

static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
	struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

	list_for_each_entry_safe(cb_priv, temp, head, list) {
		mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
		kfree(cb_priv);
	}
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct flow_cls_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv)
{
	unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int err = 0;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
					 void *type_data, void *indr_priv)
{
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
{
	struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

static LIST_HEAD(mlx5e_block_cb_list);

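/* Bind/unbind an indirect TC block for a tunnel (or vlan) device. The
 * per-netdev private entry tracks the binding so it can be torn down when
 * either the device or the representor goes away.
 */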
static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
			      struct mlx5e_rep_priv *rpriv,
			      struct flow_block_offload *f)
{
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->unlocked_driver_cb = true;
	f->driver_block_list = &mlx5e_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
					       indr_priv, indr_priv,
					       mlx5e_rep_indr_tc_block_unbind);
		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

		return 0;
	case FLOW_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						mlx5e_rep_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static
int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			       enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
						     type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
					 struct net_device *netdev)
{
	int err;

	err = __flow_indr_block_cb_register(netdev, rpriv,
					    mlx5e_rep_indr_setup_tc_cb,
					    rpriv);
	if (err) {
		struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

		mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
			      netdev_name(netdev), err);
	}
	return err;
}

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev)
{
	__flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
					rpriv);
}

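/* Netdevice notifier: register/unregister indirect TC blocks for tunnel
 * devices we can offload to, and for vlan devices stacked on top of the
 * uplink representor.
 */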
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    uplink_priv.netdevice_nb);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_REGISTER:
		mlx5e_rep_indr_register_block(rpriv, netdev);
		break;
	case NETDEV_UNREGISTER:
		mlx5e_rep_indr_unregister_block(rpriv, netdev);
		break;
	}
	return NOTIFY_OK;
}

static void
mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
				  struct mlx5e_neigh_hash_entry *nhe,
				  struct neighbour *n)
{
	/* Take a reference to ensure the neighbour and mlx5 encap
	 * entry won't be destructed until we drop the reference in
	 * delayed work.
	 */
	neigh_hold(n);

	/* This assignment is valid as long as the neigh reference
	 * is taken
	 */
	nhe->n = n;

	if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
		mlx5e_rep_neigh_entry_release(nhe);
		neigh_release(n);
	}
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);

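/* Netevent notifier: on neighbour updates, queue the per-entry update work;
 * on DELAY_PROBE_TIME changes, shrink the flow counter sampling interval
 * accordingly (min_interval only ever decreases here).
 */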
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		rcu_read_lock();
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		rcu_read_unlock();
		if (!nhe)
			return NOTIFY_DONE;

		mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check that the device is present, since we don't care
		 * about changes in the default table; we only care about
		 * changes to the per-device delay probe time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		rcu_read_lock();
		list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
					neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		if (!found)
			return NOTIFY_DONE;

		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	mutex_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	mutex_destroy(&neigh_update->encap_lock);
	rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
					struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
				     &nhe->rhash_node,
				     mlx5e_neigh_ht_params);
	if (err)
		return err;

	list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

	return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;

	mutex_lock(&rpriv->neigh_update.encap_lock);

	list_del_rcu(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	mutex_unlock(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under the representor's encap_lock or
 * inside an rcu read lock section.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_neigh_hash_entry *nhe;

	nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				     mlx5e_neigh_ht_params);
	return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
					struct mlx5e_encap_entry *e,
					struct mlx5e_neigh_hash_entry **nhe)
{
	int err;

	*nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
	if (!*nhe)
		return -ENOMEM;

	(*nhe)->priv = priv;
	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
	spin_lock_init(&(*nhe)->encap_list_lock);
	INIT_LIST_HEAD(&(*nhe)->encap_list);
	refcount_set(&(*nhe)->refcnt, 1);

	err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
	if (err)
		goto out_free;
	return 0;

out_free:
	kfree(*nhe);
	return err;
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;

	mutex_lock(&rpriv->neigh_update.encap_lock);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err) {
			mutex_unlock(&rpriv->neigh_update.encap_lock);
			mlx5_tun_entropy_refcount_dec(tun_entropy,
						      e->reformat_type);
			return err;
		}
	}

	e->nhe = nhe;
	spin_lock(&nhe->encap_list_lock);
	list_add_rcu(&e->encap_list, &nhe->encap_list);
	spin_unlock(&nhe->encap_list_lock);

	mutex_unlock(&rpriv->neigh_update.encap_lock);

	return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

	if (!e->nhe)
		return;

	spin_lock(&e->nhe->encap_list_lock);
	list_del_rcu(&e->encap_list);
	spin_unlock(&e->nhe->encap_list_lock);

	mlx5e_rep_neigh_entry_release(e->nhe);
	e->nhe = NULL;
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct flow_cls_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
				    struct tc_cls_matchall_offload *ma)
{
	switch (ma->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlx5e_tc_configure_matchall(priv, ma);
	case TC_CLSMATCHALL_DESTROY:
		return mlx5e_tc_delete_matchall(priv, ma);
	case TC_CLSMATCHALL_STATS:
		mlx5e_tc_stats_matchall(priv, ma);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
	case TC_SETUP_CLSMATCHALL:
		return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

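/* FT offload shares the TC flower offload path: the flow is redirected into
 * the reserved ft chain (FDB_FT_CHAIN) so the existing flower handlers can
 * install it.
 */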
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct flow_cls_offload *f = type_data;
	struct flow_cls_offload cls_flower;
	struct mlx5e_priv *priv = cb_priv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err;

	flags = MLX5_TC_FLAG(INGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);
	esw = priv->mdev->priv.eswitch;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		if (!mlx5_eswitch_prios_supported(esw) || f->common.chain_index)
			return -EOPNOTSUPP;

		/* Re-use the tc offload path by moving the ft flow to the
		 * reserved ft chain.
		 */
		memcpy(&cls_flower, f, sizeof(*f));
		cls_flower.common.chain_index = FDB_FT_CHAIN;
		err = mlx5e_rep_setup_tc_cls_flower(priv, &cls_flower, flags);
		memcpy(&f->stats, &cls_flower.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
static LIST_HEAD(mlx5e_rep_block_ft_cb_list);

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct flow_block_offload *f = type_data;

	f->unlocked_driver_cb = true;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_tc_cb_list,
						  mlx5e_rep_setup_tc_cb,
						  priv, priv, true);
	case TC_SETUP_FT:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_ft_cb_list,
						  mlx5e_rep_setup_ft_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non-vport-rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}

static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}

static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
{
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	return 0;
}

static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
					__be16 vlan_proto)
{
	netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");

	if (vlan != 0)
		return -EOPNOTSUPP;

	/* allow setting 0-vid for compatibility with libvirt */
	return 0;
}

static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	return &rpriv->dl_port;
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
};

static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_set_mac_address     = mlx5e_uplink_rep_set_mac,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_get_devlink_port,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_uplink_rep_change_mtu,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_set_vf_vlan         = mlx5e_uplink_rep_set_vf_vlan,
	.ndo_set_features        = mlx5e_set_features,
};

bool mlx5e_eswitch_rep(struct net_device *netdev)
{
	if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
	    netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
		return true;

	return false;
}

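/* Representor netdevs use a reduced default configuration: a single TC and
 * a smaller default SQ for VF representors; the uplink representor keeps
 * the full default SQ size. RQ and RSS parameters come from the regular
 * mlx5e build helpers.
 */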
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;
	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
	params->sw_mtu      = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->num_tc              = 1;
	params->tunneled_offload_en = false;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* RSS */
	mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		SET_NETDEV_DEV(netdev, mdev->device);
		netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
		/* we want a persistent mac for the uplink rep */
		mlx5_query_mac_address(mdev, netdev->dev_addr);
		netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_rep;
		eth_hw_addr_random(netdev);
		netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->features       |= NETIF_F_NETNS_LOCAL;

	netdev->hw_features    |= NETIF_F_HW_TC;
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	if (rep->vport == MLX5_VPORT_UPLINK)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	else
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	netdev->features |= netdev->hw_features;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

	mlx5e_build_rep_params(netdev);
	mlx5e_build_rep_netdev(netdev);

	mlx5e_timestamp_init(priv);

	return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_netdev_cleanup(priv->netdev, priv);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
		return err;
	}
	return 0;
}

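/* Install the e-switch RX rule that steers traffic arriving from the
 * represented vport to this representor's default (direct) TIR.
 */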
1577 static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
1578 {
1579 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1580 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
1581 	struct mlx5_eswitch_rep *rep = rpriv->rep;
1582 	struct mlx5_flow_handle *flow_rule;
1583 	struct mlx5_flow_destination dest;
1584 
1585 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1586 	dest.tir_num = priv->direct_tir[0].tirn;
1587 	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
1588 						      rep->vport,
1589 						      &dest);
1590 	if (IS_ERR(flow_rule))
1591 		return PTR_ERR(flow_rule);
1592 	rpriv->vport_rx_rule = flow_rule;
1593 	return 0;
1594 }
1595 
1596 static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
1597 {
1598 	struct mlx5_core_dev *mdev = priv->mdev;
1599 	int err;
1600 
1601 	mlx5e_init_l2_addr(priv);
1602 
1603 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
1604 	if (err) {
1605 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
1606 		return err;
1607 	}
1608 
1609 	err = mlx5e_create_indirect_rqt(priv);
1610 	if (err)
1611 		goto err_close_drop_rq;
1612 
1613 	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
1614 	if (err)
1615 		goto err_destroy_indirect_rqts;
1616 
1617 	err = mlx5e_create_indirect_tirs(priv, false);
1618 	if (err)
1619 		goto err_destroy_direct_rqts;
1620 
1621 	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
1622 	if (err)
1623 		goto err_destroy_indirect_tirs;
1624 
1625 	err = mlx5e_create_rep_ttc_table(priv);
1626 	if (err)
1627 		goto err_destroy_direct_tirs;
1628 
1629 	err = mlx5e_create_rep_vport_rx_rule(priv);
1630 	if (err)
1631 		goto err_destroy_ttc_table;
1632 
1633 	return 0;
1634 
1635 err_destroy_ttc_table:
1636 	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1637 err_destroy_direct_tirs:
1638 	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
1639 err_destroy_indirect_tirs:
1640 	mlx5e_destroy_indirect_tirs(priv, false);
1641 err_destroy_direct_rqts:
1642 	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
1643 err_destroy_indirect_rqts:
1644 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1645 err_close_drop_rq:
1646 	mlx5e_close_drop_rq(&priv->drop_rq);
1647 	return err;
1648 }
1649 
1650 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
1651 {
1652 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
1653 
1654 	mlx5_del_flow_rules(rpriv->vport_rx_rule);
1655 	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1656 	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
1657 	mlx5e_destroy_indirect_tirs(priv, false);
1658 	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
1659 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1660 	mlx5e_close_drop_rq(&priv->drop_rq);
1661 }
1662 
1663 static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
1664 {
1665 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
1666 	struct mlx5_rep_uplink_priv *uplink_priv;
1667 	int err;
1668 
1669 	err = mlx5e_create_tises(priv);
1670 	if (err) {
1671 		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
1672 		return err;
1673 	}
1674 
1675 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
1676 		uplink_priv = &rpriv->uplink_priv;
1677 
1678 		mutex_init(&uplink_priv->unready_flows_lock);
1679 		INIT_LIST_HEAD(&uplink_priv->unready_flows);
1680 
1681 		/* init shared tc flow table */
1682 		err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
1683 		if (err)
1684 			goto destroy_tises;
1685 
1686 		mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
1687 
1688 		/* init indirect block notifications */
1689 		INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
1690 		uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
1691 		err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
1692 		if (err) {
1693 			mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
1694 			goto tc_esw_cleanup;
1695 		}
1696 	}
1697 
1698 	return 0;
1699 
1700 tc_esw_cleanup:
1701 	mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
1702 destroy_tises:
1703 	mlx5e_destroy_tises(priv);
1704 	return err;
1705 }
1706 
1707 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
1708 {
1709 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
1710 
1711 	mlx5e_destroy_tises(priv);
1712 
1713 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
1714 		/* clean indirect TC block notifications */
1715 		unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
1716 		mlx5e_rep_indr_clean_block_privs(rpriv);
1717 
1718 		/* delete shared tc flow table */
1719 		mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
1720 		mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
1721 	}
1722 }
1723 
1724 static void mlx5e_rep_enable(struct mlx5e_priv *priv)
1725 {
1726 	mlx5e_set_netdev_mtu_boundaries(priv);
1727 }
1728 
1729 static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
1730 {
1731 	return 0;
1732 }
1733 
1734 static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
1735 {
1736 	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
1737 
1738 	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
1739 		struct mlx5_eqe *eqe = data;
1740 
1741 		switch (eqe->sub_type) {
1742 		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
1743 		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
1744 			queue_work(priv->wq, &priv->update_carrier_work);
1745 			break;
1746 		default:
1747 			return NOTIFY_DONE;
1748 		}
1749 
1750 		return NOTIFY_OK;
1751 	}
1752 
1753 	if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
1754 		struct mlx5e_rep_priv *rpriv = priv->ppriv;
1755 
1756 		queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);
1757 
1758 		return NOTIFY_OK;
1759 	}
1760 
1761 	return NOTIFY_DONE;
1762 }
1763 
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);

	mlx5_lag_add(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
#endif
}

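/* Reverse of mlx5e_uplink_rep_enable(); cancel_work_sync() makes sure
 * no re-offload work is still running once the notifier is gone.
 */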
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
	mlx5_lag_remove(mdev);
}

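/* netdev profiles consumed by mlx5e_create_netdev(). The plain rep
 * profile is deliberately minimal (one TC, no carrier handling); the
 * uplink rep profile also drives the physical port: carrier updates,
 * DCB and async device events.
 */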
static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable			= mlx5e_rep_enable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats		= mlx5e_rep_update_hw_counters,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc			= 1,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable			= mlx5e_uplink_rep_enable,
	.disable		= mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats		= mlx5e_uplink_rep_update_hw_counters,
	.update_carrier		= mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc			= MLX5E_MAX_NUM_TC,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
};

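/* Only uplink, PF and VF vports are modelled as devlink ports; for any
 * other vport the register/unregister helpers below are no-ops.
 */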
static bool
is_devlink_port_supported(const struct mlx5_core_dev *dev,
			  const struct mlx5e_rep_priv *rpriv)
{
	return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
	       rpriv->rep->vport == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
}

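/* Encode a host-unique devlink port index: vhca_id in the upper 16
 * bits, vport number in the lower 16. E.g. vhca_id 0x2 and vport 0x1
 * yield index 0x20001.
 */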
static unsigned int
vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

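/* Pick the devlink port flavour from the vport type: PHYSICAL for the
 * uplink, PCI_PF/PCI_VF for PF and VF reps. The VF number is zero
 * based while VF vport numbers start at 1, hence rep->vport - 1.
 */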
static int register_devlink_port(struct mlx5_core_dev *dev,
				 struct mlx5e_rep_priv *rpriv)
{
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct netdev_phys_item_id ppid = {};
	unsigned int dl_port_index = 0;

	if (!is_devlink_port_supported(dev, rpriv))
		return 0;

	mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);

	if (rep->vport == MLX5_VPORT_UPLINK) {
		devlink_port_attrs_set(&rpriv->dl_port,
				       DEVLINK_PORT_FLAVOUR_PHYSICAL,
				       PCI_FUNC(dev->pdev->devfn), false, 0,
				       &ppid.id[0], ppid.id_len);
		dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
	} else if (rep->vport == MLX5_VPORT_PF) {
		devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
					      &ppid.id[0], ppid.id_len,
					      dev->pdev->devfn);
		dl_port_index = rep->vport;
	} else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
					    rpriv->rep->vport)) {
		devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
					      &ppid.id[0], ppid.id_len,
					      dev->pdev->devfn,
					      rep->vport - 1);
		dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
	}

	return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}

static void unregister_devlink_port(struct mlx5_core_dev *dev,
				    struct mlx5e_rep_priv *rpriv)
{
	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_unregister(&rpriv->dl_port);
}

/* e-Switch vport representors */
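/* Load callback invoked by the eswitch core for each vport rep:
 * allocate rpriv, create the netdev with the matching profile, attach
 * it, then set up neighbour handling, the devlink port and finally
 * register_netdev(). Each error path unwinds everything set up so far,
 * in reverse order.
 */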
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	int nch, err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;

	nch = mlx5e_get_max_num_channels(dev);
	profile = (rep->vport == MLX5_VPORT_UPLINK) ?
		  &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	dev_net_set(netdev, mlx5_core_net(dev));
	rpriv->netdev = netdev;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	if (rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_create_mdev_resources(dev);
		if (err)
			goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_mdev_resources;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err) {
		pr_warn("Failed to initialize neighbours handling for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	err = register_devlink_port(dev, rpriv);
	if (err) {
		esw_warn(dev, "Failed to register devlink port %d\n",
			 rep->vport);
		goto err_neigh_cleanup;
	}

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_devlink_cleanup;
	}

	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_type_eth_set(&rpriv->dl_port, netdev);
	return 0;

err_devlink_cleanup:
	unregister_devlink_port(dev, rpriv);

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_mdev_resources:
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}

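/* Unload callback: undo mlx5e_vport_rep_load() in reverse order.
 * ppriv is saved up front because priv lives in the netdev and is gone
 * after mlx5e_destroy_netdev(), while rpriv must still be freed.
 */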
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	void *ppriv = priv->ppriv;

	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_type_clear(&rpriv->dl_port);
	unregister_netdev(netdev);
	unregister_devlink_port(dev, rpriv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(priv->mdev);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

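/* REP_ETH ops handed to the eswitch core; reps of this type are netdev
 * based, so get_proto_dev hands back the representor netdev.
 */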
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
};

void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
}

void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}