1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "lib/eq.h"
40 #include "eswitch.h"
41 #include "fs_core.h"
42 #include "ecpf.h"
43 
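/* Pending action on a vport UC/MC address entry; set while syncing the vport
 * context address lists and consumed by esw_apply_vport_addr_list().
 */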
44 enum {
45 	MLX5_ACTION_NONE = 0,
46 	MLX5_ACTION_ADD  = 1,
47 	MLX5_ACTION_DEL  = 2,
48 };
49 
50 /* Vport UC/MC hash node */
51 struct vport_addr {
52 	struct l2addr_node     node;
53 	u8                     action;
54 	u16                    vport;
55 	struct mlx5_flow_handle *flow_rule;
56 	bool mpfs; /* UC MAC was added to MPFS */
57 	/* A flag indicating that the MAC was added due to a mc promiscuous vport */
58 	bool mc_promisc;
59 };
60 
61 enum {
62 	UC_ADDR_CHANGE = BIT(0),
63 	MC_ADDR_CHANGE = BIT(1),
64 	PROMISC_CHANGE = BIT(3),
65 };
66 
67 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
68 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
69 
70 /* Vport context events */
71 #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
72 			    MC_ADDR_CHANGE | \
73 			    PROMISC_CHANGE)
74 
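/* Translate a vport number to its entry in esw->vports. Returns
 * ERR_PTR(-EPERM) when the device is not a vport group manager and
 * ERR_PTR(-EINVAL) when the number maps outside the vport array, so callers
 * must check the result with IS_ERR(), e.g.:
 *
 *	vport = mlx5_eswitch_get_vport(esw, vport_num);
 *	if (IS_ERR(vport))
 *		return PTR_ERR(vport);
 */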
75 struct mlx5_vport *__must_check
76 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
77 {
78 	u16 idx;
79 
80 	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
81 		return ERR_PTR(-EPERM);
82 
83 	idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
84 
85 	if (idx > esw->total_vports - 1) {
86 		esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
87 			  vport_num, idx);
88 		return ERR_PTR(-EINVAL);
89 	}
90 
91 	return &esw->vports[idx];
92 }
93 
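/* Arm the NIC vport context change event for @vport so firmware reports the
 * requested UC/MC address list and promiscuous mode changes; the resulting
 * events are handled by eswitch_vport_event() below.
 */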
94 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
95 					u32 events_mask)
96 {
97 	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
98 	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
99 	void *nic_vport_ctx;
100 
101 	MLX5_SET(modify_nic_vport_context_in, in,
102 		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
103 	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
104 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
105 	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
106 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
107 				     in, nic_vport_context);
108 
109 	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
110 
111 	if (events_mask & UC_ADDR_CHANGE)
112 		MLX5_SET(nic_vport_context, nic_vport_ctx,
113 			 event_on_uc_address_change, 1);
114 	if (events_mask & MC_ADDR_CHANGE)
115 		MLX5_SET(nic_vport_context, nic_vport_ctx,
116 			 event_on_mc_address_change, 1);
117 	if (events_mask & PROMISC_CHANGE)
118 		MLX5_SET(nic_vport_context, nic_vport_ctx,
119 			 event_on_promisc_change, 1);
120 
121 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
122 }
123 
124 /* E-Switch vport context HW commands */
125 static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
126 					void *in, int inlen)
127 {
128 	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
129 
130 	MLX5_SET(modify_esw_vport_context_in, in, opcode,
131 		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
132 	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
133 	MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
134 	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
135 }
136 
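/* Configure VST handling for @vport: strip the C-VLAN and/or insert the
 * given vlan/qos when a frame carries no VLAN tag, as selected by
 * @set_flags. Both relevant ESW capabilities must be present.
 */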
137 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
138 				  u16 vlan, u8 qos, u8 set_flags)
139 {
140 	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
141 
142 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
143 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
144 		return -EOPNOTSUPP;
145 
146 	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
147 		  vport, vlan, qos, set_flags);
148 
149 	if (set_flags & SET_VLAN_STRIP)
150 		MLX5_SET(modify_esw_vport_context_in, in,
151 			 esw_vport_context.vport_cvlan_strip, 1);
152 
153 	if (set_flags & SET_VLAN_INSERT) {
154 		/* insert only if no vlan in packet */
155 		MLX5_SET(modify_esw_vport_context_in, in,
156 			 esw_vport_context.vport_cvlan_insert, 1);
157 
158 		MLX5_SET(modify_esw_vport_context_in, in,
159 			 esw_vport_context.cvlan_pcp, qos);
160 		MLX5_SET(modify_esw_vport_context_in, in,
161 			 esw_vport_context.cvlan_id, vlan);
162 	}
163 
164 	MLX5_SET(modify_esw_vport_context_in, in,
165 		 field_select.vport_cvlan_strip, 1);
166 	MLX5_SET(modify_esw_vport_context_in, in,
167 		 field_select.vport_cvlan_insert, 1);
168 
169 	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
170 }
171 
172 /* E-Switch FDB */
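/* Install a legacy FDB rule forwarding matching traffic to @vport. A full
 * mac_c mask gives an exact DMAC match, a mask of just the multicast bit
 * gives an allmulti rule, and @rx_rule additionally matches on the uplink
 * source port for promiscuous vports (see the wrappers below).
 */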
173 static struct mlx5_flow_handle *
174 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
175 			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
176 {
177 	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
178 			    MLX5_MATCH_OUTER_HEADERS);
179 	struct mlx5_flow_handle *flow_rule = NULL;
180 	struct mlx5_flow_act flow_act = {0};
181 	struct mlx5_flow_destination dest = {};
182 	struct mlx5_flow_spec *spec;
183 	void *mv_misc = NULL;
184 	void *mc_misc = NULL;
185 	u8 *dmac_v = NULL;
186 	u8 *dmac_c = NULL;
187 
188 	if (rx_rule)
189 		match_header |= MLX5_MATCH_MISC_PARAMETERS;
190 
191 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
192 	if (!spec)
193 		return NULL;
194 
195 	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
196 			      outer_headers.dmac_47_16);
197 	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
198 			      outer_headers.dmac_47_16);
199 
200 	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
201 		ether_addr_copy(dmac_v, mac_v);
202 		ether_addr_copy(dmac_c, mac_c);
203 	}
204 
205 	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
206 		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
207 					misc_parameters);
208 		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
209 					misc_parameters);
210 		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
211 		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
212 	}
213 
214 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
215 	dest.vport.num = vport;
216 
217 	esw_debug(esw->dev,
218 		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
219 		  dmac_v, dmac_c, vport);
220 	spec->match_criteria_enable = match_header;
221 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
222 	flow_rule =
223 		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
224 				    &flow_act, &dest, 1);
225 	if (IS_ERR(flow_rule)) {
226 		esw_warn(esw->dev,
227 			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
228 			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
229 		flow_rule = NULL;
230 	}
231 
232 	kvfree(spec);
233 	return flow_rule;
234 }
235 
236 static struct mlx5_flow_handle *
237 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
238 {
239 	u8 mac_c[ETH_ALEN];
240 
241 	eth_broadcast_addr(mac_c);
242 	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
243 }
244 
245 static struct mlx5_flow_handle *
246 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
247 {
248 	u8 mac_c[ETH_ALEN];
249 	u8 mac_v[ETH_ALEN];
250 
251 	eth_zero_addr(mac_c);
252 	eth_zero_addr(mac_v);
253 	mac_c[0] = 0x01;
254 	mac_v[0] = 0x01;
255 	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
256 }
257 
258 static struct mlx5_flow_handle *
259 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
260 {
261 	u8 mac_c[ETH_ALEN];
262 	u8 mac_v[ETH_ALEN];
263 
264 	eth_zero_addr(mac_c);
265 	eth_zero_addr(mac_v);
266 	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
267 }
268 
269 enum {
270 	LEGACY_VEPA_PRIO = 0,
271 	LEGACY_FDB_PRIO,
272 };
273 
274 static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
275 {
276 	struct mlx5_core_dev *dev = esw->dev;
277 	struct mlx5_flow_namespace *root_ns;
278 	struct mlx5_flow_table *fdb;
279 	int err;
280 
281 	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
282 	if (!root_ns) {
283 		esw_warn(dev, "Failed to get FDB flow namespace\n");
284 		return -EOPNOTSUPP;
285 	}
286 
287 	/* num FTE 2, num FG 2 */
288 	fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
289 						  2, 2, 0, 0);
290 	if (IS_ERR(fdb)) {
291 		err = PTR_ERR(fdb);
292 		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
293 		return err;
294 	}
295 	esw->fdb_table.legacy.vepa_fdb = fdb;
296 
297 	return 0;
298 }
299 
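/* The legacy FDB is split into three groups:
 *  - addresses group: exact DMAC matches, entries [0 .. table_size - 3]
 *  - allmulti group:  one entry at table_size - 2 matching the multicast bit
 *  - promisc group:   one entry at table_size - 1 matching the source port
 */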
300 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
301 {
302 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
303 	struct mlx5_flow_table_attr ft_attr = {};
304 	struct mlx5_core_dev *dev = esw->dev;
305 	struct mlx5_flow_namespace *root_ns;
306 	struct mlx5_flow_table *fdb;
307 	struct mlx5_flow_group *g;
308 	void *match_criteria;
309 	int table_size;
310 	u32 *flow_group_in;
311 	u8 *dmac;
312 	int err = 0;
313 
314 	esw_debug(dev, "Create FDB log_max_size(%d)\n",
315 		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
316 
317 	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
318 	if (!root_ns) {
319 		esw_warn(dev, "Failed to get FDB flow namespace\n");
320 		return -EOPNOTSUPP;
321 	}
322 
323 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
324 	if (!flow_group_in)
325 		return -ENOMEM;
326 
327 	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
328 	ft_attr.max_fte = table_size;
329 	ft_attr.prio = LEGACY_FDB_PRIO;
330 	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
331 	if (IS_ERR(fdb)) {
332 		err = PTR_ERR(fdb);
333 		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
334 		goto out;
335 	}
336 	esw->fdb_table.legacy.fdb = fdb;
337 
338 	/* Addresses group : Full match unicast/multicast addresses */
339 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
340 		 MLX5_MATCH_OUTER_HEADERS);
341 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
342 	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
343 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
344 	/* Preserve 2 entries for allmulti and promisc rules */
345 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
346 	eth_broadcast_addr(dmac);
347 	g = mlx5_create_flow_group(fdb, flow_group_in);
348 	if (IS_ERR(g)) {
349 		err = PTR_ERR(g);
350 		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
351 		goto out;
352 	}
353 	esw->fdb_table.legacy.addr_grp = g;
354 
355 	/* Allmulti group : One rule that forwards any mcast traffic */
356 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
357 		 MLX5_MATCH_OUTER_HEADERS);
358 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
359 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
360 	eth_zero_addr(dmac);
361 	dmac[0] = 0x01;
362 	g = mlx5_create_flow_group(fdb, flow_group_in);
363 	if (IS_ERR(g)) {
364 		err = PTR_ERR(g);
365 		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
366 		goto out;
367 	}
368 	esw->fdb_table.legacy.allmulti_grp = g;
369 
370 	/* Promiscuous group :
371 	 * One rule that forwards all unmatched traffic from previous groups
372 	 */
373 	eth_zero_addr(dmac);
374 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
375 		 MLX5_MATCH_MISC_PARAMETERS);
376 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
377 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
378 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
379 	g = mlx5_create_flow_group(fdb, flow_group_in);
380 	if (IS_ERR(g)) {
381 		err = PTR_ERR(g);
382 		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
383 		goto out;
384 	}
385 	esw->fdb_table.legacy.promisc_grp = g;
386 
387 out:
388 	if (err)
389 		esw_destroy_legacy_fdb_table(esw);
390 
391 	kvfree(flow_group_in);
392 	return err;
393 }
394 
395 static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
396 {
397 	esw_debug(esw->dev, "Destroy VEPA Table\n");
398 	if (!esw->fdb_table.legacy.vepa_fdb)
399 		return;
400 
401 	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
402 	esw->fdb_table.legacy.vepa_fdb = NULL;
403 }
404 
405 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
406 {
407 	esw_debug(esw->dev, "Destroy FDB Table\n");
408 	if (!esw->fdb_table.legacy.fdb)
409 		return;
410 
411 	if (esw->fdb_table.legacy.promisc_grp)
412 		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
413 	if (esw->fdb_table.legacy.allmulti_grp)
414 		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
415 	if (esw->fdb_table.legacy.addr_grp)
416 		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
417 	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
418 
419 	esw->fdb_table.legacy.fdb = NULL;
420 	esw->fdb_table.legacy.addr_grp = NULL;
421 	esw->fdb_table.legacy.allmulti_grp = NULL;
422 	esw->fdb_table.legacy.promisc_grp = NULL;
423 }
424 
425 static int esw_create_legacy_table(struct mlx5_eswitch *esw)
426 {
427 	int err;
428 
429 	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
430 
431 	err = esw_create_legacy_vepa_table(esw);
432 	if (err)
433 		return err;
434 
435 	err = esw_create_legacy_fdb_table(esw);
436 	if (err)
437 		esw_destroy_legacy_vepa_table(esw);
438 
439 	return err;
440 }
441 
442 static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
443 {
444 	esw_cleanup_vepa_rules(esw);
445 	esw_destroy_legacy_fdb_table(esw);
446 	esw_destroy_legacy_vepa_table(esw);
447 }
448 
449 /* E-Switch vport UC/MC lists management */
450 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
451 				 struct vport_addr *vaddr);
452 
453 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
454 {
455 	u8 *mac = vaddr->node.addr;
456 	u16 vport = vaddr->vport;
457 	int err;
458 
459 	/* Skip mlx5_mpfs_add_mac for the eswitch manager vport;
460 	 * it is already done by its netdev in mlx5e_execute_l2_action
461 	 */
462 	if (esw->manager_vport == vport)
463 		goto fdb_add;
464 
465 	err = mlx5_mpfs_add_mac(esw->dev, mac);
466 	if (err) {
467 		esw_warn(esw->dev,
468 			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
469 			 mac, vport, err);
470 		return err;
471 	}
472 	vaddr->mpfs = true;
473 
474 fdb_add:
475 	/* SRIOV is enabled: Forward UC MAC to vport */
476 	if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
477 		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
478 
479 	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
480 		  vport, mac, vaddr->flow_rule);
481 
482 	return 0;
483 }
484 
485 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
486 {
487 	u8 *mac = vaddr->node.addr;
488 	u16 vport = vaddr->vport;
489 	int err = 0;
490 
491 	/* Skip mlx5_mpfs_del_mac for the eswitch manager vport;
492 	 * it is already done by its netdev in mlx5e_execute_l2_action
493 	 */
494 	if (!vaddr->mpfs || esw->manager_vport == vport)
495 		goto fdb_del;
496 
497 	err = mlx5_mpfs_del_mac(esw->dev, mac);
498 	if (err)
499 		esw_warn(esw->dev,
500 			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
501 			 mac, vport, err);
502 	vaddr->mpfs = false;
503 
504 fdb_del:
505 	if (vaddr->flow_rule)
506 		mlx5_del_flow_rules(vaddr->flow_rule);
507 	vaddr->flow_rule = NULL;
508 
509 	return 0;
510 }
511 
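/* Propagate a multicast MAC add/delete to every vport that currently has an
 * allmulti rule installed: on ADD the MAC is mirrored into that vport's MC
 * hash with a forwarding rule, on DEL the mirrored entry is removed again.
 * The vport that originated the change is skipped.
 */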
512 static void update_allmulti_vports(struct mlx5_eswitch *esw,
513 				   struct vport_addr *vaddr,
514 				   struct esw_mc_addr *esw_mc)
515 {
516 	u8 *mac = vaddr->node.addr;
517 	struct mlx5_vport *vport;
518 	u16 i, vport_num;
519 
520 	mlx5_esw_for_all_vports(esw, i, vport) {
521 		struct hlist_head *vport_hash = vport->mc_list;
522 		struct vport_addr *iter_vaddr =
523 					l2addr_hash_find(vport_hash,
524 							 mac,
525 							 struct vport_addr);
526 		vport_num = vport->vport;
527 		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
528 		    vaddr->vport == vport_num)
529 			continue;
530 		switch (vaddr->action) {
531 		case MLX5_ACTION_ADD:
532 			if (iter_vaddr)
533 				continue;
534 			iter_vaddr = l2addr_hash_add(vport_hash, mac,
535 						     struct vport_addr,
536 						     GFP_KERNEL);
537 			if (!iter_vaddr) {
538 				esw_warn(esw->dev,
539 					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
540 					 mac, vport_num);
541 				continue;
542 			}
543 			iter_vaddr->vport = vport_num;
544 			iter_vaddr->flow_rule =
545 					esw_fdb_set_vport_rule(esw,
546 							       mac,
547 							       vport_num);
548 			iter_vaddr->mc_promisc = true;
549 			break;
550 		case MLX5_ACTION_DEL:
551 			if (!iter_vaddr)
552 				continue;
553 			mlx5_del_flow_rules(iter_vaddr->flow_rule);
554 			l2addr_hash_del(iter_vaddr);
555 			break;
556 		}
557 	}
558 }
559 
560 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
561 {
562 	struct hlist_head *hash = esw->mc_table;
563 	struct esw_mc_addr *esw_mc;
564 	u8 *mac = vaddr->node.addr;
565 	u16 vport = vaddr->vport;
566 
567 	if (!esw->fdb_table.legacy.fdb)
568 		return 0;
569 
570 	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
571 	if (esw_mc)
572 		goto add;
573 
574 	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
575 	if (!esw_mc)
576 		return -ENOMEM;
577 
578 	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
579 		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
580 
581 	/* Add this multicast mac to all the mc promiscuous vports */
582 	update_allmulti_vports(esw, vaddr, esw_mc);
583 
584 add:
585 	/* If the multicast mac is added as a result of mc promiscuous vport,
586 	 * don't increment the multicast ref count
587 	 */
588 	if (!vaddr->mc_promisc)
589 		esw_mc->refcnt++;
590 
591 	/* Forward MC MAC to vport */
592 	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
593 	esw_debug(esw->dev,
594 		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
595 		  vport, mac, vaddr->flow_rule,
596 		  esw_mc->refcnt, esw_mc->uplink_rule);
597 	return 0;
598 }
599 
600 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
601 {
602 	struct hlist_head *hash = esw->mc_table;
603 	struct esw_mc_addr *esw_mc;
604 	u8 *mac = vaddr->node.addr;
605 	u16 vport = vaddr->vport;
606 
607 	if (!esw->fdb_table.legacy.fdb)
608 		return 0;
609 
610 	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
611 	if (!esw_mc) {
612 		esw_warn(esw->dev,
613 			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)\n",
614 			 mac, vport);
615 		return -EINVAL;
616 	}
617 	esw_debug(esw->dev,
618 		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
619 		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
620 		  esw_mc->uplink_rule);
621 
622 	if (vaddr->flow_rule)
623 		mlx5_del_flow_rules(vaddr->flow_rule);
624 	vaddr->flow_rule = NULL;
625 
626 	/* If the multicast mac is added as a result of mc promiscuous vport,
627 	 * don't decrement the multicast ref count.
628 	 */
629 	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
630 		return 0;
631 
632 	/* Remove this multicast mac from all the mc promiscuous vports */
633 	update_allmulti_vports(esw, vaddr, esw_mc);
634 
635 	if (esw_mc->uplink_rule)
636 		mlx5_del_flow_rules(esw_mc->uplink_rule);
637 
638 	l2addr_hash_del(esw_mc);
639 	return 0;
640 }
641 
642 /* Apply vport UC/MC list to HW l2 table and FDB table */
643 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
644 				      struct mlx5_vport *vport, int list_type)
645 {
646 	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
647 	vport_addr_action vport_addr_add;
648 	vport_addr_action vport_addr_del;
649 	struct vport_addr *addr;
650 	struct l2addr_node *node;
651 	struct hlist_head *hash;
652 	struct hlist_node *tmp;
653 	int hi;
654 
655 	vport_addr_add = is_uc ? esw_add_uc_addr :
656 				 esw_add_mc_addr;
657 	vport_addr_del = is_uc ? esw_del_uc_addr :
658 				 esw_del_mc_addr;
659 
660 	hash = is_uc ? vport->uc_list : vport->mc_list;
661 	for_each_l2hash_node(node, tmp, hash, hi) {
662 		addr = container_of(node, struct vport_addr, node);
663 		switch (addr->action) {
664 		case MLX5_ACTION_ADD:
665 			vport_addr_add(esw, addr);
666 			addr->action = MLX5_ACTION_NONE;
667 			break;
668 		case MLX5_ACTION_DEL:
669 			vport_addr_del(esw, addr);
670 			l2addr_hash_del(addr);
671 			break;
672 		}
673 	}
674 }
675 
676 /* Sync vport UC/MC list from vport context */
677 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
678 				       struct mlx5_vport *vport, int list_type)
679 {
680 	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
681 	u8 (*mac_list)[ETH_ALEN];
682 	struct l2addr_node *node;
683 	struct vport_addr *addr;
684 	struct hlist_head *hash;
685 	struct hlist_node *tmp;
686 	int size;
687 	int err;
688 	int hi;
689 	int i;
690 
691 	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
692 		       MLX5_MAX_MC_PER_VPORT(esw->dev);
693 
694 	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
695 	if (!mac_list)
696 		return;
697 
698 	hash = is_uc ? vport->uc_list : vport->mc_list;
699 
700 	for_each_l2hash_node(node, tmp, hash, hi) {
701 		addr = container_of(node, struct vport_addr, node);
702 		addr->action = MLX5_ACTION_DEL;
703 	}
704 
705 	if (!vport->enabled)
706 		goto out;
707 
708 	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
709 					    mac_list, &size);
710 	if (err)
711 		goto out;
712 	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
713 		  vport->vport, is_uc ? "UC" : "MC", size);
714 
715 	for (i = 0; i < size; i++) {
716 		if (is_uc && !is_valid_ether_addr(mac_list[i]))
717 			continue;
718 
719 		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
720 			continue;
721 
722 		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
723 		if (addr) {
724 			addr->action = MLX5_ACTION_NONE;
725 			/* If this mac was previously added because of allmulti
726 			 * promiscuous rx mode, it is now converted to be the
727 			 * original vport mac.
728 			 */
729 			if (addr->mc_promisc) {
730 				struct esw_mc_addr *esw_mc =
731 					l2addr_hash_find(esw->mc_table,
732 							 mac_list[i],
733 							 struct esw_mc_addr);
734 				if (!esw_mc) {
735 					esw_warn(esw->dev,
736 						 "Failed to find MAC(%pM) in mcast DB\n",
737 						 mac_list[i]);
738 					continue;
739 				}
740 				esw_mc->refcnt++;
741 				addr->mc_promisc = false;
742 			}
743 			continue;
744 		}
745 
746 		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
747 				       GFP_KERNEL);
748 		if (!addr) {
749 			esw_warn(esw->dev,
750 				 "Failed to add MAC(%pM) to vport[%d] DB\n",
751 				 mac_list[i], vport->vport);
752 			continue;
753 		}
754 		addr->vport = vport->vport;
755 		addr->action = MLX5_ACTION_ADD;
756 	}
757 out:
758 	kfree(mac_list);
759 }
760 
761 /* Mirror the eswitch multicast table into this mc promiscuous vport's
762  * MC list. Must be called after esw_update_vport_addr_list().
763  */
764 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
765 					struct mlx5_vport *vport)
766 {
767 	struct l2addr_node *node;
768 	struct vport_addr *addr;
769 	struct hlist_head *hash;
770 	struct hlist_node *tmp;
771 	int hi;
772 
773 	hash = vport->mc_list;
774 
775 	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
776 		u8 *mac = node->addr;
777 
778 		addr = l2addr_hash_find(hash, mac, struct vport_addr);
779 		if (addr) {
780 			if (addr->action == MLX5_ACTION_DEL)
781 				addr->action = MLX5_ACTION_NONE;
782 			continue;
783 		}
784 		addr = l2addr_hash_add(hash, mac, struct vport_addr,
785 				       GFP_KERNEL);
786 		if (!addr) {
787 			esw_warn(esw->dev,
788 				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
789 				 mac, vport->vport);
790 			continue;
791 		}
792 		addr->vport = vport->vport;
793 		addr->action = MLX5_ACTION_ADD;
794 		addr->mc_promisc = true;
795 	}
796 }
797 
798 /* Apply vport rx mode to HW FDB table */
799 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
800 				    struct mlx5_vport *vport,
801 				    bool promisc, bool mc_promisc)
802 {
803 	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
804 
805 	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
806 		goto promisc;
807 
808 	if (mc_promisc) {
809 		vport->allmulti_rule =
810 			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
811 		if (!allmulti_addr->uplink_rule)
812 			allmulti_addr->uplink_rule =
813 				esw_fdb_set_vport_allmulti_rule(esw,
814 								MLX5_VPORT_UPLINK);
815 		allmulti_addr->refcnt++;
816 	} else if (vport->allmulti_rule) {
817 		mlx5_del_flow_rules(vport->allmulti_rule);
818 		vport->allmulti_rule = NULL;
819 
820 		if (--allmulti_addr->refcnt > 0)
821 			goto promisc;
822 
823 		if (allmulti_addr->uplink_rule)
824 			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
825 		allmulti_addr->uplink_rule = NULL;
826 	}
827 
828 promisc:
829 	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
830 		return;
831 
832 	if (promisc) {
833 		vport->promisc_rule =
834 			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
835 	} else if (vport->promisc_rule) {
836 		mlx5_del_flow_rules(vport->promisc_rule);
837 		vport->promisc_rule = NULL;
838 	}
839 }
840 
841 /* Sync vport rx mode from vport context */
842 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
843 				     struct mlx5_vport *vport)
844 {
845 	int promisc_all = 0;
846 	int promisc_uc = 0;
847 	int promisc_mc = 0;
848 	int err;
849 
850 	err = mlx5_query_nic_vport_promisc(esw->dev,
851 					   vport->vport,
852 					   &promisc_uc,
853 					   &promisc_mc,
854 					   &promisc_all);
855 	if (err)
856 		return;
857 	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
858 		  vport->vport, promisc_all, promisc_mc);
859 
860 	if (!vport->info.trusted || !vport->enabled) {
861 		promisc_uc = 0;
862 		promisc_mc = 0;
863 		promisc_all = 0;
864 	}
865 
866 	esw_apply_vport_rx_mode(esw, vport, promisc_all,
867 				(promisc_all || promisc_mc));
868 }
869 
870 static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
871 {
872 	struct mlx5_core_dev *dev = vport->dev;
873 	struct mlx5_eswitch *esw = dev->priv.eswitch;
874 	u8 mac[ETH_ALEN];
875 
876 	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
877 	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
878 		  vport->vport, mac);
879 
880 	if (vport->enabled_events & UC_ADDR_CHANGE) {
881 		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
882 		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
883 	}
884 
885 	if (vport->enabled_events & MC_ADDR_CHANGE)
886 		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
887 
888 	if (vport->enabled_events & PROMISC_CHANGE) {
889 		esw_update_vport_rx_mode(esw, vport);
890 		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
891 			esw_update_vport_mc_promisc(esw, vport);
892 	}
893 
894 	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
895 		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
896 
897 	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
898 	if (vport->enabled)
899 		arm_vport_context_events_cmd(dev, vport->vport,
900 					     vport->enabled_events);
901 }
902 
903 static void esw_vport_change_handler(struct work_struct *work)
904 {
905 	struct mlx5_vport *vport =
906 		container_of(work, struct mlx5_vport, vport_change_handler);
907 	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
908 
909 	mutex_lock(&esw->state_lock);
910 	esw_vport_change_handle_locked(vport);
911 	mutex_unlock(&esw->state_lock);
912 }
913 
914 int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
915 				struct mlx5_vport *vport)
916 {
917 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
918 	struct mlx5_flow_group *vlan_grp = NULL;
919 	struct mlx5_flow_group *drop_grp = NULL;
920 	struct mlx5_core_dev *dev = esw->dev;
921 	struct mlx5_flow_namespace *root_ns;
922 	struct mlx5_flow_table *acl;
923 	void *match_criteria;
924 	u32 *flow_group_in;
925 	/* The egress acl table contains 2 rules:
926 	 * 1) Allow traffic with vlan_tag=vst_vlan_id
927 	 * 2) Drop all other traffic.
928 	 */
929 	int table_size = 2;
930 	int err = 0;
931 
932 	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
933 		return -EOPNOTSUPP;
934 
935 	if (!IS_ERR_OR_NULL(vport->egress.acl))
936 		return 0;
937 
938 	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
939 		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
940 
941 	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
942 						    vport->vport);
943 	if (!root_ns) {
944 		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
945 		return -EOPNOTSUPP;
946 	}
947 
948 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
949 	if (!flow_group_in)
950 		return -ENOMEM;
951 
952 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
953 	if (IS_ERR(acl)) {
954 		err = PTR_ERR(acl);
955 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
956 			 vport->vport, err);
957 		goto out;
958 	}
959 
960 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
961 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
962 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
963 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
964 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
965 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
966 
967 	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
968 	if (IS_ERR(vlan_grp)) {
969 		err = PTR_ERR(vlan_grp);
970 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
971 			 vport->vport, err);
972 		goto out;
973 	}
974 
975 	memset(flow_group_in, 0, inlen);
976 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
977 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
978 	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
979 	if (IS_ERR(drop_grp)) {
980 		err = PTR_ERR(drop_grp);
981 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
982 			 vport->vport, err);
983 		goto out;
984 	}
985 
986 	vport->egress.acl = acl;
987 	vport->egress.drop_grp = drop_grp;
988 	vport->egress.allowed_vlans_grp = vlan_grp;
989 out:
990 	kvfree(flow_group_in);
991 	if (err && !IS_ERR_OR_NULL(vlan_grp))
992 		mlx5_destroy_flow_group(vlan_grp);
993 	if (err && !IS_ERR_OR_NULL(acl))
994 		mlx5_destroy_flow_table(acl);
995 	return err;
996 }
997 
998 void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
999 				    struct mlx5_vport *vport)
1000 {
1001 	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
1002 		mlx5_del_flow_rules(vport->egress.allowed_vlan);
1003 
1004 	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
1005 		mlx5_del_flow_rules(vport->egress.drop_rule);
1006 
1007 	vport->egress.allowed_vlan = NULL;
1008 	vport->egress.drop_rule = NULL;
1009 }
1010 
1011 void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
1012 				  struct mlx5_vport *vport)
1013 {
1014 	if (IS_ERR_OR_NULL(vport->egress.acl))
1015 		return;
1016 
1017 	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
1018 
1019 	esw_vport_cleanup_egress_rules(esw, vport);
1020 	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
1021 	mlx5_destroy_flow_group(vport->egress.drop_grp);
1022 	mlx5_destroy_flow_table(vport->egress.acl);
1023 	vport->egress.allowed_vlans_grp = NULL;
1024 	vport->egress.drop_grp = NULL;
1025 	vport->egress.acl = NULL;
1026 }
1027 
1028 int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1029 				 struct mlx5_vport *vport)
1030 {
1031 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1032 	struct mlx5_core_dev *dev = esw->dev;
1033 	struct mlx5_flow_namespace *root_ns;
1034 	struct mlx5_flow_table *acl;
1035 	struct mlx5_flow_group *g;
1036 	void *match_criteria;
1037 	u32 *flow_group_in;
1038 	/* The ingress acl table contains 4 groups
1039 	 * (2 active rules at the same time -
1040 	 *      1 allow rule from one of the first 3 groups.
1041 	 *      1 drop rule from the last group):
1042 	 * 1) Allow untagged traffic with smac=original mac.
1043 	 * 2) Allow untagged traffic.
1044 	 * 3) Allow traffic with smac=original mac.
1045 	 * 4) Drop all other traffic.
1046 	 */
1047 	int table_size = 4;
1048 	int err = 0;
1049 
1050 	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1051 		return -EOPNOTSUPP;
1052 
1053 	if (!IS_ERR_OR_NULL(vport->ingress.acl))
1054 		return 0;
1055 
1056 	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
1057 		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
1058 
1059 	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
1060 						    vport->vport);
1061 	if (!root_ns) {
1062 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
1063 		return -EOPNOTSUPP;
1064 	}
1065 
1066 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1067 	if (!flow_group_in)
1068 		return -ENOMEM;
1069 
1070 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
1071 	if (IS_ERR(acl)) {
1072 		err = PTR_ERR(acl);
1073 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
1074 			 vport->vport, err);
1075 		goto out;
1076 	}
1077 	vport->ingress.acl = acl;
1078 
1079 	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1080 
1081 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1082 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1083 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1084 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1085 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1086 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1087 
1088 	g = mlx5_create_flow_group(acl, flow_group_in);
1089 	if (IS_ERR(g)) {
1090 		err = PTR_ERR(g);
1091 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
1092 			 vport->vport, err);
1093 		goto out;
1094 	}
1095 	vport->ingress.allow_untagged_spoofchk_grp = g;
1096 
1097 	memset(flow_group_in, 0, inlen);
1098 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1099 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
1100 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1101 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1102 
1103 	g = mlx5_create_flow_group(acl, flow_group_in);
1104 	if (IS_ERR(g)) {
1105 		err = PTR_ERR(g);
1106 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
1107 			 vport->vport, err);
1108 		goto out;
1109 	}
1110 	vport->ingress.allow_untagged_only_grp = g;
1111 
1112 	memset(flow_group_in, 0, inlen);
1113 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
1114 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
1115 	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
1116 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
1117 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
1118 
1119 	g = mlx5_create_flow_group(acl, flow_group_in);
1120 	if (IS_ERR(g)) {
1121 		err = PTR_ERR(g);
1122 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
1123 			 vport->vport, err);
1124 		goto out;
1125 	}
1126 	vport->ingress.allow_spoofchk_only_grp = g;
1127 
1128 	memset(flow_group_in, 0, inlen);
1129 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
1130 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
1131 
1132 	g = mlx5_create_flow_group(acl, flow_group_in);
1133 	if (IS_ERR(g)) {
1134 		err = PTR_ERR(g);
1135 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
1136 			 vport->vport, err);
1137 		goto out;
1138 	}
1139 	vport->ingress.drop_grp = g;
1140 
1141 out:
1142 	if (err) {
1143 		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
1144 			mlx5_destroy_flow_group(
1145 					vport->ingress.allow_spoofchk_only_grp);
1146 		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
1147 			mlx5_destroy_flow_group(
1148 					vport->ingress.allow_untagged_only_grp);
1149 		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
1150 			mlx5_destroy_flow_group(
1151 				vport->ingress.allow_untagged_spoofchk_grp);
1152 		if (!IS_ERR_OR_NULL(vport->ingress.acl))
1153 			mlx5_destroy_flow_table(vport->ingress.acl);
1154 	}
1155 
1156 	kvfree(flow_group_in);
1157 	return err;
1158 }
1159 
1160 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
1161 				     struct mlx5_vport *vport)
1162 {
1163 	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
1164 		mlx5_del_flow_rules(vport->ingress.drop_rule);
1165 
1166 	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
1167 		mlx5_del_flow_rules(vport->ingress.allow_rule);
1168 
1169 	vport->ingress.drop_rule = NULL;
1170 	vport->ingress.allow_rule = NULL;
1171 }
1172 
1173 void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
1174 				   struct mlx5_vport *vport)
1175 {
1176 	if (IS_ERR_OR_NULL(vport->ingress.acl))
1177 		return;
1178 
1179 	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
1180 
1181 	esw_vport_cleanup_ingress_rules(esw, vport);
1182 	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
1183 	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
1184 	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
1185 	mlx5_destroy_flow_group(vport->ingress.drop_grp);
1186 	mlx5_destroy_flow_table(vport->ingress.acl);
1187 	vport->ingress.acl = NULL;
1188 	vport->ingress.drop_grp = NULL;
1189 	vport->ingress.allow_spoofchk_only_grp = NULL;
1190 	vport->ingress.allow_untagged_only_grp = NULL;
1191 	vport->ingress.allow_untagged_spoofchk_grp = NULL;
1192 }
1193 
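/* (Re)build the ingress ACL rules for @vport: one allow rule that accepts
 * only untagged frames (when a VST vlan/qos is set) and/or frames with the
 * vport's own source MAC (when spoofchk is set), followed by a catch-all
 * drop rule with an optional drop counter attached.
 */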
1194 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1195 				    struct mlx5_vport *vport)
1196 {
1197 	struct mlx5_fc *counter = vport->ingress.drop_counter;
1198 	struct mlx5_flow_destination drop_ctr_dst = {0};
1199 	struct mlx5_flow_destination *dst = NULL;
1200 	struct mlx5_flow_act flow_act = {0};
1201 	struct mlx5_flow_spec *spec;
1202 	int dest_num = 0;
1203 	int err = 0;
1204 	u8 *smac_v;
1205 
1206 	esw_vport_cleanup_ingress_rules(esw, vport);
1207 
1208 	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
1209 		esw_vport_disable_ingress_acl(esw, vport);
1210 		return 0;
1211 	}
1212 
1213 	err = esw_vport_enable_ingress_acl(esw, vport);
1214 	if (err) {
1215 		mlx5_core_warn(esw->dev,
1216 			       "failed to enable ingress acl (%d) on vport[%d]\n",
1217 			       err, vport->vport);
1218 		return err;
1219 	}
1220 
1221 	esw_debug(esw->dev,
1222 		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
1223 		  vport->vport, vport->info.vlan, vport->info.qos);
1224 
1225 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1226 	if (!spec) {
1227 		err = -ENOMEM;
1228 		goto out;
1229 	}
1230 
1231 	if (vport->info.vlan || vport->info.qos)
1232 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1233 
1234 	if (vport->info.spoofchk) {
1235 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
1236 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
1237 		smac_v = MLX5_ADDR_OF(fte_match_param,
1238 				      spec->match_value,
1239 				      outer_headers.smac_47_16);
1240 		ether_addr_copy(smac_v, vport->info.mac);
1241 	}
1242 
1243 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1244 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1245 	vport->ingress.allow_rule =
1246 		mlx5_add_flow_rules(vport->ingress.acl, spec,
1247 				    &flow_act, NULL, 0);
1248 	if (IS_ERR(vport->ingress.allow_rule)) {
1249 		err = PTR_ERR(vport->ingress.allow_rule);
1250 		esw_warn(esw->dev,
1251 			 "vport[%d] configure ingress allow rule, err(%d)\n",
1252 			 vport->vport, err);
1253 		vport->ingress.allow_rule = NULL;
1254 		goto out;
1255 	}
1256 
1257 	memset(spec, 0, sizeof(*spec));
1258 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1259 
1260 	/* Attach drop flow counter */
1261 	if (counter) {
1262 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1263 		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1264 		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
1265 		dst = &drop_ctr_dst;
1266 		dest_num++;
1267 	}
1268 	vport->ingress.drop_rule =
1269 		mlx5_add_flow_rules(vport->ingress.acl, spec,
1270 				    &flow_act, dst, dest_num);
1271 	if (IS_ERR(vport->ingress.drop_rule)) {
1272 		err = PTR_ERR(vport->ingress.drop_rule);
1273 		esw_warn(esw->dev,
1274 			 "vport[%d] configure ingress drop rule, err(%d)\n",
1275 			 vport->vport, err);
1276 		vport->ingress.drop_rule = NULL;
1277 		goto out;
1278 	}
1279 
1280 out:
1281 	if (err)
1282 		esw_vport_cleanup_ingress_rules(esw, vport);
1283 	kvfree(spec);
1284 	return err;
1285 }
1286 
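/* (Re)build the egress ACL rules for @vport: allow only frames tagged with
 * the configured VST vlan and drop everything else, attaching the egress
 * drop counter to the drop rule when available.
 */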
1287 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1288 				   struct mlx5_vport *vport)
1289 {
1290 	struct mlx5_fc *counter = vport->egress.drop_counter;
1291 	struct mlx5_flow_destination drop_ctr_dst = {0};
1292 	struct mlx5_flow_destination *dst = NULL;
1293 	struct mlx5_flow_act flow_act = {0};
1294 	struct mlx5_flow_spec *spec;
1295 	int dest_num = 0;
1296 	int err = 0;
1297 
1298 	esw_vport_cleanup_egress_rules(esw, vport);
1299 
1300 	if (!vport->info.vlan && !vport->info.qos) {
1301 		esw_vport_disable_egress_acl(esw, vport);
1302 		return 0;
1303 	}
1304 
1305 	err = esw_vport_enable_egress_acl(esw, vport);
1306 	if (err) {
1307 		mlx5_core_warn(esw->dev,
1308 			       "failed to enable egress acl (%d) on vport[%d]\n",
1309 			       err, vport->vport);
1310 		return err;
1311 	}
1312 
1313 	esw_debug(esw->dev,
1314 		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
1315 		  vport->vport, vport->info.vlan, vport->info.qos);
1316 
1317 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1318 	if (!spec) {
1319 		err = -ENOMEM;
1320 		goto out;
1321 	}
1322 
1323 	/* Allowed vlan rule */
1324 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1325 	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1326 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1327 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
1328 
1329 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1330 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1331 	vport->egress.allowed_vlan =
1332 		mlx5_add_flow_rules(vport->egress.acl, spec,
1333 				    &flow_act, NULL, 0);
1334 	if (IS_ERR(vport->egress.allowed_vlan)) {
1335 		err = PTR_ERR(vport->egress.allowed_vlan);
1336 		esw_warn(esw->dev,
1337 			 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
1338 			 vport->vport, err);
1339 		vport->egress.allowed_vlan = NULL;
1340 		goto out;
1341 	}
1342 
1343 	/* Drop others rule (star rule) */
1344 	memset(spec, 0, sizeof(*spec));
1345 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1346 
1347 	/* Attach egress drop flow counter */
1348 	if (counter) {
1349 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1350 		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1351 		drop_ctr_dst.counter_id = mlx5_fc_id(counter);
1352 		dst = &drop_ctr_dst;
1353 		dest_num++;
1354 	}
1355 	vport->egress.drop_rule =
1356 		mlx5_add_flow_rules(vport->egress.acl, spec,
1357 				    &flow_act, dst, dest_num);
1358 	if (IS_ERR(vport->egress.drop_rule)) {
1359 		err = PTR_ERR(vport->egress.drop_rule);
1360 		esw_warn(esw->dev,
1361 			 "vport[%d] configure egress drop rule failed, err(%d)\n",
1362 			 vport->vport, err);
1363 		vport->egress.drop_rule = NULL;
1364 	}
1365 out:
1366 	kvfree(spec);
1367 	return err;
1368 }
1369 
1370 /* Vport QoS management */
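/* Create the root TSAR (scheduling element) of the E-Switch QoS hierarchy.
 * Per-vport scheduling elements created in esw_vport_enable_qos() are
 * attached under this root and carry the per-vport max_rate/bw_share.
 */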
1371 static int esw_create_tsar(struct mlx5_eswitch *esw)
1372 {
1373 	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1374 	struct mlx5_core_dev *dev = esw->dev;
1375 	int err;
1376 
1377 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
1378 		return 0;
1379 
1380 	if (esw->qos.enabled)
1381 		return -EEXIST;
1382 
1383 	err = mlx5_create_scheduling_element_cmd(dev,
1384 						 SCHEDULING_HIERARCHY_E_SWITCH,
1385 						 tsar_ctx,
1386 						 &esw->qos.root_tsar_id);
1387 	if (err) {
1388 		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
1389 		return err;
1390 	}
1391 
1392 	esw->qos.enabled = true;
1393 	return 0;
1394 }
1395 
1396 static void esw_destroy_tsar(struct mlx5_eswitch *esw)
1397 {
1398 	int err;
1399 
1400 	if (!esw->qos.enabled)
1401 		return;
1402 
1403 	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
1404 						  SCHEDULING_HIERARCHY_E_SWITCH,
1405 						  esw->qos.root_tsar_id);
1406 	if (err)
1407 		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
1408 
1409 	esw->qos.enabled = false;
1410 }
1411 
1412 static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
1413 				struct mlx5_vport *vport,
1414 				u32 initial_max_rate, u32 initial_bw_share)
1415 {
1416 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1417 	struct mlx5_core_dev *dev = esw->dev;
1418 	void *vport_elem;
1419 	int err = 0;
1420 
1421 	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
1422 	    !MLX5_CAP_QOS(dev, esw_scheduling))
1423 		return 0;
1424 
1425 	if (vport->qos.enabled)
1426 		return -EEXIST;
1427 
1428 	MLX5_SET(scheduling_context, sched_ctx, element_type,
1429 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
1430 	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
1431 				  element_attributes);
1432 	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
1433 	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
1434 		 esw->qos.root_tsar_id);
1435 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
1436 		 initial_max_rate);
1437 	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
1438 
1439 	err = mlx5_create_scheduling_element_cmd(dev,
1440 						 SCHEDULING_HIERARCHY_E_SWITCH,
1441 						 sched_ctx,
1442 						 &vport->qos.esw_tsar_ix);
1443 	if (err) {
1444 		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
1445 			 vport->vport, err);
1446 		return err;
1447 	}
1448 
1449 	vport->qos.enabled = true;
1450 	return 0;
1451 }
1452 
1453 static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
1454 				  struct mlx5_vport *vport)
1455 {
1456 	int err;
1457 
1458 	if (!vport->qos.enabled)
1459 		return;
1460 
1461 	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
1462 						  SCHEDULING_HIERARCHY_E_SWITCH,
1463 						  vport->qos.esw_tsar_ix);
1464 	if (err)
1465 		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
1466 			 vport->vport, err);
1467 
1468 	vport->qos.enabled = false;
1469 }
1470 
1471 static int esw_vport_qos_config(struct mlx5_eswitch *esw,
1472 				struct mlx5_vport *vport,
1473 				u32 max_rate, u32 bw_share)
1474 {
1475 	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
1476 	struct mlx5_core_dev *dev = esw->dev;
1477 	void *vport_elem;
1478 	u32 bitmask = 0;
1479 	int err = 0;
1480 
1481 	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
1482 		return -EOPNOTSUPP;
1483 
1484 	if (!vport->qos.enabled)
1485 		return -EIO;
1486 
1487 	MLX5_SET(scheduling_context, sched_ctx, element_type,
1488 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
1489 	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
1490 				  element_attributes);
1491 	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
1492 	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
1493 		 esw->qos.root_tsar_id);
1494 	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
1495 		 max_rate);
1496 	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
1497 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
1498 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
1499 
1500 	err = mlx5_modify_scheduling_element_cmd(dev,
1501 						 SCHEDULING_HIERARCHY_E_SWITCH,
1502 						 sched_ctx,
1503 						 vport->qos.esw_tsar_ix,
1504 						 bitmask);
1505 	if (err) {
1506 		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
1507 			 vport->vport, err);
1508 		return err;
1509 	}
1510 
1511 	return 0;
1512 }
1513 
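/* Build an EUI-64 style node GUID from the MAC: the first three MAC bytes,
 * then 0xff 0xfe, then the last three bytes, written into the u64 from byte
 * 7 down to byte 0 (e.g. 00:11:22:33:44:55 -> 00:11:22:ff:fe:33:44:55).
 */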
1514 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
1515 {
1516 	((u8 *)node_guid)[7] = mac[0];
1517 	((u8 *)node_guid)[6] = mac[1];
1518 	((u8 *)node_guid)[5] = mac[2];
1519 	((u8 *)node_guid)[4] = 0xff;
1520 	((u8 *)node_guid)[3] = 0xfe;
1521 	((u8 *)node_guid)[2] = mac[3];
1522 	((u8 *)node_guid)[1] = mac[4];
1523 	((u8 *)node_guid)[0] = mac[5];
1524 }
1525 
1526 static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
1527 				 struct mlx5_vport *vport)
1528 {
1529 	u16 vport_num = vport->vport;
1530 
1531 	if (esw->manager_vport == vport_num)
1532 		return;
1533 
1534 	mlx5_modify_vport_admin_state(esw->dev,
1535 				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1536 				      vport_num, 1,
1537 				      vport->info.link_state);
1538 
1539 	/* Host PF has its own mac/guid. */
1540 	if (vport_num) {
1541 		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
1542 						  vport->info.mac);
1543 		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
1544 						vport->info.node_guid);
1545 	}
1546 
1547 	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
1548 			       (vport->info.vlan || vport->info.qos));
1549 
1550 	/* Only legacy mode needs ACLs */
1551 	if (esw->mode == SRIOV_LEGACY) {
1552 		esw_vport_ingress_config(esw, vport);
1553 		esw_vport_egress_config(esw, vport);
1554 	}
1555 }
1556 
1557 static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
1558 {
1559 	struct mlx5_core_dev *dev = vport->dev;
1560 
1561 	if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
1562 		vport->ingress.drop_counter = mlx5_fc_create(dev, false);
1563 		if (IS_ERR(vport->ingress.drop_counter)) {
1564 			esw_warn(dev,
1565 				 "vport[%d] configure ingress drop rule counter failed\n",
1566 				 vport->vport);
1567 			vport->ingress.drop_counter = NULL;
1568 		}
1569 	}
1570 
1571 	if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
1572 		vport->egress.drop_counter = mlx5_fc_create(dev, false);
1573 		if (IS_ERR(vport->egress.drop_counter)) {
1574 			esw_warn(dev,
1575 				 "vport[%d] configure egress drop rule counter failed\n",
1576 				 vport->vport);
1577 			vport->egress.drop_counter = NULL;
1578 		}
1579 	}
1580 }
1581 
1582 static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
1583 {
1584 	struct mlx5_core_dev *dev = vport->dev;
1585 
1586 	if (vport->ingress.drop_counter)
1587 		mlx5_fc_destroy(dev, vport->ingress.drop_counter);
1588 	if (vport->egress.drop_counter)
1589 		mlx5_fc_destroy(dev, vport->egress.drop_counter);
1590 }
1591 
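/* Bring a vport under eswitch management: restore its saved configuration,
 * attach it to the eswitch rate limiter, mark it enabled and arm its vport
 * context change events so address/promisc updates start being processed.
 */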
1592 static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
1593 			     int enable_events)
1594 {
1595 	u16 vport_num = vport->vport;
1596 
1597 	mutex_lock(&esw->state_lock);
1598 	WARN_ON(vport->enabled);
1599 
1600 	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
1601 
1602 	/* Create steering drop counters for ingress and egress ACLs */
1603 	if (vport_num && esw->mode == SRIOV_LEGACY)
1604 		esw_vport_create_drop_counters(vport);
1605 
1606 	/* Restore old vport configuration */
1607 	esw_apply_vport_conf(esw, vport);
1608 
1609 	/* Attach vport to the eswitch rate limiter */
1610 	if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
1611 				 vport->qos.bw_share))
1612 		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter\n", vport_num);
1613 
1614 	/* Sync with current vport context */
1615 	vport->enabled_events = enable_events;
1616 	vport->enabled = true;
1617 
1618 	/* The eswitch manager is trusted by default. The host PF (vport 0) is
1619 	 * trusted as well on a SmartNIC, as it is a vport group manager.
1620 	 */
1621 	if (esw->manager_vport == vport_num ||
1622 	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
1623 		vport->info.trusted = true;
1624 
1625 	esw_vport_change_handle_locked(vport);
1626 
1627 	esw->enabled_vports++;
1628 	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
1629 	mutex_unlock(&esw->state_lock);
1630 }
1631 
1632 static void esw_disable_vport(struct mlx5_eswitch *esw,
1633 			      struct mlx5_vport *vport)
1634 {
1635 	u16 vport_num = vport->vport;
1636 
1637 	if (!vport->enabled)
1638 		return;
1639 
1640 	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
1641 	/* Mark this vport as disabled to discard new events */
1642 	vport->enabled = false;
1643 
1644 	/* Wait for current already scheduled events to complete */
1645 	flush_workqueue(esw->work_queue);
1646 	/* Disable events from this vport */
1647 	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
1648 	mutex_lock(&esw->state_lock);
1649 	/* We don't assume VFs will clean up after themselves.
1650 	 * Calling the vport change handler while the vport is disabled will
1651 	 * clean up the vport resources.
1652 	 */
1653 	esw_vport_change_handle_locked(vport);
1654 	vport->enabled_events = 0;
1655 	esw_vport_disable_qos(esw, vport);
1656 	if (esw->manager_vport != vport_num &&
1657 	    esw->mode == SRIOV_LEGACY) {
1658 		mlx5_modify_vport_admin_state(esw->dev,
1659 					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1660 					      vport_num, 1,
1661 					      MLX5_VPORT_ADMIN_STATE_DOWN);
1662 		esw_vport_disable_egress_acl(esw, vport);
1663 		esw_vport_disable_ingress_acl(esw, vport);
1664 		esw_vport_destroy_drop_counters(vport);
1665 	}
1666 	esw->enabled_vports--;
1667 	mutex_unlock(&esw->state_lock);
1668 }
1669 
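/* NIC vport change event notifier. Only queues the per-vport change handler;
 * the actual address list and rx mode sync runs later in
 * esw_vport_change_handler() under esw->state_lock.
 */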
1670 static int eswitch_vport_event(struct notifier_block *nb,
1671 			       unsigned long type, void *data)
1672 {
1673 	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
1674 	struct mlx5_eqe *eqe = data;
1675 	struct mlx5_vport *vport;
1676 	u16 vport_num;
1677 
1678 	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
1679 	vport = mlx5_eswitch_get_vport(esw, vport_num);
1680 	if (IS_ERR(vport))
1681 		return NOTIFY_OK;
1682 
1683 	if (vport->enabled)
1684 		queue_work(esw->work_queue, &vport->vport_change_handler);
1685 
1686 	return NOTIFY_OK;
1687 }
1688 
1689 /* Public E-Switch API */
1690 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
1691 
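/* Enable eswitch handling for SR-IOV. Legacy mode creates the VEPA and L2
 * FDB tables and registers for NIC vport change events; offloads mode
 * initializes the representor model instead and leaves vport events
 * disabled (see the comment above enabled_events below).
 */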
1692 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1693 {
1694 	int vf_nvports = 0, total_nvports = 0;
1695 	struct mlx5_vport *vport;
1696 	int err;
1697 	int i, enabled_events;
1698 
1699 	if (!ESW_ALLOWED(esw) ||
1700 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1701 		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1702 		return -EOPNOTSUPP;
1703 	}
1704 
1705 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
1706 		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
1707 
1708 	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1709 		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
1710 
1711 	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
1712 
1713 	if (mode == SRIOV_OFFLOADS) {
1714 		if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1715 			err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports);
1716 			if (err)
1717 				return err;
1718 			total_nvports = esw->total_vports;
1719 		} else {
1720 			vf_nvports = nvfs;
1721 			total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
1722 		}
1723 	}
1724 
1725 	esw->mode = mode;
1726 
1727 	mlx5_lag_update(esw->dev);
1728 
1729 	if (mode == SRIOV_LEGACY) {
1730 		err = esw_create_legacy_table(esw);
1731 		if (err)
1732 			goto abort;
1733 	} else {
1734 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
1735 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1736 		err = esw_offloads_init(esw, vf_nvports, total_nvports);
1737 	}
1738 
1739 	if (err)
1740 		goto abort;
1741 
1742 	err = esw_create_tsar(esw);
1743 	if (err)
1744 		esw_warn(esw->dev, "Failed to create eswitch TSAR");
1745 
1746 	/* Don't enable vport events in SRIOV_OFFLOADS mode, since:
1747 	 * 1. The L2 table (MPFS) is programmed by the PF/VF representor netdevs' set_rx_mode
1748 	 * 2. The FDB/E-Switch is programmed by user space tools
1749 	 */
1750 	enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0;
1751 
1752 	/* Enable PF vport */
1753 	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
1754 	esw_enable_vport(esw, vport, enabled_events);
1755 
1756 	/* Enable ECPF vports */
1757 	if (mlx5_ecpf_vport_exists(esw->dev)) {
1758 		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
1759 		esw_enable_vport(esw, vport, enabled_events);
1760 	}
1761 
1762 	/* Enable VF vports */
1763 	mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)
1764 		esw_enable_vport(esw, vport, enabled_events);
1765 
1766 	if (mode == SRIOV_LEGACY) {
1767 		MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
1768 		mlx5_eq_notifier_register(esw->dev, &esw->nb);
1769 	}
1770 
1771 	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
1772 		 esw->enabled_vports);
1773 	return 0;
1774 
1775 abort:
1776 	esw->mode = SRIOV_NONE;
1777 
1778 	if (mode == SRIOV_OFFLOADS) {
1779 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1780 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
1781 	}
1782 
1783 	return err;
1784 }
1785 
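/* Tear the E-Switch down: unregister the vport change notifier (legacy
 * mode), disable all vports, remove the multicast promiscuous uplink rule,
 * destroy the TSAR and the mode-specific tables, and return to SRIOV_NONE.
 */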
1786 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1787 {
1788 	struct esw_mc_addr *mc_promisc;
1789 	struct mlx5_vport *vport;
1790 	int old_mode;
1791 	int i;
1792 
1793 	if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE)
1794 		return;
1795 
1796 	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
1797 		 esw->enabled_vports, esw->mode);
1798 
1799 	mc_promisc = &esw->mc_promisc;
1800 
1801 	if (esw->mode == SRIOV_LEGACY)
1802 		mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
1803 
1804 	mlx5_esw_for_all_vports(esw, i, vport)
1805 		esw_disable_vport(esw, vport);
1806 
1807 	if (mc_promisc && mc_promisc->uplink_rule)
1808 		mlx5_del_flow_rules(mc_promisc->uplink_rule);
1809 
1810 	esw_destroy_tsar(esw);
1811 
1812 	if (esw->mode == SRIOV_LEGACY)
1813 		esw_destroy_legacy_table(esw);
1814 	else if (esw->mode == SRIOV_OFFLOADS)
1815 		esw_offloads_cleanup(esw);
1816 
1817 	old_mode = esw->mode;
1818 	esw->mode = SRIOV_NONE;
1819 
1820 	mlx5_lag_update(esw->dev);
1821 
1822 	if (old_mode == SRIOV_OFFLOADS) {
1823 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1824 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
1825 	}
1826 }
1827 
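/* Allocate the eswitch instance for a vport manager device: work queue,
 * per-vport array, representors and offloads hash tables. The eswitch
 * starts out in SRIOV_NONE mode.
 */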
1828 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1829 {
1830 	int total_vports = MLX5_TOTAL_VPORTS(dev);
1831 	struct mlx5_eswitch *esw;
1832 	struct mlx5_vport *vport;
1833 	int err, i;
1834 
1835 	if (!MLX5_VPORT_MANAGER(dev))
1836 		return 0;
1837 
1838 	esw_info(dev,
1839 		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
1840 		 total_vports,
1841 		 MLX5_MAX_UC_PER_VPORT(dev),
1842 		 MLX5_MAX_MC_PER_VPORT(dev));
1843 
1844 	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1845 	if (!esw)
1846 		return -ENOMEM;
1847 
1848 	esw->dev = dev;
1849 	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
1850 
1851 	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1852 	if (!esw->work_queue) {
1853 		err = -ENOMEM;
1854 		goto abort;
1855 	}
1856 
1857 	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
1858 			      GFP_KERNEL);
1859 	if (!esw->vports) {
1860 		err = -ENOMEM;
1861 		goto abort;
1862 	}
1863 
1864 	esw->total_vports = total_vports;
1865 
1866 	err = esw_offloads_init_reps(esw);
1867 	if (err)
1868 		goto abort;
1869 
1870 	hash_init(esw->offloads.encap_tbl);
1871 	hash_init(esw->offloads.mod_hdr_tbl);
1872 	mutex_init(&esw->state_lock);
1873 
1874 	mlx5_esw_for_all_vports(esw, i, vport) {
1875 		vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
1876 		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
1877 		vport->dev = dev;
1878 		INIT_WORK(&vport->vport_change_handler,
1879 			  esw_vport_change_handler);
1880 	}
1881 
1882 	esw->enabled_vports = 0;
1883 	esw->mode = SRIOV_NONE;
1884 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
1885 	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
1886 	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
1887 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
1888 	else
1889 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
1890 
1891 	dev->priv.eswitch = esw;
1892 	return 0;
1893 abort:
1894 	if (esw->work_queue)
1895 		destroy_workqueue(esw->work_queue);
1896 	esw_offloads_cleanup_reps(esw);
1897 	kfree(esw->vports);
1898 	kfree(esw);
1899 	return err;
1900 }
1901 
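/* Free everything allocated by mlx5_eswitch_init(). */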
1902 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1903 {
1904 	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
1905 		return;
1906 
1907 	esw_info(esw->dev, "cleanup\n");
1908 
1909 	esw->dev->priv.eswitch = NULL;
1910 	destroy_workqueue(esw->work_queue);
1911 	esw_offloads_cleanup_reps(esw);
1912 	kfree(esw->vports);
1913 	kfree(esw);
1914 }
1915 
1916 /* Vport Administration */
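/* Set the administrative MAC of a vport. The node GUID is derived from the
 * MAC and programmed as well so RDMA_CM keeps working for the VF; in legacy
 * mode the vport ingress ACL is reconfigured to match the new address.
 */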
1917 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1918 			       u16 vport, u8 mac[ETH_ALEN])
1919 {
1920 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1921 	u64 node_guid;
1922 	int err = 0;
1923 
1924 	if (IS_ERR(evport))
1925 		return PTR_ERR(evport);
1926 	if (is_multicast_ether_addr(mac))
1927 		return -EINVAL;
1928 
1929 	mutex_lock(&esw->state_lock);
1930 
1931 	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
1932 		mlx5_core_warn(esw->dev,
1933 			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
1934 			       vport);
1935 
1936 	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1937 	if (err) {
1938 		mlx5_core_warn(esw->dev,
1939 			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1940 			       vport, err);
1941 		goto unlock;
1942 	}
1943 
1944 	node_guid_gen_from_mac(&node_guid, mac);
1945 	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
1946 	if (err)
1947 		mlx5_core_warn(esw->dev,
1948 			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
1949 			       vport, err);
1950 
1951 	ether_addr_copy(evport->info.mac, mac);
1952 	evport->info.node_guid = node_guid;
1953 	if (evport->enabled && esw->mode == SRIOV_LEGACY)
1954 		err = esw_vport_ingress_config(esw, evport);
1955 
1956 unlock:
1957 	mutex_unlock(&esw->state_lock);
1958 	return err;
1959 }
1960 
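/* Set the administrative link state of a vport and cache it in vport->info
 * so it can be restored when the vport is re-enabled.
 */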
1961 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1962 				 u16 vport, int link_state)
1963 {
1964 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1965 	int err = 0;
1966 
1967 	if (!ESW_ALLOWED(esw))
1968 		return -EPERM;
1969 	if (IS_ERR(evport))
1970 		return PTR_ERR(evport);
1971 
1972 	mutex_lock(&esw->state_lock);
1973 
1974 	err = mlx5_modify_vport_admin_state(esw->dev,
1975 					    MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1976 					    vport, 1, link_state);
1977 	if (err) {
1978 		mlx5_core_warn(esw->dev,
1979 			       "Failed to set vport %d link state, err = %d",
1980 			       vport, err);
1981 		goto unlock;
1982 	}
1983 
1984 	evport->info.link_state = link_state;
1985 
1986 unlock:
1987 	mutex_unlock(&esw->state_lock);
1988 	return err;
1989 }
1990 
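/* Report the cached administrative configuration of a vport (MAC, link
 * state, VLAN/QoS, spoof check, trust and rate limits) in ifla_vf_info form.
 */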
1991 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1992 				  u16 vport, struct ifla_vf_info *ivi)
1993 {
1994 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1995 
1996 	if (IS_ERR(evport))
1997 		return PTR_ERR(evport);
1998 
1999 	memset(ivi, 0, sizeof(*ivi));
2000 	ivi->vf = vport - 1;
2001 
2002 	mutex_lock(&esw->state_lock);
2003 	ether_addr_copy(ivi->mac, evport->info.mac);
2004 	ivi->linkstate = evport->info.link_state;
2005 	ivi->vlan = evport->info.vlan;
2006 	ivi->qos = evport->info.qos;
2007 	ivi->spoofchk = evport->info.spoofchk;
2008 	ivi->trusted = evport->info.trusted;
2009 	ivi->min_tx_rate = evport->info.min_rate;
2010 	ivi->max_tx_rate = evport->info.max_rate;
2011 	mutex_unlock(&esw->state_lock);
2012 
2013 	return 0;
2014 }
2015 
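/* Program a port VLAN (VST) on a vport: update the cvlan strip/insert
 * settings in the esw vport context and, in legacy mode, rebuild the
 * ingress and egress ACLs that enforce them.
 */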
2016 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2017 				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
2018 {
2019 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2020 	int err = 0;
2021 
2022 	if (!ESW_ALLOWED(esw))
2023 		return -EPERM;
2024 	if (IS_ERR(evport))
2025 		return PTR_ERR(evport);
2026 	if (vlan > 4095 || qos > 7)
2027 		return -EINVAL;
2028 
2029 	mutex_lock(&esw->state_lock);
2030 
2031 	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
2032 	if (err)
2033 		goto unlock;
2034 
2035 	evport->info.vlan = vlan;
2036 	evport->info.qos = qos;
2037 	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
2038 		err = esw_vport_ingress_config(esw, evport);
2039 		if (err)
2040 			goto unlock;
2041 		err = esw_vport_egress_config(esw, evport);
2042 	}
2043 
2044 unlock:
2045 	mutex_unlock(&esw->state_lock);
2046 	return err;
2047 }
2048 
2049 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
2050 				u16 vport, u16 vlan, u8 qos)
2051 {
2052 	u8 set_flags = 0;
2053 
2054 	if (vlan || qos)
2055 		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
2056 
2057 	return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
2058 }
2059 
2060 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
2061 				    u16 vport, bool spoofchk)
2062 {
2063 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2064 	bool pschk;
2065 	int err = 0;
2066 
2067 	if (!ESW_ALLOWED(esw))
2068 		return -EPERM;
2069 	if (IS_ERR(evport))
2070 		return PTR_ERR(evport);
2071 
2072 	mutex_lock(&esw->state_lock);
2073 	pschk = evport->info.spoofchk;
2074 	evport->info.spoofchk = spoofchk;
2075 	if (pschk && !is_valid_ether_addr(evport->info.mac))
2076 		mlx5_core_warn(esw->dev,
2077 			       "Spoofchk is set while MAC is invalid, vport(%d)\n",
2078 			       evport->vport);
2079 	if (evport->enabled && esw->mode == SRIOV_LEGACY)
2080 		err = esw_vport_ingress_config(esw, evport);
2081 	if (err)
2082 		evport->info.spoofchk = pschk;
2083 	mutex_unlock(&esw->state_lock);
2084 
2085 	return err;
2086 }
2087 
2088 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
2089 {
2090 	if (esw->fdb_table.legacy.vepa_uplink_rule)
2091 		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
2092 
2093 	if (esw->fdb_table.legacy.vepa_star_rule)
2094 		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
2095 
2096 	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
2097 	esw->fdb_table.legacy.vepa_star_rule = NULL;
2098 }
2099 
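/* Install the legacy VEPA rules when @setting is non-zero, or remove them
 * otherwise: one rule that matches source_port == uplink and forwards that
 * traffic to the legacy FDB, and a catch-all "star" rule that sends
 * everything else back out through the uplink vport. Called with
 * esw->state_lock held.
 */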
2100 static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
2101 					 u8 setting)
2102 {
2103 	struct mlx5_flow_destination dest = {};
2104 	struct mlx5_flow_act flow_act = {};
2105 	struct mlx5_flow_handle *flow_rule;
2106 	struct mlx5_flow_spec *spec;
2107 	int err = 0;
2108 	void *misc;
2109 
2110 	if (!setting) {
2111 		esw_cleanup_vepa_rules(esw);
2112 		return 0;
2113 	}
2114 
2115 	if (esw->fdb_table.legacy.vepa_uplink_rule)
2116 		return 0;
2117 
2118 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2119 	if (!spec)
2120 		return -ENOMEM;
2121 
2122 	/* The uplink rule forwards uplink traffic to the legacy FDB */
2123 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
2124 	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2125 
2126 	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2127 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2128 
2129 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2130 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2131 	dest.ft = esw->fdb_table.legacy.fdb;
2132 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2133 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
2134 					&flow_act, &dest, 1);
2135 	if (IS_ERR(flow_rule)) {
2136 		err = PTR_ERR(flow_rule);
2137 		goto out;
2138 	} else {
2139 		esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
2140 	}
2141 
2142 	/* Star rule to forward all traffic to uplink vport */
2143 	memset(spec, 0, sizeof(*spec));
2144 	memset(&dest, 0, sizeof(dest));
2145 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2146 	dest.vport.num = MLX5_VPORT_UPLINK;
2147 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2148 	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
2149 					&flow_act, &dest, 1);
2150 	if (IS_ERR(flow_rule)) {
2151 		err = PTR_ERR(flow_rule);
2152 		goto out;
2153 	} else {
2154 		esw->fdb_table.legacy.vepa_star_rule = flow_rule;
2155 	}
2156 
2157 out:
2158 	kvfree(spec);
2159 	if (err)
2160 		esw_cleanup_vepa_rules(esw);
2161 	return err;
2162 }
2163 
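/* Enable or disable VEPA; only supported while in legacy SR-IOV mode. */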
2164 int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
2165 {
2166 	int err = 0;
2167 
2168 	if (!esw)
2169 		return -EOPNOTSUPP;
2170 
2171 	if (!ESW_ALLOWED(esw))
2172 		return -EPERM;
2173 
2174 	mutex_lock(&esw->state_lock);
2175 	if (esw->mode != SRIOV_LEGACY) {
2176 		err = -EOPNOTSUPP;
2177 		goto out;
2178 	}
2179 
2180 	err = _mlx5_eswitch_set_vepa_locked(esw, setting);
2181 
2182 out:
2183 	mutex_unlock(&esw->state_lock);
2184 	return err;
2185 }
2186 
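/* Report whether VEPA is enabled, based on the presence of the uplink rule. */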
2187 int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
2188 {
2189 	int err = 0;
2190 
2191 	if (!esw)
2192 		return -EOPNOTSUPP;
2193 
2194 	if (!ESW_ALLOWED(esw))
2195 		return -EPERM;
2196 
2197 	mutex_lock(&esw->state_lock);
2198 	if (esw->mode != SRIOV_LEGACY) {
2199 		err = -EOPNOTSUPP;
2200 		goto out;
2201 	}
2202 
2203 	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
2204 
2205 out:
2206 	mutex_unlock(&esw->state_lock);
2207 	return err;
2208 }
2209 
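/* Set the trust flag of a vport and re-run the change handler so settings
 * that depend on it (e.g. promiscuous modes) are re-evaluated.
 */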
2210 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
2211 				 u16 vport, bool setting)
2212 {
2213 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2214 
2215 	if (!ESW_ALLOWED(esw))
2216 		return -EPERM;
2217 	if (IS_ERR(evport))
2218 		return PTR_ERR(evport);
2219 
2220 	mutex_lock(&esw->state_lock);
2221 	evport->info.trusted = setting;
2222 	if (evport->enabled)
2223 		esw_vport_change_handle_locked(evport);
2224 	mutex_unlock(&esw->state_lock);
2225 
2226 	return 0;
2227 }
2228 
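/* Pick a divider that maps the largest configured min_rate among enabled
 * vports onto the firmware's maximum TSAR bw_share; smaller guarantees then
 * scale down proportionally. Never returns less than 1.
 */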
2229 static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
2230 {
2231 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2232 	struct mlx5_vport *evport;
2233 	u32 max_guarantee = 0;
2234 	int i;
2235 
2236 	mlx5_esw_for_all_vports(esw, i, evport) {
2237 		if (!evport->enabled || evport->info.min_rate < max_guarantee)
2238 			continue;
2239 		max_guarantee = evport->info.min_rate;
2240 	}
2241 
2242 	return max_t(u32, max_guarantee / fw_max_bw_share, 1);
2243 }
2244 
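/* Recompute the TSAR bw_share of every enabled vport from its min_rate and
 * the common divider, pushing only values that actually changed to firmware.
 * Illustrative example (assuming rates in Mbps and MLX5_RATE_TO_BW_SHARE
 * being a clamped rate/divider): with max_tsar_bw_share = 100 and
 * guarantees of 10000 and 2000, the divider is 100 and the resulting
 * bw_share values are 100 and 20.
 */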
2245 static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
2246 {
2247 	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2248 	struct mlx5_vport *evport;
2249 	u32 vport_max_rate;
2250 	u32 vport_min_rate;
2251 	u32 bw_share;
2252 	int err;
2253 	int i;
2254 
2255 	mlx5_esw_for_all_vports(esw, i, evport) {
2256 		if (!evport->enabled)
2257 			continue;
2258 		vport_min_rate = evport->info.min_rate;
2259 		vport_max_rate = evport->info.max_rate;
2260 		bw_share = MLX5_MIN_BW_SHARE;
2261 
2262 		if (vport_min_rate)
2263 			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
2264 							 divider,
2265 							 fw_max_bw_share);
2266 
2267 		if (bw_share == evport->qos.bw_share)
2268 			continue;
2269 
2270 		err = esw_vport_qos_config(esw, evport, vport_max_rate,
2271 					   bw_share);
2272 		if (!err)
2273 			evport->qos.bw_share = bw_share;
2274 		else
2275 			return err;
2276 	}
2277 
2278 	return 0;
2279 }
2280 
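/* Configure vport QoS: min_rate becomes a relative bandwidth guarantee
 * (needs the esw_bw_share capability) and max_rate a hard rate limit (needs
 * esw_rate_limit). Changing a guarantee renormalizes bw_share across all
 * enabled vports.
 */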
2281 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
2282 				u32 max_rate, u32 min_rate)
2283 {
2284 	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
2285 	u32 fw_max_bw_share;
2286 	u32 previous_min_rate;
2287 	u32 divider;
2288 	bool min_rate_supported;
2289 	bool max_rate_supported;
2290 	int err = 0;
2291 
2292 	if (!ESW_ALLOWED(esw))
2293 		return -EPERM;
2294 	if (IS_ERR(evport))
2295 		return PTR_ERR(evport);
2296 
2297 	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2298 	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
2299 				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
2300 	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
2301 
2302 	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
2303 		return -EOPNOTSUPP;
2304 
2305 	mutex_lock(&esw->state_lock);
2306 
2307 	if (min_rate == evport->info.min_rate)
2308 		goto set_max_rate;
2309 
2310 	previous_min_rate = evport->info.min_rate;
2311 	evport->info.min_rate = min_rate;
2312 	divider = calculate_vports_min_rate_divider(esw);
2313 	err = normalize_vports_min_rate(esw, divider);
2314 	if (err) {
2315 		evport->info.min_rate = previous_min_rate;
2316 		goto unlock;
2317 	}
2318 
2319 set_max_rate:
2320 	if (max_rate == evport->info.max_rate)
2321 		goto unlock;
2322 
2323 	err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
2324 	if (!err)
2325 		evport->info.max_rate = max_rate;
2326 
2327 unlock:
2328 	mutex_unlock(&esw->state_lock);
2329 	return err;
2330 }
2331 
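/* Collect drop statistics for a legacy-mode vport: the egress ACL drop
 * counter is reported as rx_dropped and the ingress one as tx_dropped
 * (directions are from the VF's point of view), plus firmware counters of
 * packets discarded while the vport was down, when supported.
 */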
2332 static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
2333 					       struct mlx5_vport *vport,
2334 					       struct mlx5_vport_drop_stats *stats)
2335 {
2336 	struct mlx5_eswitch *esw = dev->priv.eswitch;
2337 	u64 rx_discard_vport_down, tx_discard_vport_down;
2338 	u64 bytes = 0;
2339 	int err = 0;
2340 
2341 	if (!vport->enabled || esw->mode != SRIOV_LEGACY)
2342 		return 0;
2343 
2344 	if (vport->egress.drop_counter)
2345 		mlx5_fc_query(dev, vport->egress.drop_counter,
2346 			      &stats->rx_dropped, &bytes);
2347 
2348 	if (vport->ingress.drop_counter)
2349 		mlx5_fc_query(dev, vport->ingress.drop_counter,
2350 			      &stats->tx_dropped, &bytes);
2351 
2352 	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
2353 	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2354 		return 0;
2355 
2356 	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
2357 					  &rx_discard_vport_down,
2358 					  &tx_discard_vport_down);
2359 	if (err)
2360 		return err;
2361 
2362 	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
2363 		stats->rx_dropped += rx_discard_vport_down;
2364 	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
2365 		stats->tx_dropped += tx_discard_vport_down;
2366 
2367 	return 0;
2368 }
2369 
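/* Fill ifla_vf_stats for a vport by issuing QUERY_VPORT_COUNTER on its
 * behalf (other_vport set) and summing the Ethernet and IB unicast,
 * multicast and broadcast counters, then adding the ACL/vport-down drop
 * statistics.
 */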
2370 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
2371 				 u16 vport_num,
2372 				 struct ifla_vf_stats *vf_stats)
2373 {
2374 	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
2375 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
2376 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
2377 	struct mlx5_vport_drop_stats stats = {0};
2378 	int err = 0;
2379 	u32 *out;
2380 
2381 	if (IS_ERR(vport))
2382 		return PTR_ERR(vport);
2383 
2384 	out = kvzalloc(outlen, GFP_KERNEL);
2385 	if (!out)
2386 		return -ENOMEM;
2387 
2388 	MLX5_SET(query_vport_counter_in, in, opcode,
2389 		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
2390 	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
2391 	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
2392 	MLX5_SET(query_vport_counter_in, in, other_vport, 1);
2393 
2394 	memset(out, 0, outlen);
2395 	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
2396 	if (err)
2397 		goto free_out;
2398 
2399 	#define MLX5_GET_CTR(p, x) \
2400 		MLX5_GET64(query_vport_counter_out, p, x)
2401 
2402 	memset(vf_stats, 0, sizeof(*vf_stats));
2403 	vf_stats->rx_packets =
2404 		MLX5_GET_CTR(out, received_eth_unicast.packets) +
2405 		MLX5_GET_CTR(out, received_ib_unicast.packets) +
2406 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
2407 		MLX5_GET_CTR(out, received_ib_multicast.packets) +
2408 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
2409 
2410 	vf_stats->rx_bytes =
2411 		MLX5_GET_CTR(out, received_eth_unicast.octets) +
2412 		MLX5_GET_CTR(out, received_ib_unicast.octets) +
2413 		MLX5_GET_CTR(out, received_eth_multicast.octets) +
2414 		MLX5_GET_CTR(out, received_ib_multicast.octets) +
2415 		MLX5_GET_CTR(out, received_eth_broadcast.octets);
2416 
2417 	vf_stats->tx_packets =
2418 		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
2419 		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
2420 		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
2421 		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
2422 		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
2423 
2424 	vf_stats->tx_bytes =
2425 		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
2426 		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
2427 		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
2428 		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
2429 		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
2430 
2431 	vf_stats->multicast =
2432 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
2433 		MLX5_GET_CTR(out, received_ib_multicast.packets);
2434 
2435 	vf_stats->broadcast =
2436 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
2437 
2438 	err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
2439 	if (err)
2440 		goto free_out;
2441 	vf_stats->rx_dropped = stats.rx_dropped;
2442 	vf_stats->tx_dropped = stats.tx_dropped;
2443 
2444 free_out:
2445 	kvfree(out);
2446 	return err;
2447 }
2448 
2449 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
2450 {
2451 	return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
2452 }
2453 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
2454 
2455 bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
2456 {
2457 	if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
2458 	     dev1->priv.eswitch->mode == SRIOV_NONE) ||
2459 	    (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
2460 	     dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
2461 		return true;
2462 
2463 	return false;
2464 }
2465 
2466 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
2467 			       struct mlx5_core_dev *dev1)
2468 {
2469 	return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
2470 		dev1->priv.eswitch->mode == SRIOV_OFFLOADS);
2471 }
2472