/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/vport.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>

#define	UPLINK_VPORT 0xFFFF

#define	MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define	esw_info(dev, format, ...) \
	printf("mlx5_core: INFO: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define	esw_warn(dev, format, ...) \
	printf("mlx5_core: WARN: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define	esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node;
	u32 table_index;
	u32 vport;
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node node;
	u8 action;
	u32 vport;
	struct mlx5_flow_handle *flow_rule; /* SRIOV only */
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
};

/* Vport context events */
#define	SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE)

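/*
 * Ask FW to (re)arm the "NIC vport context changed" event for @vport and
 * select which sub-events (UC/MC address list changes) should trigger it;
 * the change handler re-arms it after each sync, see
 * esw_vport_change_handler() below.
 */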
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* E-Switch vport context HW commands */
static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
				       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);

	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

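/*
 * Read back the VST (VLAN Switch Tagging) configuration of @vport.  The
 * cvlan id/pcp are only meaningful while cvlan strip/insert is enabled,
 * so *vlan and *qos default to 0.
 */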
static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				 u16 *vlan, u8 *qos)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
	int err;
	bool cvlan_strip;
	bool cvlan_insert;

	*vlan = 0;
	*qos = 0;

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
	if (err)
		goto out;

	cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
			       esw_vport_context.vport_cvlan_strip);

	cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.vport_cvlan_insert);

	if (cvlan_strip || cvlan_insert) {
		*vlan = MLX5_GET(query_esw_vport_context_out, out,
				 esw_vport_context.cvlan_id);
		*qos = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.cvlan_pcp);
	}

	esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
		  vport, *vlan, *qos);
out:
	return err;
}

static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);

	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

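/*
 * Program VST tagging for @vport: with @set, enable cvlan strip and
 * cvlan insert (insert only applies to packets carrying no VLAN) and
 * latch @vlan/@qos; without @set, clear both.  The field_select bits are
 * written unconditionally so that clearing takes effect as well.
 */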
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, bool set)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
		  vport, vlan, qos, set);

	if (set) {
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

/* E-Switch FDB */
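/*
 * Add an FDB entry steering frames whose destination MAC matches @mac to
 * @vport; used for VF unicast MACs and, with UPLINK_VPORT, for the
 * shared multicast uplink rule.
 */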
static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	u8 *dmac_v;
	u8 *dmac_c;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		printf("mlx5_core: WARN: ""FDB: Failed to alloc flow spec\n");
		goto out;
	}
	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	ether_addr_copy(dmac_v, mac);
	/* Match criteria mask */
	memset(dmac_c, 0xff, 6);

	dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR_OR_NULL(flow_rule)) {
		printf("mlx5_core: WARN: ""FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}
out:
	kfree(spec);
	return flow_rule;
}

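/*
 * Create the SRIOV FDB: one flow table, sized from the FDB capability,
 * with a single group matching on the full destination MAC, so all rules
 * added by esw_fdb_set_vport_rule() land in the same group.
 */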
static int esw_create_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -ENOMEM;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;
	memset(flow_group_in, 0, inlen);

	/* (-2) Since MaorG said so .. */
	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)) - 2;

	ft_attr.prio = 0;
	ft_attr.max_fte = table_size;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR_OR_NULL(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	eth_broadcast_addr(dmac);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}

	esw->fdb_table.addr_grp = g;
	esw->fdb_table.fdb = fdb;
out:
	kfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(fdb))
		mlx5_destroy_flow_table(fdb);
	return err;
}

static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.addr_grp = NULL;
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

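/*
 * UC address add: claim the MAC in the global L2 (MPFS) table so the NIC
 * delivers it and, once SRIOV has created the FDB, also steer it to the
 * owning vport.  A unicast MAC can belong to only one vport at a time.
 */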
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (esw_uc) {
		esw_warn(esw->dev,
			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
			 mac, vport, esw_uc->vport);
		return -EEXIST;
	}

	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
	if (!esw_uc)
		return -ENOMEM;
	esw_uc->vport = vport;

	err = mlx5_mpfs_add_mac(esw->dev, &esw_uc->table_index, mac, 0, 0);
	if (err)
		goto abort;

	if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
	return err;
abort:
	l2addr_hash_del(esw_uc);
	return err;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (!esw_uc || esw_uc->vport != vport) {
		esw_debug(esw->dev,
			  "MAC(%pM) doesn't belong to vport (%d)\n",
			  mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);

	mlx5_mpfs_del_mac(esw->dev, esw_uc->table_index);

	mlx5_del_flow_rules(&vaddr->flow_rule);

	l2addr_hash_del(esw_uc);
	return 0;
}

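/*
 * MC address add: multicast handling is FDB-only.  The first vport
 * interested in @mac creates a refcounted rule forwarding it to the
 * uplink; every interested vport also gets its own forwarding rule.
 */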
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
add:
	esw_mc->refcnt++;
	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	mlx5_del_flow_rules(&vaddr->flow_rule);

	if (--esw_mc->refcnt)
		return 0;

	mlx5_del_flow_rules(&esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;	/* don't leak mac_list on query failure */
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

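/*
 * Deferred work for a vport context change event: re-read the vport's
 * UC/MC address lists from FW, apply the delta to the L2/FDB tables and,
 * if the vport is still enabled, re-arm the change event.
 */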
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NIC_VPORT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NIC_VPORT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NIC_VPORT_LIST_TYPE_MC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NIC_VPORT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

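/*
 * Egress ACL: a two-entry per-vport table.  Flow index 0 (the "allowed
 * vlans" group) matches the VST VLAN; index 1 (the drop group) catches
 * everything else.  The actual rules are installed by
 * esw_vport_egress_config().
 */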
static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	u32 *flow_group_in;
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	ft_attr.max_fte = table_size;
	acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport->vport);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}

static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	mlx5_del_flow_rules(&vport->egress.allowed_vlan);
	mlx5_del_flow_rules(&vport->egress.drop_rule);
}

static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

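/*
 * Ingress ACL: a single-entry per-vport table whose only group matches
 * on the presence of a customer VLAN tag; esw_vport_ingress_config()
 * uses it to drop already-tagged traffic while VST is active.
 */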
static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	int table_size = 1;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	ft_attr.max_fte = table_size;
	acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport->vport);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->ingress.acl = acl;
	vport->ingress.drop_grp = g;
out:
	kfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}

static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	mlx5_del_flow_rules(&vport->ingress.drop_rule);
}

static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
}

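/*
 * (Re)install the VST ingress rule: while a vlan/qos is configured, any
 * packet from the VF that already carries a VLAN tag is dropped.
 */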
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	if (IS_ERR_OR_NULL(vport->ingress.acl)) {
		esw_warn(esw->dev,
			 "vport[%d] configure ingress rules failed, ingress acl is not initialized!\n",
			 vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->vlan && !vport->qos)
		return 0;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);

	flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_DROP;
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	vport->ingress.drop_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		printf("mlx5_core: WARN: ""vport[%d] configure ingress rules, err(%d)\n", vport->vport, err);
		vport->ingress.drop_rule = NULL;
	}
out:
	kfree(spec);
	return err;
}

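/*
 * (Re)install the VST egress rules: an allow entry for frames tagged
 * with the vport's VST VLAN, followed by a catch-all drop entry for
 * everything else.
 */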
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;

	if (IS_ERR_OR_NULL(vport->egress.acl)) {
		esw_warn(esw->dev, "vport[%d] configure egress rules failed, egress acl is not initialized!\n",
			 vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_egress_rules(esw, vport);

	if (!vport->vlan && !vport->qos)
		return 0;

	esw_debug(esw->dev,
		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Allowed vlan rule */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_ALLOW;

	vport->egress.allowed_vlan =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		printf("mlx5_core: WARN: ""vport[%d] configure egress allowed vlan rule failed, err(%d)\n", vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

	flow_act.action = MLX5_FLOW_RULE_FWD_ACTION_DROP;
	vport->egress.drop_rule =
		mlx5_add_flow_rules(vport->egress.acl, NULL,
				    &flow_act, NULL, 0);
	if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
		err = PTR_ERR(vport->egress.drop_rule);
		printf("mlx5_core: WARN: ""vport[%d] configure egress drop rule failed, err(%d)\n", vport->vport, err);
		vport->egress.drop_rule = NULL;
	}
out:
	kfree(spec);
	return err;
}

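/*
 * Bring a vport under E-Switch control: set up its ACLs (VFs only),
 * flip its admin state to AUTO, run one synchronous sync against the
 * current vport context and arm the requested change events.
 */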
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	unsigned long flags;

	mutex_lock(&vport->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
		esw_vport_enable_ingress_acl(esw, vport);
		esw_vport_enable_egress_acl(esw, vport);
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_AUTO);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	esw_vport_change_handler(&vport->vport_change_handler);

	spin_lock_irqsave(&vport->lock, flags);
	vport->enabled = true;
	spin_unlock_irqrestore(&vport->lock, flags);

	arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&vport->state_lock);
}

static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_node *tmp;
	int hi;

	for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}
	esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_UC);

	for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}
	esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_MC);
}

static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	unsigned long flags;

	mutex_lock(&vport->state_lock);
	if (!vport->enabled) {
		mutex_unlock(&vport->state_lock);
		return;
	}

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	spin_lock_irqsave(&vport->lock, flags);
	vport->enabled = false;
	vport->enabled_events = 0;
	spin_unlock_irqrestore(&vport->lock, flags);

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	/* We don't assume VFs will cleanup after themselves */
	esw_cleanup_vport(esw, vport_num);
	if (vport_num) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&vport->state_lock);
}

/* Public E-Switch API */
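/*
 * Enabling SRIOV tears down the PF vport, creates the FDB and then
 * (re)enables vports 0..nvfs with full UC/MC event tracking; on failure
 * the PF vport is restored to its non-SRIOV (UC-only) configuration.
 */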
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
{
	int err;
	int i;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
		return -ENOTSUPP;
	}

	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");

	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");

	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);

	esw_disable_vport(esw, 0);

	err = esw_create_fdb_table(esw);
	if (err)
		goto abort;

	for (i = 0; i <= nvfs; i++)
		esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);

	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
		 esw->enabled_vports);
	return 0;

abort:
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	return err;
}

void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
	int i;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
		 esw->enabled_vports);

	for (i = 0; i < esw->total_vports; i++)
		esw_disable_vport(esw, i);

	esw_destroy_fdb_table(esw);

	/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev, int total_vports)
{
	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
	struct mlx5_eswitch *esw;
	int vport_num;
	int err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	esw_info(dev,
		 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
		 total_vports, l2_table_size,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;

	esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
				       sizeof(uintptr_t), GFP_KERNEL);
	if (!esw->l2_table.bitmap) {
		err = -ENOMEM;
		goto abort;
	}
	esw->l2_table.size = l2_table_size;

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	for (vport_num = 0; vport_num < total_vports; vport_num++) {
		struct mlx5_vport *vport = &esw->vports[vport_num];

		vport->vport = vport_num;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
		spin_lock_init(&vport->lock);
		mutex_init(&vport->state_lock);
	}

	esw->total_vports = total_vports;
	esw->enabled_vports = 0;

	dev->priv.eswitch = esw;
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	/* VF Vports will be enabled when SRIOV is enabled */
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->vports);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "cleanup\n");
	esw_disable_vport(esw, 0);

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->vports);
	kfree(esw);
}

void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
	struct mlx5_vport *vport;

	if (!esw) {
		printf("mlx5_core: WARN: ""MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n", vport_num);
		return;
	}

	vport = &esw->vports[vport_num];
	spin_lock(&vport->lock);
	if (vport->enabled)
		queue_work(esw->work_queue, &vport->vport_change_handler);
	spin_unlock(&vport->lock);
}

/* Vport Administration */
#define	ESW_ALLOWED(esw) \
	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define	LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)

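/*
 * Derive a node GUID from a MAC address using the usual MAC-48 to EUI-64
 * expansion: 0xff, 0xfe is inserted between the OUI and the
 * device-specific bytes.
 */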
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}

int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       int vport, u8 mac[ETH_ALEN])
{
	int err = 0;
	u64 node_guid;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_node_guid vport(%d) err=(%d)\n",
			       vport, err);
		return err;
	}

	return err;
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 int vport, int link_state)
{
	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	return mlx5_modify_vport_admin_state(esw->dev,
					     MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					     vport, link_state);
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  int vport, struct mlx5_esw_vport_info *ivi)
{
	u16 vlan;
	u8 qos;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
	ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
						      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
						      vport);
	query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
	ivi->vlan = vlan;
	ivi->qos = qos;
	ivi->spoofchk = 0;

	return 0;
}

int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				int vport, u16 vlan, u8 qos)
{
	struct mlx5_vport *evport;
	int err = 0;
	int set = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan || qos)
		set = 1;

	evport = &esw->vports[vport];

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
	if (err)
		return err;

	mutex_lock(&evport->state_lock);
	evport->vlan = vlan;
	evport->qos = qos;
	if (evport->enabled) {
		esw_vport_ingress_config(esw, evport);
		esw_vport_egress_config(esw, evport);
	}
	mutex_unlock(&evport->state_lock);
	return err;
}