// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */

#include <linux/netdevice.h>
#include "lag.h"

enum {
	MLX5_LAG_FT_LEVEL_TTC,
	MLX5_LAG_FT_LEVEL_INNER_TTC,
	MLX5_LAG_FT_LEVEL_DEFINER,
};

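/*
 * Create a HASH_SPLIT flow group spanning all @rules entries of @ft.
 * HW hashes each packet over the fields selected by @definer to pick a
 * flow index within the group, i.e. one of the per-bucket rules below.
 */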
static struct mlx5_flow_group *
mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
			    struct mlx5_flow_definer *definer,
			    u8 rules)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *fg;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(create_flow_group_in, in, match_definer_id,
		 mlx5_get_match_definer_id(definer));
	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, in, end_flow_index, rules - 1);
	MLX5_SET(create_flow_group_in, in, group_type,
		 MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);

	fg = mlx5_create_flow_group(ft, in);
	kvfree(in);
	return fg;
}

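/*
 * Build the port selection steering for one definer: a flow table in the
 * PORT_SEL namespace sized ldev->ports * ldev->buckets, a hash flow group
 * covering the whole table, and one rule per (port, bucket) slot that
 * forwards to the uplink of the port stored in ports[] for that slot.
 */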
static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
					  struct mlx5_lag_definer *lag_definer,
					  u8 *ports)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_namespace *ns;
	int err, i;
	int idx;
	int j;

	ft_attr.max_fte = ldev->ports * ldev->buckets;
	ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
	if (!ns) {
		mlx5_core_warn(dev, "Failed to get port selection namespace\n");
		return -EOPNOTSUPP;
	}

	lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(lag_definer->ft)) {
		mlx5_core_warn(dev, "Failed to create port selection table\n");
		return PTR_ERR(lag_definer->ft);
	}

	lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
						      lag_definer->definer,
						      ft_attr.max_fte);
	if (IS_ERR(lag_definer->fg)) {
		err = PTR_ERR(lag_definer->fg);
		goto destroy_ft;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	for (i = 0; i < ldev->ports; i++) {
		for (j = 0; j < ldev->buckets; j++) {
			u8 affinity;

			idx = i * ldev->buckets + j;
			affinity = ports[idx];

			dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
							  vhca_id);
			lag_definer->rules[idx] = mlx5_add_flow_rules(lag_definer->ft,
								      NULL, &flow_act,
								      &dest, 1);
			if (IS_ERR(lag_definer->rules[idx])) {
				err = PTR_ERR(lag_definer->rules[idx]);
				/* Unwind: delete the rules added so far in
				 * this row, then every full row before it.
				 */
				do {
					while (j--) {
						idx = i * ldev->buckets + j;
						mlx5_del_flow_rules(lag_definer->rules[idx]);
					}
					j = ldev->buckets;
				} while (i--);
				goto destroy_fg;
			}
		}
	}

	return 0;

destroy_fg:
	mlx5_destroy_flow_group(lag_definer->fg);
destroy_ft:
	mlx5_destroy_flow_table(lag_definer->ft);
	return err;
}

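/*
 * Fill @match_definer_mask for hashing on the inner (post-decapsulation)
 * headers of traffic type @tt and return the matching HW definer format
 * id: 23 for IPv4/L2 fields, 31 for IPv6 L4, 32 for IPv6 L3.
 */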
static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
				      enum mlx5_traffic_types tt)
{
	int format_id;
	u8 *ipv6;

	switch (tt) {
	case MLX5_TT_IPV4_UDP:
	case MLX5_TT_IPV4_TCP:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l4_dport);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_dest_addr);
		break;
	case MLX5_TT_IPV4:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_ip_dest_addr);
		break;
	case MLX5_TT_IPV6_TCP:
	case MLX5_TT_IPV6_UDP:
		format_id = 31;
		MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
				 inner_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
				 inner_l4_dport);
		ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
				    inner_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
				    inner_ip_src_addr);
		memset(ipv6, 0xff, 16);
		break;
	case MLX5_TT_IPV6:
		format_id = 32;
		ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
				    inner_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
				    inner_ip_src_addr);
		memset(ipv6, 0xff, 16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
				 inner_smac_15_0);
		break;
	default:
		format_id = 23;
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
				 inner_smac_15_0);
		break;
	}

	return format_id;
}

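/*
 * As above but for the outer headers: fill @match_definer_mask according
 * to @tt and the bonding @hash policy and return the HW definer format id
 * (22/29/30, or 0 for the L2-only default). Tunneled hash policies are
 * delegated to the inner-header variant.
 */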
static int mlx5_lag_set_definer(u32 *match_definer_mask,
				enum mlx5_traffic_types tt, bool tunnel,
				enum netdev_lag_hash hash)
{
	int format_id;
	u8 *ipv6;

	if (tunnel)
		return mlx5_lag_set_definer_inner(match_definer_mask, tt);

	switch (tt) {
	case MLX5_TT_IPV4_UDP:
	case MLX5_TT_IPV4_TCP:
		format_id = 22;
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l4_dport);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_dest_addr);
		break;
	case MLX5_TT_IPV4:
		format_id = 22;
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_l3_type);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_smac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_src_addr);
		MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
				 outer_ip_dest_addr);
		break;
	case MLX5_TT_IPV6_TCP:
	case MLX5_TT_IPV6_UDP:
		format_id = 29;
		MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
				 outer_l4_sport);
		MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
				 outer_l4_dport);
		ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
				    outer_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
				    outer_ip_src_addr);
		memset(ipv6, 0xff, 16);
		break;
	case MLX5_TT_IPV6:
		format_id = 30;
		ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
				    outer_ip_dest_addr);
		memset(ipv6, 0xff, 16);
		ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
				    outer_ip_src_addr);
		memset(ipv6, 0xff, 16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_dmac_15_0);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
				 outer_smac_15_0);
		break;
	default:
		format_id = 0;
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_smac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_smac_15_0);

		if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) {
			MLX5_SET_TO_ONES(match_definer_format_0,
					 match_definer_mask,
					 outer_first_vlan_vid);
			break;
		}

		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_ethertype);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_dmac_47_16);
		MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
				 outer_dmac_15_0);
		break;
	}

	return format_id;
}

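/*
 * Allocate a lag definer for (@tt, @tunnel, @hash): build the match mask,
 * create the HW match definer in the PORT_SEL namespace and hang the port
 * selection table with its per-bucket uplink rules off it.
 */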
static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
			enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_definer *lag_definer;
	u32 *match_definer_mask;
	int format_id, err;

	lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
	if (!lag_definer)
		return ERR_PTR(-ENOMEM);

	match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
							match_mask),
				      GFP_KERNEL);
	if (!match_definer_mask) {
		err = -ENOMEM;
		goto free_lag_definer;
	}

	format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
	lag_definer->definer =
		mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
					  format_id, match_definer_mask);
	if (IS_ERR(lag_definer->definer)) {
		err = PTR_ERR(lag_definer->definer);
		goto free_mask;
	}

	err = mlx5_lag_create_port_sel_table(ldev, lag_definer, ports);
	if (err)
		goto destroy_match_definer;

	kvfree(match_definer_mask);

	return lag_definer;

destroy_match_definer:
	mlx5_destroy_match_definer(dev, lag_definer->definer);
free_mask:
	kvfree(match_definer_mask);
free_lag_definer:
	kfree(lag_definer);
	return ERR_PTR(err);
}

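/* Tear down a single definer in the reverse order of its creation. */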
static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
				     struct mlx5_lag_definer *lag_definer)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	int idx;
	int i;
	int j;

	for (i = 0; i < ldev->ports; i++) {
		for (j = 0; j < ldev->buckets; j++) {
			idx = i * ldev->buckets + j;
			mlx5_del_flow_rules(lag_definer->rules[idx]);
		}
	}
	mlx5_destroy_flow_group(lag_definer->fg);
	mlx5_destroy_flow_table(lag_definer->ft);
	mlx5_destroy_match_definer(dev, lag_definer->definer);
	kfree(lag_definer);
}

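/* Destroy the outer and (if present) inner definer of every mapped TT. */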
static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int tt;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		if (port_sel->outer.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->outer.definers[tt]);
		if (port_sel->inner.definers[tt])
			mlx5_lag_destroy_definer(ldev,
						 port_sel->inner.definers[tt]);
	}
}

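/*
 * Create an outer-header definer for each traffic type in tt_map, plus an
 * inner-header one when the hash policy also covers encapsulated traffic.
 * On failure everything built so far is destroyed; the NULL checks in
 * mlx5_lag_destroy_definers() make the partial teardown safe.
 */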
static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
				    enum netdev_lag_hash hash_type,
				    u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_lag_definer *lag_definer;
	int tt, err;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
						      false, ports);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->outer.definers[tt] = lag_definer;

		if (!port_sel->tunnel)
			continue;

		lag_definer =
			mlx5_lag_create_definer(ldev, hash_type, tt,
						true, ports);
		if (IS_ERR(lag_definer)) {
			err = PTR_ERR(lag_definer);
			goto destroy_definers;
		}
		port_sel->inner.definers[tt] = lag_definer;
	}

	return 0;

destroy_definers:
	mlx5_lag_destroy_definers(ldev);
	return err;
}

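/*
 * Translate the bonding xmit hash policy into the set of traffic types to
 * split on. The encapsulation-aware policies (E23/E34) additionally enable
 * inner-header hashing via port_sel->tunnel.
 */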
static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
		       enum netdev_lag_hash hash)
{
	port_sel->tunnel = false;

	switch (hash) {
	case NETDEV_LAG_HASH_E34:
		port_sel->tunnel = true;
		fallthrough;
	case NETDEV_LAG_HASH_L34:
		set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map);
		set_bit(MLX5_TT_IPV4, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6, port_sel->tt_map);
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	case NETDEV_LAG_HASH_E23:
		port_sel->tunnel = true;
		fallthrough;
	case NETDEV_LAG_HASH_L23:
		set_bit(MLX5_TT_IPV4, port_sel->tt_map);
		set_bit(MLX5_TT_IPV6, port_sel->tt_map);
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	default:
		set_bit(MLX5_TT_ANY, port_sel->tt_map);
		break;
	}
}

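/* Mark every TT absent from @tt_map as ignored so the TTC code does not
 * expect a destination for it.
 */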
#define SET_IGNORE_DESTS_BITS(tt_map, dests)				\
	do {								\
		int idx;						\
									\
		for_each_clear_bit(idx, tt_map, MLX5_NUM_TT)		\
			set_bit(idx, dests);				\
	} while (0)

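/* Point each enabled TT of the inner TTC table at its inner definer table. */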
static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
					  struct ttc_params *ttc_params)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_flow_table_attr *ft_attr;
	int tt;

	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
	ft_attr = &ttc_params->ft_attr;
	ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		ttc_params->dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft;
	}
	SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
}

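/*
 * Point each enabled TT of the outer TTC table at its outer definer table.
 * With tunnel hashing enabled, all tunneled traffic types are sent to the
 * inner TTC table for a second classification on the inner headers.
 */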
static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
					  struct ttc_params *ttc_params)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct mlx5_flow_table_attr *ft_attr;
	int tt;

	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
	ft_attr = &ttc_params->ft_attr;
	ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		ttc_params->dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft;
	}
	SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);

	ttc_params->inner_ttc = port_sel->tunnel;
	if (!port_sel->tunnel)
		return;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		ttc_params->tunnel_dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->tunnel_dests[tt].ft =
			mlx5_get_ttc_flow_table(port_sel->inner.ttc);
	}
}

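/* Create the outer TTC table, the entry level of the port selection steering. */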
static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct ttc_params ttc_params = {};

	mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
	port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
	return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
}

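/* Create the inner TTC table used when hashing on encapsulated headers. */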
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	struct ttc_params ttc_params = {};

	mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
	port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
	return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
}

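/*
 * Entry point: build the TT map for @hash_type, create the definers with
 * their port selection tables, then the inner (tunnel only) and outer TTC
 * tables on top. @ports holds the initial port affinity per bucket.
 */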
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
			     enum netdev_lag_hash hash_type, u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;

	set_tt_map(port_sel, hash_type);
	err = mlx5_lag_create_definers(ldev, hash_type, ports);
	if (err)
		return err;

	if (port_sel->tunnel) {
		err = mlx5_lag_create_inner_ttc_table(ldev);
		if (err)
			goto destroy_definers;
	}

	err = mlx5_lag_create_ttc_table(ldev);
	if (err)
		goto destroy_inner;

	return 0;

destroy_inner:
	if (port_sel->tunnel)
		mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
	mlx5_lag_destroy_definers(ldev);
	return err;
}

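/*
 * Re-point the rules of one definer after a port affinity change: compare
 * ldev->v2p_map against the new @ports mapping and only touch the buckets
 * that actually moved to a different uplink.
 */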
static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
						   struct mlx5_lag_definer *def,
						   u8 *ports)
{
	struct mlx5_flow_destination dest = {};
	int idx;
	int err;
	int i;
	int j;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	for (i = 0; i < ldev->ports; i++) {
		for (j = 0; j < ldev->buckets; j++) {
			idx = i * ldev->buckets + j;
			if (ldev->v2p_map[idx] == ports[idx])
				continue;

			dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev,
							  vhca_id);
			err = mlx5_modify_rule_destination(def->rules[idx], &dest, NULL);
			if (err)
				return err;
		}
	}

	return 0;
}

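/* Apply the new mapping to the definer of every traffic type in tt_map. */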
static int
mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
				      struct mlx5_lag_definer **definers,
				      u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;
	int tt;

	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
		err = __mlx5_lag_modify_definers_destinations(ldev, definers[tt], ports);
		if (err)
			return err;
	}

	return 0;
}

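/* Update the outer and, for tunnel hashing, inner definers with a new
 * port-to-bucket mapping (e.g. after a port state change).
 */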
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
	int err;

	err = mlx5_lag_modify_definers_destinations(ldev,
						    port_sel->outer.definers,
						    ports);
	if (err)
		return err;

	if (!port_sel->tunnel)
		return 0;

	return mlx5_lag_modify_definers_destinations(ldev,
						     port_sel->inner.definers,
						     ports);
}

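/* Destroy all port selection steering and reset the state for reuse. */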
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
{
	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;

	mlx5_destroy_ttc_table(port_sel->outer.ttc);
	if (port_sel->tunnel)
		mlx5_destroy_ttc_table(port_sel->inner.ttc);
	mlx5_lag_destroy_definers(ldev);
	memset(port_sel, 0, sizeof(*port_sel));
}