// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/fs.h>

#include "lib/fs_chains.h"
#include "fs_ft_pool.h"
#include "en/mapping.h"
#include "fs_core.h"
#include "en_tc.h"

#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht)
#define tc_default_ft(chains) ((chains)->tc_default_ft)
#define tc_end_ft(chains) ((chains)->tc_end_ft)
#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
				  FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
#define FT_TBL_SZ (64 * 1024)

struct mlx5_fs_chains {
	struct mlx5_core_dev *dev;

	struct rhashtable chains_ht;
	struct rhashtable prios_ht;
	/* Protects above chains_ht and prios_ht */
	struct mutex lock;

	struct mlx5_flow_table *tc_default_ft;
	struct mlx5_flow_table *tc_end_ft;
	struct mapping_ctx *chains_mapping;

	enum mlx5_flow_namespace_type ns;
	u32 group_num;
	u32 flags;
};

struct fs_chain {
	struct rhash_head node;

	u32 chain;

	int ref;
	int id;

	struct mlx5_fs_chains *chains;
	struct list_head prios_list;
	struct mlx5_flow_handle *restore_rule;
	struct mlx5_modify_hdr *miss_modify_hdr;
};

struct prio_key {
	u32 chain;
	u32 prio;
	u32 level;
};

struct prio {
	struct rhash_head node;
	struct list_head list;

	struct prio_key key;

	int ref;

	struct fs_chain *chain;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_table *next_ft;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_handle *miss_rule;
};

static const struct rhashtable_params chain_params = {
	.head_offset = offsetof(struct fs_chain, node),
	.key_offset = offsetof(struct fs_chain, chain),
	.key_len = sizeof_field(struct fs_chain, chain),
	.automatic_shrinking = true,
};

static const struct rhashtable_params prio_params = {
	.head_offset = offsetof(struct prio, node),
	.key_offset = offsetof(struct prio, key),
	.key_len = sizeof_field(struct prio, key),
	.automatic_shrinking = true,
};

bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
{
	return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
}

bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
{
	return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
}

bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
{
	return mlx5_chains_prios_supported(chains) &&
	       mlx5_chains_ignore_flow_level_supported(chains);
}

u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		return 1;

	if (mlx5_chains_ignore_flow_level_supported(chains))
		return UINT_MAX - 1;

	/* We should get here only for eswitch case */
	return FDB_TC_MAX_CHAIN;
}

u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
{
	return mlx5_chains_get_chain_range(chains) + 1;
}

u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		return UINT_MAX;

	if (!chains->dev->priv.eswitch ||
	    chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
		return 1;

	/* We should get here only for eswitch case */
	return FDB_TC_MAX_PRIO;
}

static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		return UINT_MAX;

	/* Same value for FDB and NIC RX tables */
	return FDB_TC_LEVELS_PER_PRIO;
}

void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
		       struct mlx5_flow_table *ft)
{
	tc_end_ft(chains) = ft;
}

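/* Create the flow table backing (chain, prio, level). The root table
 * (chain 0, prio 1, level 0) is always created as an fs_core managed table
 * so it stays connected to the previous fs_core prio; when ignore_flow_level
 * is supported, every other table is created unmanaged at level 1 and is
 * connected only through the explicit miss rules added by the callers.
 */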
static struct mlx5_flow_table *
mlx5_chains_create_table(struct mlx5_fs_chains *chains,
			 u32 chain, u32 prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int sz;

	if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
		ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
				  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
	ft_attr.max_fte = sz;

	/* We use tc_default_ft(chains) as the table's next_ft till
	 * ignore_flow_level is allowed on FT creation and not just for FTEs.
	 * Instead caller should add an explicit miss rule if needed.
	 */
	ft_attr.next_ft = tc_default_ft(chains);

	/* The root table(chain 0, prio 1, level 0) is required to be
	 * connected to the previous fs_core managed prio.
	 * We always create it, as a managed table, in order to align with
	 * fs_core logic.
	 */
	if (!mlx5_chains_ignore_flow_level_supported(chains) ||
	    (chain == 0 && prio == 1 && level == 0)) {
		ft_attr.level = level;
		ft_attr.prio = prio - 1;
		ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
			mlx5_get_fdb_sub_ns(chains->dev, chain) :
			mlx5_get_flow_namespace(chains->dev, chains->ns);
	} else {
		ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
		ft_attr.prio = ns_to_chains_fs_prio(chains->ns);
		/* Firmware doesn't allow us to create another level 0 table,
		 * so we create all unmanaged tables as level 1.
		 *
		 * To connect them, we use explicit miss rules with
		 * ignore_flow_level. Caller is responsible to create
		 * these rules (if needed).
		 */
		ft_attr.level = 1;
		ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
	}

	ft_attr.autogroup.num_reserved_entries = 2;
	ft_attr.autogroup.max_num_groups = chains->group_num;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
			       (int)PTR_ERR(ft), chain, prio, level, sz);
		return ft;
	}

	return ft;
}

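/* Allocate a mapping id for the chain and build the modify header that
 * writes this id into the chain register on a miss. For the FDB namespace,
 * also add the eswitch restore rule that maps the id back to the chain;
 * for NIC RX the id is written to reg_b and read directly by software.
 */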
static int
create_chain_restore(struct fs_chain *chain)
{
	struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_fs_chains *chains = chain->chains;
	enum mlx5e_tc_attr_to_reg mapped_obj_to_reg;
	struct mlx5_modify_hdr *mod_hdr;
	u32 index;
	int err;

	if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
	    !mlx5_chains_prios_supported(chains))
		return 0;

	err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
	if (err)
		return err;
	if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
		/* we got the special default flow tag id, so we won't know
		 * if we actually marked the packet with the restore rule
		 * we create.
		 *
		 * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
		 */
		err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
		mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG);
		if (err)
			return err;
	}

	chain->id = index;

	if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
		mapped_obj_to_reg = MAPPED_OBJ_TO_REG;
		chain->restore_rule = esw_add_restore_rule(esw, chain->id);
		if (IS_ERR(chain->restore_rule)) {
			err = PTR_ERR(chain->restore_rule);
			goto err_rule;
		}
	} else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
		/* For NIC RX we don't need a restore rule
		 * since we write the metadata to reg_b
		 * that is passed to SW directly.
		 */
		mapped_obj_to_reg = NIC_MAPPED_OBJ_TO_REG;
	} else {
		err = -EINVAL;
		goto err_rule;
	}

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field,
		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mfield);
	MLX5_SET(set_action_in, modact, offset,
		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].moffset);
	MLX5_SET(set_action_in, modact, length,
		 mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen == 32 ?
		 0 : mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen);
	MLX5_SET(set_action_in, modact, data, chain->id);
	mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
					   1, modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		goto err_mod_hdr;
	}
	chain->miss_modify_hdr = mod_hdr;

	return 0;

err_mod_hdr:
	if (!IS_ERR_OR_NULL(chain->restore_rule))
		mlx5_del_flow_rules(chain->restore_rule);
err_rule:
	/* Datapath can't find this mapping, so we can safely remove it */
	mapping_remove(chains->chains_mapping, chain->id);
	return err;
}

static void destroy_chain_restore(struct fs_chain *chain)
{
	struct mlx5_fs_chains *chains = chain->chains;

	if (!chain->miss_modify_hdr)
		return;

	if (chain->restore_rule)
		mlx5_del_flow_rules(chain->restore_rule);

	mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
	mapping_remove(chains->chains_mapping, chain->id);
}

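/* Chain objects are reference counted and live in chains_ht; they are
 * created and destroyed under chains_lock through mlx5_chains_get_chain()
 * and mlx5_chains_put_chain().
 */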
static struct fs_chain *
mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
{
	struct fs_chain *chain_s = NULL;
	int err;

	chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
	if (!chain_s)
		return ERR_PTR(-ENOMEM);

	chain_s->chains = chains;
	chain_s->chain = chain;
	INIT_LIST_HEAD(&chain_s->prios_list);

	err = create_chain_restore(chain_s);
	if (err)
		goto err_restore;

	err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
				     chain_params);
	if (err)
		goto err_insert;

	return chain_s;

err_insert:
	destroy_chain_restore(chain_s);
err_restore:
	kvfree(chain_s);
	return ERR_PTR(err);
}

static void
mlx5_chains_destroy_chain(struct fs_chain *chain)
{
	struct mlx5_fs_chains *chains = chain->chains;

	rhashtable_remove_fast(&chains_ht(chains), &chain->node,
			       chain_params);

	destroy_chain_restore(chain);
	kvfree(chain);
}

static struct fs_chain *
mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
{
	struct fs_chain *chain_s;

	chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
					 chain_params);
	if (!chain_s) {
		chain_s = mlx5_chains_create_chain(chains, chain);
		if (IS_ERR(chain_s))
			return chain_s;
	}

	chain_s->ref++;

	return chain_s;
}

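/* Add the default miss rule of a table: forward anything that didn't match
 * to next_ft. When missing from a regular chain to the end table, also set
 * the chain's mapped id via the miss modify header so the chain the packet
 * missed on can be restored later.
 */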
static struct mlx5_flow_handle *
mlx5_chains_add_miss_rule(struct fs_chain *chain,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_table *next_ft)
{
	struct mlx5_fs_chains *chains = chain->chains;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act act = {};

	act.flags  = FLOW_ACT_NO_APPEND;
	if (mlx5_chains_ignore_flow_level_supported(chain->chains))
		act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type  = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = next_ft;

	if (next_ft == tc_end_ft(chains) &&
	    chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
	    mlx5_chains_prios_supported(chains)) {
		act.modify_hdr = chain->miss_modify_hdr;
		act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
}

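/* Repoint the miss rules of all levels of the previous prio (down to its
 * level 0 table) at next_ft. The new rules are added first and the old ones
 * are deleted only once all additions succeeded, so a failure can be
 * reverted cleanly.
 */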
static int
mlx5_chains_update_prio_prevs(struct prio *prio,
			      struct mlx5_flow_table *next_ft)
{
	struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
	struct fs_chain *chain = prio->chain;
	struct prio *pos;
	int n = 0, err;

	if (prio->key.level)
		return 0;

	/* Iterate in reverse order until reaching the level 0 rule of
	 * the previous priority, adding all the miss rules first, so we can
	 * revert them if any of them fails.
	 */
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		miss_rules[n] = mlx5_chains_add_miss_rule(chain,
							  pos->ft,
							  next_ft);
		if (IS_ERR(miss_rules[n])) {
			err = PTR_ERR(miss_rules[n]);
			goto err_prev_rule;
		}

		n++;
		if (!pos->key.level)
			break;
	}

	/* Success, delete old miss rules, and update the pointers. */
	n = 0;
	pos = prio;
	list_for_each_entry_continue_reverse(pos,
					     &chain->prios_list,
					     list) {
		mlx5_del_flow_rules(pos->miss_rule);

		pos->miss_rule = miss_rules[n];
		pos->next_ft = next_ft;

		n++;
		if (!pos->key.level)
			break;
	}

	return 0;

err_prev_rule:
	while (--n >= 0)
		mlx5_del_flow_rules(miss_rules[n]);

	return err;
}

static void
mlx5_chains_put_chain(struct fs_chain *chain)
{
	if (--chain->ref == 0)
		mlx5_chains_destroy_chain(chain);
}

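/* Create the table for (chain, prio, level), reserve a two-entry miss group
 * at its end, point its miss rule at the next prio's level 0 table (or the
 * chain's end/default table), insert it into prios_ht and the chain's sorted
 * prios list, and finally repoint the previous prio's miss rules at it.
 */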
static struct prio *
mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
			u32 chain, u32 prio, u32 level)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_handle *miss_rule;
	struct mlx5_flow_group *miss_group;
	struct mlx5_flow_table *next_ft;
	struct mlx5_flow_table *ft;
	struct fs_chain *chain_s;
	struct list_head *pos;
	struct prio *prio_s;
	u32 *flow_group_in;
	int err;

	chain_s = mlx5_chains_get_chain(chains, chain);
	if (IS_ERR(chain_s))
		return ERR_CAST(chain_s);

	prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!prio_s || !flow_group_in) {
		err = -ENOMEM;
		goto err_alloc;
	}

	/* Chain's prio list is sorted by prio and level.
	 * And all levels of some prio point to the next prio's level 0.
	 * Example list (prio, level):
	 * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
	 * In hardware, we will have the following pointers:
	 * (3,0) -> (5,0) -> (7,0) -> Slow path
	 * (3,1) -> (5,0)
	 * (5,1) -> (7,0)
	 * (6,1) -> (7,0)
	 */

	/* Default miss for each chain: */
	next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
		  tc_default_ft(chains) :
		  tc_end_ft(chains);
	list_for_each(pos, &chain_s->prios_list) {
		struct prio *p = list_entry(pos, struct prio, list);

		/* exit on first pos that is larger */
		if (prio < p->key.prio || (prio == p->key.prio &&
					   level < p->key.level)) {
			/* Get next level 0 table */
			next_ft = p->key.level == 0 ? p->ft : p->next_ft;
			break;
		}
	}

	ft = mlx5_chains_create_table(chains, chain, prio, level);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_create;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
		 ft->max_fte - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft->max_fte - 1);
	miss_group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss_group)) {
		err = PTR_ERR(miss_group);
		goto err_group;
	}

	/* Add miss rule to next_ft */
	miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
	if (IS_ERR(miss_rule)) {
		err = PTR_ERR(miss_rule);
		goto err_miss_rule;
	}

	prio_s->miss_group = miss_group;
	prio_s->miss_rule = miss_rule;
	prio_s->next_ft = next_ft;
	prio_s->chain = chain_s;
	prio_s->key.chain = chain;
	prio_s->key.prio = prio;
	prio_s->key.level = level;
	prio_s->ft = ft;

	err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
				     prio_params);
	if (err)
		goto err_insert;

	list_add(&prio_s->list, pos->prev);

	/* Table is ready, connect it */
	err = mlx5_chains_update_prio_prevs(prio_s, ft);
	if (err)
		goto err_update;

	kvfree(flow_group_in);
	return prio_s;

err_update:
	list_del(&prio_s->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
			       prio_params);
err_insert:
	mlx5_del_flow_rules(miss_rule);
err_miss_rule:
	mlx5_destroy_flow_group(miss_group);
err_group:
	mlx5_destroy_flow_table(ft);
err_create:
err_alloc:
	kvfree(prio_s);
	kvfree(flow_group_in);
	mlx5_chains_put_chain(chain_s);
	return ERR_PTR(err);
}

static void
mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
			 struct prio *prio)
{
	struct fs_chain *chain = prio->chain;

	WARN_ON(mlx5_chains_update_prio_prevs(prio,
					      prio->next_ft));

	list_del(&prio->list);
	rhashtable_remove_fast(&prios_ht(chains), &prio->node,
			       prio_params);
	mlx5_del_flow_rules(prio->miss_rule);
	mlx5_destroy_flow_group(prio->miss_group);
	mlx5_destroy_flow_table(prio->ft);
	mlx5_chains_put_chain(chain);
	kvfree(prio);
}

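/* Get (and create on first use) the table for (chain, prio, level). Lower
 * levels of the same prio are created first so fs_core connects the tables
 * correctly. Each successful call takes a reference; release it with
 * mlx5_chains_put_table(). Typical usage (sketch):
 *
 *	ft = mlx5_chains_get_table(chains, chain, prio, level);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *	...
 *	mlx5_chains_put_table(chains, chain, prio, level);
 */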
struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct mlx5_flow_table *prev_fts;
	struct prio *prio_s;
	struct prio_key key;
	int l = 0;

	if ((chain > mlx5_chains_get_chain_range(chains) &&
	     chain != mlx5_chains_get_nf_ft_chain(chains)) ||
	    prio > mlx5_chains_get_prio_range(chains) ||
	    level > mlx5_chains_get_level_range(chains))
		return ERR_PTR(-EOPNOTSUPP);

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables.
	 */
	for (l = 0; l < level; l++) {
		prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
		if (IS_ERR(prev_fts)) {
			prio_s = ERR_CAST(prev_fts);
			goto err_get_prevs;
		}
	}

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s) {
		prio_s = mlx5_chains_create_prio(chains, chain,
						 prio, level);
		if (IS_ERR(prio_s))
			goto err_create_prio;
	}

	++prio_s->ref;
	mutex_unlock(&chains_lock(chains));

	return prio_s->ft;

err_create_prio:
	mutex_unlock(&chains_lock(chains));
err_get_prevs:
	while (--l >= 0)
		mlx5_chains_put_table(chains, chain, prio, l);
	return ERR_CAST(prio_s);
}

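/* Drop a reference on the (chain, prio, level) table. The prio is destroyed
 * when the last reference goes away, and the references taken on the lower
 * levels by mlx5_chains_get_table() are released as well.
 */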
void
mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
		      u32 level)
{
	struct prio *prio_s;
	struct prio_key key;

	key.chain = chain;
	key.prio = prio;
	key.level = level;

	mutex_lock(&chains_lock(chains));
	prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
					prio_params);
	if (!prio_s)
		goto err_get_prio;

	if (--prio_s->ref == 0)
		mlx5_chains_destroy_prio(chains, prio_s);
	mutex_unlock(&chains_lock(chains));

	while (level-- > 0)
		mlx5_chains_put_table(chains, chain, prio, level);

	return;

err_get_prio:
	mutex_unlock(&chains_lock(chains));
	WARN_ONCE(1,
		  "Couldn't find table: (chain: %d prio: %d level: %d)",
		  chain, prio, level);
}

struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
{
	return tc_end_ft(chains);
}

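/* Create a standalone, unmanaged table at the top of the supported
 * chain/prio/level range. Requires ignore_flow_level, since such a table is
 * reached only through explicit rules added by the caller.
 */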
struct mlx5_flow_table *
mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
{
	u32 chain, prio, level;
	int err;

	if (!mlx5_chains_ignore_flow_level_supported(chains)) {
		err = -EOPNOTSUPP;

		mlx5_core_warn(chains->dev,
			       "Couldn't create global flow table, ignore_flow_level not supported.");
		goto err_ignore;
	}

	chain = mlx5_chains_get_chain_range(chains);
	prio = mlx5_chains_get_prio_range(chains);
	level = mlx5_chains_get_level_range(chains);

	return mlx5_chains_create_table(chains, chain, prio, level);

err_ignore:
	return ERR_PTR(err);
}

void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
				 struct mlx5_flow_table *ft)
{
	mlx5_destroy_flow_table(ft);
}

static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
	struct mlx5_fs_chains *chains_priv;
	u32 max_flow_counter;
	int err;

	chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
	if (!chains_priv)
		return ERR_PTR(-ENOMEM);

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	mlx5_core_dbg(dev,
		      "Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n",
		      max_flow_counter, attr->max_grp_num, attr->max_ft_sz);

	chains_priv->dev = dev;
	chains_priv->flags = attr->flags;
	chains_priv->ns = attr->ns;
	chains_priv->group_num = attr->max_grp_num;
	chains_priv->chains_mapping = attr->mapping;
	tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;

	mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
		       mlx5_chains_get_chain_range(chains_priv),
		       mlx5_chains_get_prio_range(chains_priv));

	err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
	if (err)
		goto init_chains_ht_err;

	err = rhashtable_init(&prios_ht(chains_priv), &prio_params);
	if (err)
		goto init_prios_ht_err;

	mutex_init(&chains_lock(chains_priv));

	return chains_priv;

init_prios_ht_err:
	rhashtable_destroy(&chains_ht(chains_priv));
init_chains_ht_err:
	kfree(chains_priv);
	return ERR_PTR(err);
}

static void
mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
{
	mutex_destroy(&chains_lock(chains));
	rhashtable_destroy(&prios_ht(chains));
	rhashtable_destroy(&chains_ht(chains));

	kfree(chains);
}

struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{
	struct mlx5_fs_chains *chains;

	chains = mlx5_chains_init(dev, attr);

	return chains;
}

void
mlx5_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_cleanup(chains);
}

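/* Allocate a mapping id for the chain, used as restore metadata to identify
 * the chain a packet missed on. Paired with mlx5_chains_put_chain_mapping().
 * Typical usage (sketch):
 *
 *	err = mlx5_chains_get_chain_mapping(chains, chain, &chain_mapping);
 *	...
 *	mlx5_chains_put_chain_mapping(chains, chain_mapping);
 */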
int
mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
			      u32 *chain_mapping)
{
	struct mapping_ctx *ctx = chains->chains_mapping;
	struct mlx5_mapped_obj mapped_obj = {};

	mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN;
	mapped_obj.chain = chain;
	return mapping_add(ctx, &mapped_obj, chain_mapping);
}

int
mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
{
	struct mapping_ctx *ctx = chains->chains_mapping;

	return mapping_remove(ctx, chain_mapping);
}