// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/skbuff.h>
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
#include "en/tc/act/sample.h"
#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
#include "en_tc.h"
#include "fs_core.h"

#define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)

static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
	.max_num_groups = 0,    /* default num of groups */
	.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP,
};

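/* Per-eswitch sampling context: the shared termination table and its
 * forwarding rule, plus hash tables of flow sampler objects and reg_c0
 * restore contexts that are shared between offloaded sample flows.
 */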
struct mlx5e_tc_psample {
	struct mlx5_eswitch *esw;
	struct mlx5_flow_table *termtbl;
	struct mlx5_flow_handle *termtbl_rule;
	DECLARE_HASHTABLE(hashtbl, 8);
	struct mutex ht_lock; /* protect hashtbl */
	DECLARE_HASHTABLE(restore_hashtbl, 8);
	struct mutex restore_lock; /* protect restore_hashtbl */
	struct mlx5e_post_act *post_act;
};

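/* A HW flow sampler object, shared (refcounted) by all flows that use the
 * same sample ratio and default table.
 */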
struct mlx5e_sampler {
	struct hlist_node hlist;
	u32 sampler_id;
	u32 sample_ratio;
	u32 sample_table_id;
	u32 default_table_id;
	int count;
};

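/* Per-flow sample offload state: the sampler and restore context in use,
 * the rule installed in the original table (pre) and, when a per vport
 * default table is used, the rule installed there (post).
 */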
struct mlx5e_sample_flow {
	struct mlx5e_sampler *sampler;
	struct mlx5e_sample_restore *restore;
	struct mlx5_flow_attr *pre_attr;
	struct mlx5_flow_handle *pre_rule;
	struct mlx5_flow_attr *post_attr;
	struct mlx5_flow_handle *post_rule;
};

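/* reg_c0 restore context, shared (refcounted) by flows mapped to the same
 * obj_id: a header rewrite that writes obj_id into reg_c0 and an eswitch
 * restore rule that matches it, so the sample parameters can be recovered
 * when the packet reaches software.
 */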
struct mlx5e_sample_restore {
	struct hlist_node hlist;
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5_flow_handle *rule;
	u32 obj_id;
	int count;
};

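/* Create a single-FTE termination table in the FDB slow path, with a rule
 * that forwards all sampled copies to the eswitch manager vport, from where
 * they are delivered to software (see mlx5e_tc_sample_skb()).
 */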
static int
sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample)
{
	struct mlx5_eswitch *esw = tc_psample->esw;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_destination dest = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_act act = {};
	int err;

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, termination_table)) {
		mlx5_core_warn(dev, "termination table is not supported\n");
		return -EOPNOTSUPP;
	}

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		mlx5_core_warn(dev, "failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.prio = FDB_SLOW_PATH;
	ft_attr.max_fte = 1;
	ft_attr.level = 1;
	tc_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(tc_psample->termtbl)) {
		err = PTR_ERR(tc_psample->termtbl);
		mlx5_core_warn(dev, "failed to create termtbl, err: %d\n", err);
		return err;
	}

	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1);
	if (IS_ERR(tc_psample->termtbl_rule)) {
		err = PTR_ERR(tc_psample->termtbl_rule);
		mlx5_core_warn(dev, "failed to create termtbl rule, err: %d\n", err);
		mlx5_destroy_flow_table(tc_psample->termtbl);
		return err;
	}

	return 0;
}

static void
sampler_termtbl_destroy(struct mlx5e_tc_psample *tc_psample)
{
	mlx5_del_flow_rules(tc_psample->termtbl_rule);
	mlx5_destroy_flow_table(tc_psample->termtbl);
}

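/* Create the flow sampler general object in FW. The sampler duplicates
 * 1 out of every sample_ratio packets to sample_table_id (the termination
 * table) and sends the original packet to default_table_id.
 */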
static int
sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5e_sampler *sampler)
{
	u32 in[MLX5_ST_SZ_DW(create_sampler_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u64 general_obj_types;
	void *obj;
	int err;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER))
		return -EOPNOTSUPP;
	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(create_sampler_obj_in, in, sampler_object);
	MLX5_SET(sampler_obj, obj, table_type, FS_FT_FDB);
	MLX5_SET(sampler_obj, obj, ignore_flow_level, 1);
	MLX5_SET(sampler_obj, obj, level, 1);
	MLX5_SET(sampler_obj, obj, sample_ratio, sampler->sample_ratio);
	MLX5_SET(sampler_obj, obj, sample_table_id, sampler->sample_table_id);
	MLX5_SET(sampler_obj, obj, default_table_id, sampler->default_table_id);
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sampler->sampler_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

static void
sampler_obj_destroy(struct mlx5_core_dev *mdev, u32 sampler_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

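/* Sampler objects are keyed by (sample_ratio, default_table_id) and shared
 * across flows; sampler_get()/sampler_put() manage the refcount under
 * ht_lock.
 */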
static u32
sampler_hash(u32 sample_ratio, u32 default_table_id)
{
	return jhash_2words(sample_ratio, default_table_id, 0);
}

static int
sampler_cmp(u32 sample_ratio1, u32 default_table_id1, u32 sample_ratio2, u32 default_table_id2)
{
	return sample_ratio1 != sample_ratio2 || default_table_id1 != default_table_id2;
}

static struct mlx5e_sampler *
sampler_get(struct mlx5e_tc_psample *tc_psample, u32 sample_ratio, u32 default_table_id)
{
	struct mlx5e_sampler *sampler;
	u32 hash_key;
	int err;

	mutex_lock(&tc_psample->ht_lock);
	hash_key = sampler_hash(sample_ratio, default_table_id);
	hash_for_each_possible(tc_psample->hashtbl, sampler, hlist, hash_key)
		if (!sampler_cmp(sampler->sample_ratio, sampler->default_table_id,
				 sample_ratio, default_table_id))
			goto add_ref;

	sampler = kzalloc(sizeof(*sampler), GFP_KERNEL);
	if (!sampler) {
		err = -ENOMEM;
		goto err_alloc;
	}

	sampler->sample_table_id = tc_psample->termtbl->id;
	sampler->default_table_id = default_table_id;
	sampler->sample_ratio = sample_ratio;

	err = sampler_obj_create(tc_psample->esw->dev, sampler);
	if (err)
		goto err_create;

	hash_add(tc_psample->hashtbl, &sampler->hlist, hash_key);

add_ref:
	sampler->count++;
	mutex_unlock(&tc_psample->ht_lock);
	return sampler;

err_create:
	kfree(sampler);
err_alloc:
	mutex_unlock(&tc_psample->ht_lock);
	return ERR_PTR(err);
}

static void
sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler)
{
	mutex_lock(&tc_psample->ht_lock);
	if (--sampler->count == 0) {
		hash_del(&sampler->hlist);
		sampler_obj_destroy(tc_psample->esw->dev, sampler->sampler_id);
		kfree(sampler);
	}
	mutex_unlock(&tc_psample->ht_lock);
}

/* obj_id is used to restore the sample parameters.
 * Set the fte_id in the original flow table, then match it in the default table.
 * Only do so for NICs that can preserve reg_c, or when there is a decap action.
 * For other cases, repeat the original match in the default table instead.
 * Use one header rewrite for both obj_id and fte_id.
 */
static struct mlx5_modify_hdr *
sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
		      struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					CHAIN_TO_REG, obj_id);
	if (err)
		goto err_set_regc0;

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
					      mod_acts->num_actions,
					      mod_acts->actions);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		goto err_modify_hdr;
	}

	mlx5e_mod_hdr_dealloc(mod_acts);
	return modify_hdr;

err_modify_hdr:
	mlx5e_mod_hdr_dealloc(mod_acts);
err_set_regc0:
	return ERR_PTR(err);
}

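/* Get (or create) the refcounted restore context for obj_id: allocate the
 * reg_c0 header rewrite, install the eswitch restore rule and add the
 * context to restore_hashtbl.
 */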
static struct mlx5e_sample_restore *
sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
		   struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
	struct mlx5_eswitch *esw = tc_psample->esw;
	struct mlx5_core_dev *mdev = esw->dev;
	struct mlx5e_sample_restore *restore;
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	mutex_lock(&tc_psample->restore_lock);
	hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, obj_id)
		if (restore->obj_id == obj_id)
			goto add_ref;

	restore = kzalloc(sizeof(*restore), GFP_KERNEL);
	if (!restore) {
		err = -ENOMEM;
		goto err_alloc;
	}
	restore->obj_id = obj_id;

	modify_hdr = sample_modify_hdr_get(mdev, obj_id, mod_acts);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		goto err_modify_hdr;
	}
	restore->modify_hdr = modify_hdr;

	restore->rule = esw_add_restore_rule(esw, obj_id);
	if (IS_ERR(restore->rule)) {
		err = PTR_ERR(restore->rule);
		goto err_restore;
	}

	hash_add(tc_psample->restore_hashtbl, &restore->hlist, obj_id);
add_ref:
	restore->count++;
	mutex_unlock(&tc_psample->restore_lock);
	return restore;

err_restore:
	mlx5_modify_header_dealloc(mdev, restore->modify_hdr);
err_modify_hdr:
	kfree(restore);
err_alloc:
	mutex_unlock(&tc_psample->restore_lock);
	return ERR_PTR(err);
}

static void
sample_restore_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sample_restore *restore)
{
	mutex_lock(&tc_psample->restore_lock);
	if (--restore->count == 0)
		hash_del(&restore->hlist);
	mutex_unlock(&tc_psample->restore_lock);

	if (!restore->count) {
		mlx5_del_flow_rules(restore->rule);
		mlx5_modify_header_dealloc(tc_psample->esw->dev, restore->modify_hdr);
		kfree(restore);
	}
}

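/* Deliver a sampled skb to the psample genetlink channel, using the group,
 * rate and truncation size recovered from the reg_c0 object mapping.
 */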
void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj)
{
	u32 trunc_size = mapped_obj->sample.trunc_size;
	struct psample_group psample_group = {};
	struct psample_metadata md = {};

	md.trunc_size = trunc_size ? min(trunc_size, skb->len) : skb->len;
	md.in_ifindex = skb->dev->ifindex;
	psample_group.group_num = mapped_obj->sample.group_id;
	psample_group.net = &init_net;
	skb_push(skb, skb->mac_len);

	psample_sample_packet(&psample_group, skb, mapped_obj->sample.rate, &md);
}

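/* Create (or take a reference on) the per (vport, chain, prio) default table
 * and offload the original match together with all actions except the sample
 * action into it.
 */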
static int
add_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
	      struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr,
	      u32 *default_tbl_id)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 attr_sz = ns_to_attr_sz(MLX5_FLOW_NAMESPACE_FDB);
	struct mlx5_vport_tbl_attr per_vport_tbl_attr;
	struct mlx5_flow_table *default_tbl;
	struct mlx5_flow_attr *post_attr;
	int err;

	/* Allocate a default table per vport, chain and prio. Otherwise, there
	 * is only one default table for the same sampler object, and rules
	 * with different prio and chain may overlap. For the CT sample action,
	 * a per vport default table is also needed to restore the metadata.
	 */
	per_vport_tbl_attr.chain = attr->chain;
	per_vport_tbl_attr.prio = attr->prio;
	per_vport_tbl_attr.vport = esw_attr->in_rep->vport;
	per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
	default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr);
	if (IS_ERR(default_tbl)) {
		err = PTR_ERR(default_tbl);
		goto err_default_tbl;
	}
	*default_tbl_id = default_tbl->id;

	post_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!post_attr) {
		err = -ENOMEM;
		goto err_attr;
	}
	sample_flow->post_attr = post_attr;
	memcpy(post_attr, attr, attr_sz);
	/* Perform the original matches on the default table.
	 * Offload all actions except the sample action.
	 */
	post_attr->chain = 0;
	post_attr->prio = 0;
	post_attr->ft = default_tbl;
	post_attr->flags = MLX5_ATTR_FLAG_NO_IN_PORT;

	/* When offloading sample and encap actions, if there is no valid
	 * neigh data struct, a slow path rule is offloaded first, and the
	 * source port metadata match is set at that time. A per vport table
	 * is already allocated, so there is no need to match it again; clear
	 * the source port metadata match.
	 */
	mlx5_eswitch_clear_rule_source_port(esw, spec);
	sample_flow->post_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, post_attr);
	if (IS_ERR(sample_flow->post_rule)) {
		err = PTR_ERR(sample_flow->post_rule);
		goto err_rule;
	}
	return 0;

err_rule:
	kfree(post_attr);
err_attr:
	mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr);
err_default_tbl:
	return err;
}

static void
del_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow,
	      struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_vport_tbl_attr tbl_attr;

	mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule, sample_flow->post_attr);
	kfree(sample_flow->post_attr);
	tbl_attr.chain = attr->chain;
	tbl_attr.prio = attr->prio;
	tbl_attr.vport = esw_attr->in_rep->vport;
	tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
	mlx5_esw_vporttbl_put(esw, &tbl_attr);
}

/* For the following typical flow table:
 *
 * +-------------------------------+
 * +       original flow table     +
 * +-------------------------------+
 * +         original match        +
 * +-------------------------------+
 * + sample action + other actions +
 * +-------------------------------+
 *
 * We translate the tc filter with sample action to the following HW model:
 *
 *         +---------------------+
 *         + original flow table +
 *         +---------------------+
 *         +   original match    +
 *         +---------------------+
 *               | set fte_id (if reg_c preserve cap)
 *               | do decap (if required)
 *               v
 * +------------------------------------------------+
 * +                Flow Sampler Object             +
 * +------------------------------------------------+
 * +                    sample ratio                +
 * +------------------------------------------------+
 * +    sample table id    |    default table id    +
 * +------------------------------------------------+
 *            |                            |
 *            v                            v
 * +-----------------------------+  +-------------------+
 * +        sample table         +  +   default table   +
 * +-----------------------------+  +-------------------+
 * + forward to management vport +             |
 * +-----------------------------+             |
 *                                     +-------+------+
 *                                     |              |reg_c preserve cap
 *                                     |              |or decap action
 *                                     v              v
 *                        +-----------------+   +-------------+
 *                        + per vport table +   + post action +
 *                        +-----------------+   +-------------+
 *                        + original match  +
 *                        +-----------------+
 *                        + other actions   +
 *                        +-----------------+
 */
struct mlx5_flow_handle *
mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
			struct mlx5_flow_spec *spec,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_esw_flow_attr *pre_esw_attr;
	struct mlx5_mapped_obj restore_obj = {};
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	struct mlx5e_sample_flow *sample_flow;
	struct mlx5e_sample_attr *sample_attr;
	struct mlx5_flow_attr *pre_attr;
	u32 tunnel_id = attr->tunnel_id;
	struct mlx5_eswitch *esw;
	u32 default_tbl_id;
	u32 obj_id;
	int err;

	if (IS_ERR_OR_NULL(tc_psample))
		return ERR_PTR(-EOPNOTSUPP);

	sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
	if (!sample_flow)
		return ERR_PTR(-ENOMEM);
	sample_attr = &attr->sample_attr;
	sample_attr->sample_flow = sample_flow;

	/* For NICs with reg_c_preserve support, or when there is a decap
	 * action, use the post action table instead of the per vport, chain
	 * and prio table, and match only the fte_id there instead of
	 * repeating the original match.
	 */
	esw = tc_psample->esw;
	if (mlx5e_tc_act_sample_is_multi_table(esw->dev, attr)) {
		struct mlx5_flow_table *ft;

		ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act);
		default_tbl_id = ft->id;
	} else {
		err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
		if (err)
			goto err_post_rule;
	}

	/* Create sampler object. */
	sample_flow->sampler = sampler_get(tc_psample, sample_attr->rate, default_tbl_id);
	if (IS_ERR(sample_flow->sampler)) {
		err = PTR_ERR(sample_flow->sampler);
		goto err_sampler;
	}
	sample_attr->sampler_id = sample_flow->sampler->sampler_id;

	/* Create an id mapping reg_c0 value to sample object. */
	restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
	restore_obj.sample.group_id = sample_attr->group_num;
	restore_obj.sample.rate = sample_attr->rate;
	restore_obj.sample.trunc_size = sample_attr->trunc_size;
	restore_obj.sample.tunnel_id = tunnel_id;
	err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
	if (err)
		goto err_obj_id;
	sample_attr->restore_obj_id = obj_id;

	/* Create sample restore context. */
	mod_acts = &attr->parse_attr->mod_hdr_acts;
	sample_flow->restore = sample_restore_get(tc_psample, obj_id, mod_acts);
	if (IS_ERR(sample_flow->restore)) {
		err = PTR_ERR(sample_flow->restore);
		goto err_sample_restore;
	}

	/* Perform the original matches on the original table. Offload the
	 * sample action. The destination is the sampler object.
	 */
	pre_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!pre_attr) {
		err = -ENOMEM;
		goto err_alloc_pre_flow_attr;
	}
	pre_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	/* For decap action, do decap in the original flow table instead of the
	 * default flow table.
	 */
	if (tunnel_id)
		pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
	pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
	pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
	pre_attr->inner_match_level = attr->inner_match_level;
	pre_attr->outer_match_level = attr->outer_match_level;
	pre_attr->chain = attr->chain;
	pre_attr->prio = attr->prio;
	pre_attr->ft = attr->ft;
	pre_attr->sample_attr = *sample_attr;
	pre_esw_attr = pre_attr->esw_attr;
	pre_esw_attr->in_mdev = esw_attr->in_mdev;
	pre_esw_attr->in_rep = esw_attr->in_rep;
	sample_flow->pre_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, pre_attr);
	if (IS_ERR(sample_flow->pre_rule)) {
		err = PTR_ERR(sample_flow->pre_rule);
		goto err_pre_offload_rule;
	}
	sample_flow->pre_attr = pre_attr;

	return sample_flow->pre_rule;

err_pre_offload_rule:
	kfree(pre_attr);
err_alloc_pre_flow_attr:
	sample_restore_put(tc_psample, sample_flow->restore);
err_sample_restore:
	mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id);
err_obj_id:
	sampler_put(tc_psample, sample_flow->sampler);
err_sampler:
	if (sample_flow->post_rule)
		del_post_rule(esw, sample_flow, attr);
err_post_rule:
	kfree(sample_flow);
	return ERR_PTR(err);
}

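/* Tear down a sample offload in the reverse order of mlx5e_tc_sample_offload(). */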
void
mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5e_sample_flow *sample_flow;
	struct mlx5_eswitch *esw;

	if (IS_ERR_OR_NULL(tc_psample))
		return;

	/* The following delete order can't be changed; otherwise, we will
	 * hit FW syndromes.
	 */
	esw = tc_psample->esw;
	sample_flow = attr->sample_attr.sample_flow;
	mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);

	sample_restore_put(tc_psample, sample_flow->restore);
	mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id);
	sampler_put(tc_psample, sample_flow->sampler);
	if (sample_flow->post_rule)
		del_post_rule(esw, sample_flow, attr);

	kfree(sample_flow->pre_attr);
	kfree(sample_flow);
}

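/* Allocate the per-eswitch sampling context and create the shared
 * termination table.
 */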
struct mlx5e_tc_psample *
mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act)
{
	struct mlx5e_tc_psample *tc_psample;
	int err;

	tc_psample = kzalloc(sizeof(*tc_psample), GFP_KERNEL);
	if (!tc_psample)
		return ERR_PTR(-ENOMEM);
	if (IS_ERR_OR_NULL(post_act)) {
		err = PTR_ERR(post_act);
		goto err_post_act;
	}
	tc_psample->post_act = post_act;
	tc_psample->esw = esw;
	err = sampler_termtbl_create(tc_psample);
	if (err)
		goto err_post_act;

	mutex_init(&tc_psample->ht_lock);
	mutex_init(&tc_psample->restore_lock);

	return tc_psample;

err_post_act:
	kfree(tc_psample);
	return ERR_PTR(err);
}

void
mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample)
{
	if (IS_ERR_OR_NULL(tc_psample))
		return;

	mutex_destroy(&tc_psample->restore_lock);
	mutex_destroy(&tc_psample->ht_lock);
	sampler_termtbl_destroy(tc_psample);
	kfree(tc_psample);
}