// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */

#include "rx_res.h"
#include "channels.h"
#include "params.h"

#define MLX5E_MAX_NUM_RSS 16

struct mlx5e_rx_res {
	struct mlx5_core_dev *mdev; /* primary */
	enum mlx5e_rx_res_features features;
	unsigned int max_nch;
	u32 drop_rqn;

	struct mlx5e_packet_merge_param pkt_merge_param;
	struct rw_semaphore pkt_merge_param_sem;

	struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
	bool rss_active;
	u32 *rss_rqns;
	u32 *rss_vhca_ids;
	unsigned int rss_nch;

	struct {
		struct mlx5e_rqt direct_rqt;
		struct mlx5e_tir direct_tir;
	} *channels;

	struct {
		struct mlx5e_rqt rqt;
		struct mlx5e_tir tir;
	} ptp;
};

/* API for rx_res_rss_* */

static u32 *get_vhca_ids(struct mlx5e_rx_res *res, int offset)
{
	bool multi_vhca = res->features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;

	return multi_vhca ? res->rss_vhca_ids + offset : NULL;
}

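/* Update the number of channels spanned by each RSS context's indirection table. */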
void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		if (res->rss[i])
			mlx5e_rss_params_indir_modify_actual_size(res->rss[i], nch);
	}
}

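/* Create the default RSS context (index 0) together with its TIRs. */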
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
				     unsigned int init_nch)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_rss *rss;

	if (WARN_ON(res->rss[0]))
		return -EINVAL;

	rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
			     &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
	if (IS_ERR(rss))
		return PTR_ERR(rss);

	mlx5e_rss_set_indir_uniform(rss, init_nch);

	res->rss[0] = rss;

	return 0;
}

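/* Create an additional RSS context in the first free slot. TIRs are not
 * created here (MLX5E_RSS_INIT_NO_TIRS); if RX is already active, the new
 * context is immediately pointed at the current channel RQs.
 */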
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_rss *rss;
	int i;

	for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
		if (!res->rss[i])
			break;

	if (i == MLX5E_MAX_NUM_RSS)
		return -ENOSPC;

	rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
			     &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
			     res->max_nch);
	if (IS_ERR(rss))
		return PTR_ERR(rss);

	mlx5e_rss_set_indir_uniform(rss, init_nch);
	if (res->rss_active) {
		u32 *vhca_ids = get_vhca_ids(res, 0);

		mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
	}

	res->rss[i] = rss;
	*rss_idx = i;

	return 0;
}

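/* Clean up an RSS context and release its slot. */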
static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	struct mlx5e_rss *rss = res->rss[rss_idx];
	int err;

	err = mlx5e_rss_cleanup(rss);
	if (err)
		return err;

	res->rss[rss_idx] = NULL;

	return 0;
}

int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -EINVAL;

	return __mlx5e_rx_res_rss_destroy(res, rss_idx);
}

static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];
		int err;

		if (!rss)
			continue;

		err = __mlx5e_rx_res_rss_destroy(res, i);
		if (err) {
			unsigned int refcount;

			refcount = mlx5e_rss_refcnt_read(rss);
			mlx5_core_warn(res->mdev,
				       "Failed to destroy RSS context %d, refcount = %u, err = %d\n",
				       i, refcount, err);
		}
	}
}

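/* Point all RSS contexts at the RQs currently stored in rss_rqns. */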
static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
{
	int i;

	res->rss_active = true;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];
		u32 *vhca_ids;

		if (!rss)
			continue;
		vhca_ids = get_vhca_ids(res, 0);
		mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
	}
}

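/* Stop steering to the channel RQs in all RSS contexts. */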
static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res)
{
	int i;

	res->rss_active = false;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
		struct mlx5e_rss *rss = res->rss[i];

		if (!rss)
			continue;
		mlx5e_rss_disable(rss);
	}
}

/* Updates the indirection table SW shadow, does not update the HW resources yet */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch)
{
	WARN_ON_ONCE(res->rss_active);
	mlx5e_rss_set_indir_uniform(res->rss[0], nch);
}

int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      u32 *indir, u8 *key, u8 *hfunc)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
}

int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
			      const u32 *indir, const u8 *key, const u8 *hfunc)
{
	u32 *vhca_ids = get_vhca_ids(res, 0);
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
				  res->rss_nch);
}

int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
				     enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_get_hash_fields(rss, tt);
}

int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
				     enum mlx5_traffic_types tt, u8 rx_hash_fields)
{
	struct mlx5e_rss *rss;

	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return -EINVAL;

	rss = res->rss[rss_idx];
	if (!rss)
		return -ENOENT;

	return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields);
}

int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res)
{
	int i, cnt;

	cnt = 0;
	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
		if (res->rss[i])
			cnt++;

	return cnt;
}

int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss)
{
	int i;

	if (!rss)
		return -EINVAL;

	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++)
		if (rss == res->rss[i])
			return i;

	return -ENOENT;
}

struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
{
	if (rss_idx >= MLX5E_MAX_NUM_RSS)
		return NULL;

	return res->rss[rss_idx];
}

/* End of API rx_res_rss_* */

static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
	kvfree(res->rss_vhca_ids);
	kvfree(res->rss_rqns);
	kvfree(res);
}

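/* Allocate the rx_res struct and the per-channel RQN (and, for multi-VHCA
 * setups, VHCA ID) scratch arrays.
 */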
static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch,
					       bool multi_vhca)
{
	struct mlx5e_rx_res *rx_res;

	rx_res = kvzalloc(sizeof(*rx_res), GFP_KERNEL);
	if (!rx_res)
		return NULL;

	rx_res->rss_rqns = kvcalloc(max_nch, sizeof(*rx_res->rss_rqns), GFP_KERNEL);
	if (!rx_res->rss_rqns) {
		kvfree(rx_res);
		return NULL;
	}

	if (multi_vhca) {
		rx_res->rss_vhca_ids = kvcalloc(max_nch, sizeof(*rx_res->rss_vhca_ids), GFP_KERNEL);
		if (!rx_res->rss_vhca_ids) {
			kvfree(rx_res->rss_rqns);
			kvfree(rx_res);
			return NULL;
		}
	}

	return rx_res;
}

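/* Create one direct RQT and one direct TIR per channel. The RQTs initially
 * point at the drop RQ until the channels are activated.
 */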
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err = 0;
	int ix;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	res->channels = kvcalloc(res->max_nch, sizeof(*res->channels), GFP_KERNEL);
	if (!res->channels) {
		err = -ENOMEM;
		goto out;
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
					    res->mdev, false, res->drop_rqn,
					    mlx5e_rqt_size(res->mdev, res->max_nch));
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_rqts;
		}
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
					    mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
					    inner_ft_support);
		mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
		mlx5e_tir_builder_build_direct(builder);

		err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n",
				       err, ix);
			goto err_destroy_direct_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

	goto out;

err_destroy_direct_tirs:
	while (--ix >= 0)
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);

	ix = res->max_nch;
err_destroy_direct_rqts:
	while (--ix >= 0)
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);

	kvfree(res->channels);

out:
	mlx5e_tir_builder_free(builder);

	return err;
}

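/* Create the RQT and TIR used to steer PTP traffic. */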
static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn,
				    mlx5e_rqt_size(res->mdev, res->max_nch));
	if (err)
		goto out;

	/* Separated from the channels RQs, does not share pkt_merge state with them */
	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
				    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);

	err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true);
	if (err)
		goto err_destroy_ptp_rqt;

	goto out;

err_destroy_ptp_rqt:
	mlx5e_rqt_destroy(&res->ptp.rqt);

out:
	mlx5e_tir_builder_free(builder);
	return err;
}

static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
{
	unsigned int ix;

	for (ix = 0; ix < res->max_nch; ix++) {
		mlx5e_tir_destroy(&res->channels[ix].direct_tir);
		mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
	}

	kvfree(res->channels);
}

static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_tir_destroy(&res->ptp.tir);
	mlx5e_rqt_destroy(&res->ptp.rqt);
}

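/* Create the full set of RX resources: the default RSS context, the
 * per-channel direct RQTs/TIRs and the PTP RQT/TIR.
 */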
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
		    unsigned int max_nch, u32 drop_rqn,
		    const struct mlx5e_packet_merge_param *init_pkt_merge_param,
		    unsigned int init_nch)
{
	bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
	struct mlx5e_rx_res *res;
	int err;

	res = mlx5e_rx_res_alloc(mdev, max_nch, multi_vhca);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->mdev = mdev;
	res->features = features;
	res->max_nch = max_nch;
	res->drop_rqn = drop_rqn;

	res->pkt_merge_param = *init_pkt_merge_param;
	init_rwsem(&res->pkt_merge_param_sem);

	err = mlx5e_rx_res_rss_init_def(res, init_nch);
	if (err)
		goto err_rx_res_free;

	err = mlx5e_rx_res_channels_init(res);
	if (err)
		goto err_rss_destroy;

	err = mlx5e_rx_res_ptp_init(res);
	if (err)
		goto err_channels_destroy;

	return res;

err_channels_destroy:
	mlx5e_rx_res_channels_destroy(res);
err_rss_destroy:
	__mlx5e_rx_res_rss_destroy(res, 0);
err_rx_res_free:
	mlx5e_rx_res_free(res);
	return ERR_PTR(err);
}

void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
{
	mlx5e_rx_res_ptp_destroy(res);
	mlx5e_rx_res_channels_destroy(res);
	mlx5e_rx_res_rss_destroy_all(res);
	mlx5e_rx_res_free(res);
}

u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}

u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_tirn(rss, tt, false);
}

u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
	struct mlx5e_rss *rss = res->rss[0];

	return mlx5e_rss_get_tirn(rss, tt, true);
}

u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
{
	WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP));
	return mlx5e_tir_get_tirn(&res->ptp.tir);
}

static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
	return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}

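/* Redirect a channel's direct RQT to the RQ stored in rss_rqns[ix]. */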
static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
						 struct mlx5e_channels *chs,
						 unsigned int ix)
{
	u32 *vhca_id = get_vhca_ids(res, ix);
	u32 rqn = res->rss_rqns[ix];
	int err;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn, vhca_id);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
			       rqn, ix, err);
}

static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
						   unsigned int ix)
{
	int err;

	err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn, NULL);
	if (err)
		mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
			       mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
			       res->drop_rqn, ix, err);
}

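/* Activate RX: collect the RQNs of the new channels, apply them to the RSS
 * contexts and the direct RQTs, and set up the PTP RQT if supported.
 */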
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
{
	unsigned int nch, ix;
	int err;

	nch = mlx5e_channels_get_num(chs);

	for (ix = 0; ix < chs->num; ix++) {
		u32 *vhca_id = get_vhca_ids(res, ix);

		if (mlx5e_channels_is_xsk(chs, ix))
			mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
		else
			mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
	}
	res->rss_nch = chs->num;

	mlx5e_rx_res_rss_enable(res);

	for (ix = 0; ix < nch; ix++)
		mlx5e_rx_res_channel_activate_direct(res, chs, ix);
	for (ix = nch; ix < res->max_nch; ix++)
		mlx5e_rx_res_channel_deactivate_direct(res, ix);

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		u32 rqn;

		if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
			rqn = res->drop_rqn;

		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn, NULL);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       rqn, err);
	}
}

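/* Deactivate RX: disable RSS and point the direct and PTP RQTs at the drop RQ. */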
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
{
	unsigned int ix;
	int err;

	mlx5e_rx_res_rss_disable(res);

	for (ix = 0; ix < res->max_nch; ix++)
		mlx5e_rx_res_channel_deactivate_direct(res, ix);

	if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
		err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn, NULL);
		if (err)
			mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
				       mlx5e_rqt_get_rqtn(&res->ptp.rqt),
				       res->drop_rqn, err);
	}
}

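/* Switch channel ix between its XSK RQ and regular RQ, then re-apply the RQN
 * to the RSS contexts and the channel's direct RQT.
 */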
void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
			     unsigned int ix, bool xsk)
{
	u32 *vhca_id = get_vhca_ids(res, ix);

	if (xsk)
		mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
	else
		mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);

	mlx5e_rx_res_rss_enable(res);

	mlx5e_rx_res_channel_activate_direct(res, chs, ix);
}

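/* Update the packet merge (e.g. LRO) configuration of the RSS TIRs and all
 * direct TIRs, keeping the SW shadow in sync under pkt_merge_param_sem.
 */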
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
					struct mlx5e_packet_merge_param *pkt_merge_param)
{
	struct mlx5e_tir_builder *builder;
	int err, final_err;
	unsigned int ix;

	builder = mlx5e_tir_builder_alloc(true);
	if (!builder)
		return -ENOMEM;

	down_write(&res->pkt_merge_param_sem);
	res->pkt_merge_param = *pkt_merge_param;

	mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);

	final_err = 0;

	for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) {
		struct mlx5e_rss *rss = res->rss[ix];

		if (!rss)
			continue;

		err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
		if (err)
			final_err = final_err ? : err;
	}

	for (ix = 0; ix < res->max_nch; ix++) {
		err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
		if (err) {
			mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
				       mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
			if (!final_err)
				final_err = err;
		}
	}

	up_write(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_free(builder);
	return final_err;
}

struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res)
{
	return mlx5e_rss_get_hash(res->rss[0]);
}

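/* Create a TLS RX TIR bound to the direct RQT of the given RX queue, using
 * the current packet merge parameters.
 */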
int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
				struct mlx5e_tir *tir)
{
	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
	struct mlx5e_tir_builder *builder;
	u32 rqtn;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);

	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
				    inner_ft_support);
	mlx5e_tir_builder_build_direct(builder);
	mlx5e_tir_builder_build_tls(builder);
	down_read(&res->pkt_merge_param_sem);
	mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
	err = mlx5e_tir_init(tir, builder, res->mdev, false);
	up_read(&res->pkt_merge_param_sem);

	mlx5e_tir_builder_free(builder);

	return err;
}