1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/device.h>
33 #include <linux/netdevice.h>
34 #include "en.h"
35 #include "en/port.h"
36 #include "en/port_buffer.h"
37 
38 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
39 
40 #define MLX5E_100MB (100000)
41 #define MLX5E_1GB   (1000000)
42 
43 #define MLX5E_CEE_STATE_UP    1
44 #define MLX5E_CEE_STATE_DOWN  0
45 
46 /* Max supported cable length is 1000 meters */
47 #define MLX5E_MAX_CABLE_LENGTH 1000
48 
49 enum {
50 	MLX5E_VENDOR_TC_GROUP_NUM = 7,
51 	MLX5E_LOWEST_PRIO_GROUP   = 0,
52 };
53 
54 enum {
55 	MLX5_DCB_CHG_RESET,
56 	MLX5_DCB_NO_CHG,
57 	MLX5_DCB_CHG_NO_RESET,
58 };
59 
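/* DSCP trust requires the QCAM register and its QPTS (port trust state) and
 * QPDPM (dscp2prio mapping) sub-registers.
 */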
60 #define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg)  && \
61 				   MLX5_CAP_QCAM_REG(mdev, qpts) && \
62 				   MLX5_CAP_QCAM_REG(mdev, qpdpm))
63 
64 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
65 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
66 
67 /* If dcbx mode is non-host, set the dcbx mode to host.
68  */
69 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
70 				     enum mlx5_dcbx_oper_mode mode)
71 {
72 	struct mlx5_core_dev *mdev = priv->mdev;
73 	u32 param[MLX5_ST_SZ_DW(dcbx_param)];
74 	int err;
75 
76 	err = mlx5_query_port_dcbx_param(mdev, param);
77 	if (err)
78 		return err;
79 
80 	MLX5_SET(dcbx_param, param, version_admin, mode);
81 	if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
82 		MLX5_SET(dcbx_param, param, willing_admin, 1);
83 
84 	return mlx5_set_port_dcbx_param(mdev, param);
85 }
86 
87 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
88 {
89 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
90 	int err;
91 
92 	if (!MLX5_CAP_GEN(priv->mdev, dcbx))
93 		return 0;
94 
95 	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
96 		return 0;
97 
98 	err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
99 	if (err)
100 		return err;
101 
102 	dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
103 	return 0;
104 }
105 
106 static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
107 				   struct ieee_ets *ets)
108 {
109 	struct mlx5e_priv *priv = netdev_priv(netdev);
110 	struct mlx5_core_dev *mdev = priv->mdev;
111 	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
112 	bool is_tc_group_6_exist = false;
113 	bool is_zero_bw_ets_tc = false;
114 	int err = 0;
115 	int i;
116 
117 	if (!MLX5_CAP_GEN(priv->mdev, ets))
118 		return -EOPNOTSUPP;
119 
120 	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
121 	for (i = 0; i < ets->ets_cap; i++) {
122 		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
123 		if (err)
124 			return err;
125 
126 		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
127 		if (err)
128 			return err;
129 
130 		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
131 		if (err)
132 			return err;
133 
134 		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
135 		    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
136 			is_zero_bw_ets_tc = true;
137 
138 		if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
139 			is_tc_group_6_exist = true;
140 	}
141 
142 	/* Report 0% BW for the zero-BW ETS TCs (group #0), if any exist */
143 	if (is_zero_bw_ets_tc) {
144 		for (i = 0; i < ets->ets_cap; i++)
145 			if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
146 				ets->tc_tx_bw[i] = 0;
147 	}
148 
149 	/* Update tc_tsa based on fw setting */
150 	for (i = 0; i < ets->ets_cap; i++) {
151 		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
152 			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
153 		else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
154 			 !is_tc_group_6_exist)
155 			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
156 	}
157 	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
158 
159 	return err;
160 }
161 
162 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
163 {
164 	bool any_tc_mapped_to_ets = false;
165 	bool ets_zero_bw = false;
166 	int strict_group;
167 	int i;
168 
169 	for (i = 0; i <= max_tc; i++) {
170 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
171 			any_tc_mapped_to_ets = true;
172 			if (!ets->tc_tx_bw[i])
173 				ets_zero_bw = true;
174 		}
175 	}
176 
177 	/* strict group has higher priority than ets group */
178 	strict_group = MLX5E_LOWEST_PRIO_GROUP;
179 	if (any_tc_mapped_to_ets)
180 		strict_group++;
181 	if (ets_zero_bw)
182 		strict_group++;
183 
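	/* Map each TC to its TC group: vendor TCs go to group 7, strict TCs
	 * take ascending groups above the ETS group(s), and ETS TCs stay in
	 * group 0 unless a zero-BW ETS group exists, in which case the
	 * non-zero-BW ETS TCs move to group 1.
	 */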
184 	for (i = 0; i <= max_tc; i++) {
185 		switch (ets->tc_tsa[i]) {
186 		case IEEE_8021QAZ_TSA_VENDOR:
187 			tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
188 			break;
189 		case IEEE_8021QAZ_TSA_STRICT:
190 			tc_group[i] = strict_group++;
191 			break;
192 		case IEEE_8021QAZ_TSA_ETS:
193 			tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
194 			if (ets->tc_tx_bw[i] && ets_zero_bw)
195 				tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
196 			break;
197 		}
198 	}
199 }
200 
201 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
202 				 u8 *tc_group, int max_tc)
203 {
204 	int bw_for_ets_zero_bw_tc = 0;
205 	int last_ets_zero_bw_tc = -1;
206 	int num_ets_zero_bw = 0;
207 	int i;
208 
209 	for (i = 0; i <= max_tc; i++) {
210 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
211 		    !ets->tc_tx_bw[i]) {
212 			num_ets_zero_bw++;
213 			last_ets_zero_bw_tc = i;
214 		}
215 	}
216 
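	/* Zero-BW ETS TCs share the group's 100% equally; the integer
	 * division remainder is added to the last such TC further down.
	 */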
217 	if (num_ets_zero_bw)
218 		bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
219 
220 	for (i = 0; i <= max_tc; i++) {
221 		switch (ets->tc_tsa[i]) {
222 		case IEEE_8021QAZ_TSA_VENDOR:
223 			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
224 			break;
225 		case IEEE_8021QAZ_TSA_STRICT:
226 			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
227 			break;
228 		case IEEE_8021QAZ_TSA_ETS:
229 			tc_tx_bw[i] = ets->tc_tx_bw[i] ?
230 				      ets->tc_tx_bw[i] :
231 				      bw_for_ets_zero_bw_tc;
232 			break;
233 		}
234 	}
235 
236 	/* Make sure the total bw for ets zero bw group is 100% */
237 	if (last_ets_zero_bw_tc != -1)
238 		tc_tx_bw[last_ets_zero_bw_tc] +=
239 			MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
240 }
241 
242 /* If any ETS TC is configured with 0% BW:
243  *   Assign group #1 to all ETS TCs with non-zero BW; their BW sum must be 100%.
244  *   Assign group #0 to all ETS TCs with 0% BW and
245  *     split the 100% BW equally between them.
246  *   Report both group #0 and group #1 as ETS type.
247  *     All the TCs in group #0 will be reported with 0% BW.
248  */
249 static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
250 {
251 	struct mlx5_core_dev *mdev = priv->mdev;
252 	u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
253 	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
254 	int max_tc = mlx5_max_tc(mdev);
255 	int err, i;
256 
257 	mlx5e_build_tc_group(ets, tc_group, max_tc);
258 	mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
259 
260 	err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
261 	if (err)
262 		return err;
263 
264 	err = mlx5_set_port_tc_group(mdev, tc_group);
265 	if (err)
266 		return err;
267 
268 	err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
269 
270 	if (err)
271 		return err;
272 
273 	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
274 
275 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
276 		mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
277 			  __func__, i, ets->prio_tc[i]);
278 		mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
279 			  __func__, i, tc_tx_bw[i], tc_group[i]);
280 	}
281 
282 	return err;
283 }
284 
285 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
286 				    struct ieee_ets *ets,
287 				    bool zero_sum_allowed)
288 {
289 	bool have_ets_tc = false;
290 	int bw_sum = 0;
291 	int i;
292 
293 	/* Validate Priority */
294 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
295 		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
296 			netdev_err(netdev,
297 				   "Failed to validate ETS: priority value greater than max(%d)\n",
298 				    MLX5E_MAX_PRIORITY);
299 			return -EINVAL;
300 		}
301 	}
302 
303 	/* Validate Bandwidth Sum */
304 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
305 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
306 			have_ets_tc = true;
307 			bw_sum += ets->tc_tx_bw[i];
308 		}
309 	}
310 
311 	if (have_ets_tc && bw_sum != 100) {
312 		if (bw_sum || (!bw_sum && !zero_sum_allowed))
313 			netdev_err(netdev,
314 				   "Failed to validate ETS: BW sum is illegal\n");
315 		return -EINVAL;
316 	}
317 	return 0;
318 }
319 
320 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
321 				   struct ieee_ets *ets)
322 {
323 	struct mlx5e_priv *priv = netdev_priv(netdev);
324 	int err;
325 
326 	if (!MLX5_CAP_GEN(priv->mdev, ets))
327 		return -EOPNOTSUPP;
328 
329 	err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
330 	if (err)
331 		return err;
332 
333 	err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
334 	if (err)
335 		return err;
336 
337 	return 0;
338 }
339 
340 static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
341 				   struct ieee_pfc *pfc)
342 {
343 	struct mlx5e_priv *priv = netdev_priv(dev);
344 	struct mlx5_core_dev *mdev = priv->mdev;
345 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
346 	int i;
347 
348 	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
349 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
350 		pfc->requests[i]    = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
351 		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
352 	}
353 
354 	if (MLX5_BUFFER_SUPPORTED(mdev))
355 		pfc->delay = priv->dcbx.cable_len;
356 
357 	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
358 }
359 
360 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
361 				   struct ieee_pfc *pfc)
362 {
363 	struct mlx5e_priv *priv = netdev_priv(dev);
364 	struct mlx5_core_dev *mdev = priv->mdev;
365 	u32 old_cable_len = priv->dcbx.cable_len;
366 	struct ieee_pfc pfc_new;
367 	u32 changed = 0;
368 	u8 curr_pfc_en;
369 	int ret = 0;
370 
371 	/* pfc_en */
372 	mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
373 	if (pfc->pfc_en != curr_pfc_en) {
374 		ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
375 		if (ret)
376 			return ret;
377 		mlx5_toggle_port_link(mdev);
378 		changed |= MLX5E_PORT_BUFFER_PFC;
379 	}
380 
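	/* pfc->delay is (re)used here as the cable length in meters for port
	 * buffer sizing, bounded by MLX5E_MAX_CABLE_LENGTH.
	 */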
381 	if (pfc->delay &&
382 	    pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
383 	    pfc->delay != priv->dcbx.cable_len) {
384 		priv->dcbx.cable_len = pfc->delay;
385 		changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
386 	}
387 
388 	if (MLX5_BUFFER_SUPPORTED(mdev)) {
389 		pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
390 		if (priv->dcbx.manual_buffer)
391 			ret = mlx5e_port_manual_buffer_config(priv, changed,
392 							      dev->mtu, &pfc_new,
393 							      NULL, NULL);
394 
395 		if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
396 			priv->dcbx.cable_len = old_cable_len;
397 	}
398 
399 	if (!ret) {
400 		mlx5e_dbg(HW, priv,
401 			  "%s: PFC per priority bit mask: 0x%x\n",
402 			  __func__, pfc->pfc_en);
403 	}
404 	return ret;
405 }
406 
407 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
408 {
409 	struct mlx5e_priv *priv = netdev_priv(dev);
410 
411 	return priv->dcbx.cap;
412 }
413 
414 static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
415 {
416 	struct mlx5e_priv *priv = netdev_priv(dev);
417 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
418 
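	/* A non-zero return rejects the request; an externally (LLD) managed
	 * DCBX agent is not supported.
	 */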
419 	if (mode & DCB_CAP_DCBX_LLD_MANAGED)
420 		return 1;
421 
422 	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
423 		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
424 			return 0;
425 
426 		/* set dcbx to fw controlled */
427 		if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
428 			dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
429 			dcbx->cap &= ~DCB_CAP_DCBX_HOST;
430 			return 0;
431 		}
432 
433 		return 1;
434 	}
435 
436 	if (!(mode & DCB_CAP_DCBX_HOST))
437 		return 1;
438 
439 	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
440 		return 1;
441 
442 	dcbx->cap = mode;
443 
444 	return 0;
445 }
446 
447 static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
448 {
449 	struct mlx5e_priv *priv = netdev_priv(dev);
450 	struct dcb_app temp;
451 	bool is_new;
452 	int err;
453 
454 	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
455 	    !MLX5_DSCP_SUPPORTED(priv->mdev))
456 		return -EOPNOTSUPP;
457 
458 	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
459 	    (app->protocol >= MLX5E_MAX_DSCP))
460 		return -EINVAL;
461 
462 	/* Save the old entry info */
463 	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
464 	temp.protocol = app->protocol;
465 	temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
466 
467 	/* Check if need to switch to dscp trust state */
468 	if (!priv->dcbx.dscp_app_cnt) {
469 		err =  mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
470 		if (err)
471 			return err;
472 	}
473 
474 	/* Skip the fw command if new and old mapping are the same */
475 	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
476 		err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
477 		if (err)
478 			goto fw_err;
479 	}
480 
481 	/* Delete the old entry if exists */
482 	is_new = false;
483 	err = dcb_ieee_delapp(dev, &temp);
484 	if (err)
485 		is_new = true;
486 
487 	/* Add new entry and update counter */
488 	err = dcb_ieee_setapp(dev, app);
489 	if (err)
490 		return err;
491 
492 	if (is_new)
493 		priv->dcbx.dscp_app_cnt++;
494 
495 	return err;
496 
497 fw_err:
498 	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
499 	return err;
500 }
501 
502 static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
503 {
504 	struct mlx5e_priv *priv = netdev_priv(dev);
505 	int err;
506 
507 	if  (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
508 	     !MLX5_DSCP_SUPPORTED(priv->mdev))
509 		return -EOPNOTSUPP;
510 
511 	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
512 	    (app->protocol >= MLX5E_MAX_DSCP))
513 		return -EINVAL;
514 
515 	/* Skip if no dscp app entry */
516 	if (!priv->dcbx.dscp_app_cnt)
517 		return -ENOENT;
518 
519 	/* Check if the entry matches fw setting */
520 	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
521 		return -ENOENT;
522 
523 	/* Delete the app entry */
524 	err = dcb_ieee_delapp(dev, app);
525 	if (err)
526 		return err;
527 
528 	/* Reset the priority mapping back to zero */
529 	err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
530 	if (err)
531 		goto fw_err;
532 
533 	priv->dcbx.dscp_app_cnt--;
534 
535 	/* Check if need to switch to pcp trust state */
536 	if (!priv->dcbx.dscp_app_cnt)
537 		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
538 
539 	return err;
540 
541 fw_err:
542 	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
543 	return err;
544 }
545 
546 static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
547 				       struct ieee_maxrate *maxrate)
548 {
549 	struct mlx5e_priv *priv    = netdev_priv(netdev);
550 	struct mlx5_core_dev *mdev = priv->mdev;
551 	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
552 	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
553 	int err;
554 	int i;
555 
556 	err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
557 	if (err)
558 		return err;
559 
560 	memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
561 
562 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
563 		switch (max_bw_unit[i]) {
564 		case MLX5_100_MBPS_UNIT:
565 			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
566 			break;
567 		case MLX5_GBPS_UNIT:
568 			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
569 			break;
570 		case MLX5_BW_NO_LIMIT:
571 			break;
572 		default:
573 			WARN(true, "non-supported BW unit");
574 			break;
575 		}
576 	}
577 
578 	return 0;
579 }
580 
581 static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
582 				       struct ieee_maxrate *maxrate)
583 {
584 	struct mlx5e_priv *priv    = netdev_priv(netdev);
585 	struct mlx5_core_dev *mdev = priv->mdev;
586 	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
587 	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
588 	__u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
589 	int i;
590 
591 	memset(max_bw_value, 0, sizeof(max_bw_value));
592 	memset(max_bw_unit, 0, sizeof(max_bw_unit));
593 
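	/* Pick the coarsest unit that still fits max_bw_value (a u8): rates
	 * below ~25.5 Gbps are programmed in 100 Mbps units, larger rates in
	 * 1 Gbps units.
	 */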
594 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
595 		if (!maxrate->tc_maxrate[i]) {
596 			max_bw_unit[i]  = MLX5_BW_NO_LIMIT;
597 			continue;
598 		}
599 		if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
600 			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
601 						  MLX5E_100MB);
602 			max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
603 			max_bw_unit[i]  = MLX5_100_MBPS_UNIT;
604 		} else {
605 			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
606 						  MLX5E_1GB);
607 			max_bw_unit[i]  = MLX5_GBPS_UNIT;
608 		}
609 	}
610 
611 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
612 		mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
613 			  __func__, i, max_bw_value[i]);
614 	}
615 
616 	return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
617 }
618 
619 static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
620 {
621 	struct mlx5e_priv *priv = netdev_priv(netdev);
622 	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
623 	struct mlx5_core_dev *mdev = priv->mdev;
624 	struct ieee_ets ets;
625 	struct ieee_pfc pfc;
626 	int err = -EOPNOTSUPP;
627 	int i;
628 
629 	if (!MLX5_CAP_GEN(mdev, ets))
630 		goto out;
631 
632 	memset(&ets, 0, sizeof(ets));
633 	memset(&pfc, 0, sizeof(pfc));
634 
635 	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
636 	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
637 		ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
638 		ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
639 		ets.tc_tsa[i]   = IEEE_8021QAZ_TSA_ETS;
640 		ets.prio_tc[i]  = cee_cfg->prio_to_pg_map[i];
641 		mlx5e_dbg(HW, priv,
642 			  "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
643 			  __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
644 			  ets.prio_tc[i]);
645 	}
646 
647 	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
648 	if (err)
649 		goto out;
650 
651 	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
652 	if (err) {
653 		netdev_err(netdev,
654 			   "%s, Failed to set ETS: %d\n", __func__, err);
655 		goto out;
656 	}
657 
658 	/* Set PFC */
659 	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
660 	if (!cee_cfg->pfc_enable)
661 		pfc.pfc_en = 0;
662 	else
663 		for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
664 			pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
665 
666 	err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
667 	if (err) {
668 		netdev_err(netdev,
669 			   "%s, Failed to set PFC: %d\n", __func__, err);
670 		goto out;
671 	}
672 out:
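	/* Report "no change" on any failure, otherwise a change that requires
	 * reset (MLX5_DCB_CHG_RESET).
	 */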
673 	return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
674 }
675 
676 static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
677 {
678 	return MLX5E_CEE_STATE_UP;
679 }
680 
681 static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
682 				      u8 *perm_addr)
683 {
684 	struct mlx5e_priv *priv = netdev_priv(netdev);
685 
686 	if (!perm_addr)
687 		return;
688 
689 	memset(perm_addr, 0xff, MAX_ADDR_LEN);
690 
691 	mlx5_query_mac_address(priv->mdev, perm_addr);
692 }
693 
694 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
695 				     int priority, u8 prio_type,
696 				     u8 pgid, u8 bw_pct, u8 up_map)
697 {
698 	struct mlx5e_priv *priv = netdev_priv(netdev);
699 	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
700 
701 	if (priority >= CEE_DCBX_MAX_PRIO) {
702 		netdev_err(netdev,
703 			   "%s, priority is out of range\n", __func__);
704 		return;
705 	}
706 
707 	if (pgid >= CEE_DCBX_MAX_PGS) {
708 		netdev_err(netdev,
709 			   "%s, priority group is out of range\n", __func__);
710 		return;
711 	}
712 
713 	cee_cfg->prio_to_pg_map[priority] = pgid;
714 }
715 
716 static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
717 				      int pgid, u8 bw_pct)
718 {
719 	struct mlx5e_priv *priv = netdev_priv(netdev);
720 	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
721 
722 	if (pgid >= CEE_DCBX_MAX_PGS) {
723 		netdev_err(netdev,
724 			   "%s, priority group is out of range\n", __func__);
725 		return;
726 	}
727 
728 	cee_cfg->pg_bw_pct[pgid] = bw_pct;
729 }
730 
731 static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
732 				     int priority, u8 *prio_type,
733 				     u8 *pgid, u8 *bw_pct, u8 *up_map)
734 {
735 	struct mlx5e_priv *priv = netdev_priv(netdev);
736 	struct mlx5_core_dev *mdev = priv->mdev;
737 
738 	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
739 		netdev_err(netdev, "%s, ets is not supported\n", __func__);
740 		return;
741 	}
742 
743 	if (priority >= CEE_DCBX_MAX_PRIO) {
744 		netdev_err(netdev,
745 			   "%s, priority is out of range\n", __func__);
746 		return;
747 	}
748 
749 	*prio_type = 0;
750 	*bw_pct = 0;
751 	*up_map = 0;
752 
753 	if (mlx5_query_port_prio_tc(mdev, priority, pgid))
754 		*pgid = 0;
755 }
756 
757 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
758 				      int pgid, u8 *bw_pct)
759 {
760 	struct ieee_ets ets;
761 
762 	if (pgid >= CEE_DCBX_MAX_PGS) {
763 		netdev_err(netdev,
764 			   "%s, priority group is out of range\n", __func__);
765 		return;
766 	}
767 
768 	mlx5e_dcbnl_ieee_getets(netdev, &ets);
769 	*bw_pct = ets.tc_tx_bw[pgid];
770 }
771 
772 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
773 				  int priority, u8 setting)
774 {
775 	struct mlx5e_priv *priv = netdev_priv(netdev);
776 	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
777 
778 	if (priority >= CEE_DCBX_MAX_PRIO) {
779 		netdev_err(netdev,
780 			   "%s, priority is out of range\n", __func__);
781 		return;
782 	}
783 
784 	if (setting > 1)
785 		return;
786 
787 	cee_cfg->pfc_setting[priority] = setting;
788 }
789 
790 static int
791 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
792 			     int priority, u8 *setting)
793 {
794 	struct ieee_pfc pfc;
795 	int err;
796 
797 	err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
798 
799 	if (err)
800 		*setting = 0;
801 	else
802 		*setting = (pfc.pfc_en >> priority) & 0x01;
803 
804 	return err;
805 }
806 
807 static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
808 				  int priority, u8 *setting)
809 {
810 	if (priority >= CEE_DCBX_MAX_PRIO) {
811 		netdev_err(netdev,
812 			   "%s, priority is out of range\n", __func__);
813 		return;
814 	}
815 
816 	if (!setting)
817 		return;
818 
819 	mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
820 }
821 
822 static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
823 			     int capid, u8 *cap)
824 {
825 	struct mlx5e_priv *priv = netdev_priv(netdev);
826 	struct mlx5_core_dev *mdev = priv->mdev;
827 	u8 rval = 0;
828 
829 	switch (capid) {
830 	case DCB_CAP_ATTR_PG:
831 		*cap = true;
832 		break;
833 	case DCB_CAP_ATTR_PFC:
834 		*cap = true;
835 		break;
836 	case DCB_CAP_ATTR_UP2TC:
837 		*cap = false;
838 		break;
839 	case DCB_CAP_ATTR_PG_TCS:
840 		*cap = 1 << mlx5_max_tc(mdev);
841 		break;
842 	case DCB_CAP_ATTR_PFC_TCS:
843 		*cap = 1 << mlx5_max_tc(mdev);
844 		break;
845 	case DCB_CAP_ATTR_GSP:
846 		*cap = false;
847 		break;
848 	case DCB_CAP_ATTR_BCN:
849 		*cap = false;
850 		break;
851 	case DCB_CAP_ATTR_DCBX:
852 		*cap = priv->dcbx.cap |
853 		       DCB_CAP_DCBX_VER_CEE |
854 		       DCB_CAP_DCBX_VER_IEEE;
855 		break;
856 	default:
857 		*cap = 0;
858 		rval = 1;
859 		break;
860 	}
861 
862 	return rval;
863 }
864 
865 static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
866 				 int tcs_id, u8 *num)
867 {
868 	struct mlx5e_priv *priv = netdev_priv(netdev);
869 	struct mlx5_core_dev *mdev = priv->mdev;
870 
871 	switch (tcs_id) {
872 	case DCB_NUMTCS_ATTR_PG:
873 	case DCB_NUMTCS_ATTR_PFC:
874 		*num = mlx5_max_tc(mdev) + 1;
875 		break;
876 	default:
877 		return -EINVAL;
878 	}
879 
880 	return 0;
881 }
882 
883 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
884 {
885 	struct ieee_pfc pfc;
886 
887 	if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
888 		return MLX5E_CEE_STATE_DOWN;
889 
890 	return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
891 }
892 
893 static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
894 {
895 	struct mlx5e_priv *priv = netdev_priv(netdev);
896 	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
897 
898 	if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
899 		return;
900 
901 	cee_cfg->pfc_enable = state;
902 }
903 
904 static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
905 				 struct dcbnl_buffer *dcb_buffer)
906 {
907 	struct mlx5e_priv *priv = netdev_priv(dev);
908 	struct mlx5_core_dev *mdev = priv->mdev;
909 	struct mlx5e_port_buffer port_buffer;
910 	u8 buffer[MLX5E_MAX_PRIORITY];
911 	int i, err;
912 
913 	if (!MLX5_BUFFER_SUPPORTED(mdev))
914 		return -EOPNOTSUPP;
915 
916 	err = mlx5e_port_query_priority2buffer(mdev, buffer);
917 	if (err)
918 		return err;
919 
920 	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
921 		dcb_buffer->prio2buffer[i] = buffer[i];
922 
923 	err = mlx5e_port_query_buffer(priv, &port_buffer);
924 	if (err)
925 		return err;
926 
927 	for (i = 0; i < MLX5E_MAX_BUFFER; i++)
928 		dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
929 	dcb_buffer->total_size = port_buffer.port_buffer_size;
930 
931 	return 0;
932 }
933 
934 static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
935 				 struct dcbnl_buffer *dcb_buffer)
936 {
937 	struct mlx5e_priv *priv = netdev_priv(dev);
938 	struct mlx5_core_dev *mdev = priv->mdev;
939 	struct mlx5e_port_buffer port_buffer;
940 	u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
941 	u32 *buffer_size = NULL;
942 	u8 *prio2buffer = NULL;
943 	u32 changed = 0;
944 	int i, err;
945 
946 	if (!MLX5_BUFFER_SUPPORTED(mdev))
947 		return -EOPNOTSUPP;
948 
949 	for (i = 0; i < DCBX_MAX_BUFFERS; i++)
950 		mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);
951 
952 	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
953 		mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);
954 
955 	err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
956 	if (err)
957 		return err;
958 
959 	for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
960 		if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
961 			changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
962 			prio2buffer = dcb_buffer->prio2buffer;
963 			break;
964 		}
965 	}
966 
967 	err = mlx5e_port_query_buffer(priv, &port_buffer);
968 	if (err)
969 		return err;
970 
971 	for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
972 		if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
973 			changed |= MLX5E_PORT_BUFFER_SIZE;
974 			buffer_size = dcb_buffer->buffer_size;
975 			break;
976 		}
977 	}
978 
979 	if (!changed)
980 		return 0;
981 
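	/* Switch to manual buffer mode, so that subsequent PFC changes are
	 * applied through mlx5e_port_manual_buffer_config() (see
	 * mlx5e_dcbnl_ieee_setpfc()).
	 */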
982 	priv->dcbx.manual_buffer = true;
983 	err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
984 					      buffer_size, prio2buffer);
985 	return err;
986 }
987 
988 static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
989 	.ieee_getets	= mlx5e_dcbnl_ieee_getets,
990 	.ieee_setets	= mlx5e_dcbnl_ieee_setets,
991 	.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
992 	.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
993 	.ieee_getpfc	= mlx5e_dcbnl_ieee_getpfc,
994 	.ieee_setpfc	= mlx5e_dcbnl_ieee_setpfc,
995 	.ieee_setapp    = mlx5e_dcbnl_ieee_setapp,
996 	.ieee_delapp    = mlx5e_dcbnl_ieee_delapp,
997 	.getdcbx	= mlx5e_dcbnl_getdcbx,
998 	.setdcbx	= mlx5e_dcbnl_setdcbx,
999 	.dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
1000 	.dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,
1001 
1002 /* CEE interfaces */
1003 	.setall         = mlx5e_dcbnl_setall,
1004 	.getstate       = mlx5e_dcbnl_getstate,
1005 	.getpermhwaddr  = mlx5e_dcbnl_getpermhwaddr,
1006 
1007 	.setpgtccfgtx   = mlx5e_dcbnl_setpgtccfgtx,
1008 	.setpgbwgcfgtx  = mlx5e_dcbnl_setpgbwgcfgtx,
1009 	.getpgtccfgtx   = mlx5e_dcbnl_getpgtccfgtx,
1010 	.getpgbwgcfgtx  = mlx5e_dcbnl_getpgbwgcfgtx,
1011 
1012 	.setpfccfg      = mlx5e_dcbnl_setpfccfg,
1013 	.getpfccfg      = mlx5e_dcbnl_getpfccfg,
1014 	.getcap         = mlx5e_dcbnl_getcap,
1015 	.getnumtcs      = mlx5e_dcbnl_getnumtcs,
1016 	.getpfcstate    = mlx5e_dcbnl_getpfcstate,
1017 	.setpfcstate    = mlx5e_dcbnl_setpfcstate,
1018 };
1019 
1020 void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
1021 {
1022 	struct mlx5e_priv *priv = netdev_priv(netdev);
1023 	struct mlx5_core_dev *mdev = priv->mdev;
1024 
1025 	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
1026 		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
1027 }
1028 
1029 void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev)
1030 {
1031 	struct mlx5e_priv *priv = netdev_priv(netdev);
1032 	struct mlx5_core_dev *mdev = priv->mdev;
1033 
1034 	if (MLX5_CAP_GEN(mdev, qos))
1035 		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
1036 }
1037 
1038 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
1039 					enum mlx5_dcbx_oper_mode *mode)
1040 {
1041 	u32 out[MLX5_ST_SZ_DW(dcbx_param)];
1042 
1043 	*mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
1044 
1045 	if (!mlx5_query_port_dcbx_param(priv->mdev, out))
1046 		*mode = MLX5_GET(dcbx_param, out, version_oper);
1047 
1048 	/* From driver's point of view, we only care if the mode
1049 	 * is host (HOST) or non-host (AUTO)
1050 	 */
1051 	if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
1052 		*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
1053 }
1054 
1055 static void mlx5e_ets_init(struct mlx5e_priv *priv)
1056 {
1057 	struct ieee_ets ets;
1058 	int err;
1059 	int i;
1060 
1061 	if (!MLX5_CAP_GEN(priv->mdev, ets))
1062 		return;
1063 
1064 	memset(&ets, 0, sizeof(ets));
1065 	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
1066 	for (i = 0; i < ets.ets_cap; i++) {
1067 		ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
1068 		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
1069 		ets.prio_tc[i] = i;
1070 	}
1071 
1072 	if (ets.ets_cap > 1) {
1073 		/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
1074 		ets.prio_tc[0] = 1;
1075 		ets.prio_tc[1] = 0;
1076 	}
1077 
1078 	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
1079 	if (err)
1080 		netdev_err(priv->netdev,
1081 			   "%s, Failed to init ETS: %d\n", __func__, err);
1082 }
1083 
1084 enum {
1085 	INIT,
1086 	DELETE,
1087 };
1088 
1089 static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
1090 {
1091 	struct dcb_app temp;
1092 	int i;
1093 
1094 	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
1095 		return;
1096 
1097 	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
1098 		return;
1099 
1100 	/* No SEL_DSCP entries exist when not in DSCP trust state */
1101 	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
1102 		return;
1103 
1104 	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
1105 	for (i = 0; i < MLX5E_MAX_DSCP; i++) {
1106 		temp.protocol = i;
1107 		temp.priority = priv->dcbx_dp.dscp2prio[i];
1108 		if (action == INIT)
1109 			dcb_ieee_setapp(priv->netdev, &temp);
1110 		else
1111 			dcb_ieee_delapp(priv->netdev, &temp);
1112 	}
1113 
1114 	priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
1115 }
1116 
1117 void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
1118 {
1119 	mlx5e_dcbnl_dscp_app(priv, INIT);
1120 }
1121 
1122 void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
1123 {
1124 	mlx5e_dcbnl_dscp_app(priv, DELETE);
1125 }
1126 
1127 static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
1128 						       struct mlx5e_params *params,
1129 						       u8 trust_state)
1130 {
1131 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
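	/* When trusting DSCP, inline at least the IP headers in the TX
	 * descriptor (L2-only inline is not enough), presumably so the HW can
	 * read the DSCP field of transmitted packets.
	 */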
1132 	if (trust_state == MLX5_QPTS_TRUST_DSCP &&
1133 	    params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
1134 		params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
1135 }
1136 
1137 static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
1138 {
1139 	u8 *trust_state = context;
1140 	int err;
1141 
1142 	err = mlx5_set_trust_state(priv->mdev, *trust_state);
1143 	if (err)
1144 		return err;
1145 	priv->dcbx_dp.trust_state = *trust_state;
1146 
1147 	return 0;
1148 }
1149 
1150 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
1151 {
1152 	struct mlx5e_params new_params;
1153 	bool reset = true;
1154 	int err;
1155 
1156 	mutex_lock(&priv->state_lock);
1157 
1158 	new_params = priv->channels.params;
1159 	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
1160 						   trust_state);
1161 
1162 	/* Skip if tx_min_inline is the same */
1163 	if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
1164 		reset = false;
1165 
1166 	err = mlx5e_safe_switch_params(priv, &new_params,
1167 				       mlx5e_update_trust_state_hw,
1168 				       &trust_state, reset);
1169 
1170 	mutex_unlock(&priv->state_lock);
1171 
1172 	return err;
1173 }
1174 
1175 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
1176 {
1177 	int err;
1178 
1179 	err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
1180 	if (err)
1181 		return err;
1182 
1183 	priv->dcbx_dp.dscp2prio[dscp] = prio;
1184 	return err;
1185 }
1186 
1187 static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
1188 {
1189 	struct mlx5_core_dev *mdev = priv->mdev;
1190 	int err;
1191 
1192 	priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
1193 
1194 	if (!MLX5_DSCP_SUPPORTED(mdev))
1195 		return 0;
1196 
1197 	err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
1198 	if (err)
1199 		return err;
1200 
1201 	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
1202 						   priv->dcbx_dp.trust_state);
1203 
1204 	err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
1205 	if (err)
1206 		return err;
1207 
1208 	return 0;
1209 }
1210 
1211 #define MLX5E_BUFFER_CELL_SHIFT 7
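/* Default to a 128-byte (1 << MLX5E_BUFFER_CELL_SHIFT) buffer cell size when
 * the SBCAM register is unavailable or cannot be read.
 */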
1212 
1213 static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
1214 {
1215 	struct mlx5_core_dev *mdev = priv->mdev;
1216 	u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
1217 	u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
1218 
1219 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1220 		return (1 << MLX5E_BUFFER_CELL_SHIFT);
1221 
1222 	if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
1223 				 MLX5_REG_SBCAM, 0, 0))
1224 		return (1 << MLX5E_BUFFER_CELL_SHIFT);
1225 
1226 	return MLX5_GET(sbcam_reg, out, cap_cell_size);
1227 }
1228 
1229 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
1230 {
1231 	struct mlx5e_dcbx *dcbx = &priv->dcbx;
1232 
1233 	mlx5e_trust_initialize(priv);
1234 
1235 	if (!MLX5_CAP_GEN(priv->mdev, qos))
1236 		return;
1237 
1238 	if (MLX5_CAP_GEN(priv->mdev, dcbx))
1239 		mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
1240 
1241 	priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
1242 			 DCB_CAP_DCBX_VER_IEEE;
1243 	if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
1244 		priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
1245 
1246 	priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
1247 	priv->dcbx.manual_buffer = false;
1248 	priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
1249 
1250 	mlx5e_ets_init(priv);
1251 }
1252