// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <net/dcbnl.h>

#include "spectrum.h"
#include "reg.h"

static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev)
{
	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}

static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev,
				 u8 mode)
{
	return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
}

static int mlxsw_sp_dcbnl_ieee_getets(struct net_device *dev,
				      struct ieee_ets *ets)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(ets, mlxsw_sp_port->dcb.ets, sizeof(*ets));

	return 0;
}

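/* Validate the requested ETS configuration: each TC must use either
 * strict priority or ETS (DWRR), every priority must map to a valid
 * TC, and if any TC uses ETS, the bandwidth weights must sum to 100.
 */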
static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ieee_ets *ets)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	bool has_ets_tc = false;
	int i, tx_bw_sum = 0;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			has_ets_tc = true;
			tx_bw_sum += ets->tc_tx_bw[i];
			break;
		default:
			netdev_err(dev, "Only strict priority and ETS are supported\n");
			return -EINVAL;
		}

		if (ets->prio_tc[i] >= IEEE_8021QAZ_MAX_TCS) {
			netdev_err(dev, "Invalid TC\n");
			return -EINVAL;
		}
	}

	if (has_ets_tc && tx_bw_sum != 100) {
		netdev_err(dev, "Total ETS bandwidth should equal 100\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_headroom_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct ieee_ets *ets)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_hdroom hdroom;
	int prio;
	int err;

	hdroom = *mlxsw_sp_port->hdroom;
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		hdroom.prios.prio[prio].ets_buf_idx = ets->prio_tc[prio];
	mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	return 0;
}

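/* Apply the requested ETS configuration: first configure the egress
 * subgroup scheduling elements with their DWRR weights, then the
 * priority-to-TC mapping, and finally the ingress headroom. On
 * failure, the previously stored configuration is restored.
 */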
static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
					struct ieee_ets *ets)
{
	struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
	struct net_device *dev = mlxsw_sp_port->dev;
	int i, err;

	/* Egress configuration. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool dwrr = ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
		u8 weight = ets->tc_tx_bw[i];

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, dwrr, weight);
		if (err) {
			netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
				   i);
			goto err_port_ets_set;
		}
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
						ets->prio_tc[i]);
		if (err) {
			netdev_err(dev, "Failed to map prio %d to TC %d\n", i,
				   ets->prio_tc[i]);
			goto err_port_prio_tc_set;
		}
	}

	/* Ingress configuration. */
	err = mlxsw_sp_port_headroom_ets_set(mlxsw_sp_port, ets);
	if (err)
		goto err_port_headroom_set;

	return 0;

err_port_headroom_set:
	i = IEEE_8021QAZ_MAX_TCS;
err_port_prio_tc_set:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, my_ets->prio_tc[i]);
	i = IEEE_8021QAZ_MAX_TCS;
err_port_ets_set:
	for (i--; i >= 0; i--) {
		bool dwrr = my_ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
		u8 weight = my_ets->tc_tx_bw[i];

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, dwrr, weight);
	}
	return err;
}

static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
				      struct ieee_ets *ets)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_ets_validate(mlxsw_sp_port, ets);
	if (err)
		return err;

	err = __mlxsw_sp_dcbnl_ieee_setets(mlxsw_sp_port, ets);
	if (err)
		return err;

	memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
	mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;

	return 0;
}

static int mlxsw_sp_dcbnl_app_validate(struct net_device *dev,
				       struct dcb_app *app)
{
	if (app->priority >= IEEE_8021QAZ_MAX_TCS) {
		netdev_err(dev, "APP entry with priority value %u is invalid\n",
			   app->priority);
		return -EINVAL;
	}

	switch (app->selector) {
	case IEEE_8021QAZ_APP_SEL_DSCP:
		if (app->protocol >= 64) {
			netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n",
				   app->protocol);
			return -EINVAL;
		}
		break;

	case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
		if (app->protocol) {
			netdev_err(dev, "EtherType APP entries with protocol value != 0 not supported\n");
			return -EINVAL;
		}
		break;

	default:
		netdev_err(dev, "APP entries with selector %u not supported\n",
			   app->selector);
		return -EINVAL;
	}

	return 0;
}

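/* Derive the port default priority from the default-priority APP
 * entries. If several priorities are configured, the highest one is
 * used; with no entry, priority 0 is assumed.
 */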
static u8
mlxsw_sp_port_dcb_app_default_prio(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u8 prio_mask;

	prio_mask = dcb_ieee_getapp_default_prio_mask(mlxsw_sp_port->dev);
	if (prio_mask)
		/* Take the highest configured priority. */
		return fls(prio_mask) - 1;

	return 0;
}

static void
mlxsw_sp_port_dcb_app_dscp_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    u8 default_prio,
				    struct dcb_ieee_app_dscp_map *map)
{
	int i;

	dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map);
	for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
		if (map->map[i])
			map->map[i] = fls(map->map[i]) - 1;
		else
			map->map[i] = default_prio;
	}
}

static bool
mlxsw_sp_port_dcb_app_prio_dscp_map(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct dcb_ieee_app_prio_map *map)
{
	bool have_dscp = false;
	int i;

	dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map);
	for (i = 0; i < ARRAY_SIZE(map->map); ++i) {
		if (map->map[i]) {
			map->map[i] = fls64(map->map[i]) - 1;
			have_dscp = true;
		}
	}

	return have_dscp;
}

static int
mlxsw_sp_port_dcb_app_update_qpts(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qpts_trust_state ts)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qpts_pl[MLXSW_REG_QPTS_LEN];

	mlxsw_reg_qpts_pack(qpts_pl, mlxsw_sp_port->local_port, ts);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpts), qpts_pl);
}

static int
mlxsw_sp_port_dcb_app_update_qrwe(struct mlxsw_sp_port *mlxsw_sp_port,
				  bool rewrite_dscp)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qrwe_pl[MLXSW_REG_QRWE_LEN];

	mlxsw_reg_qrwe_pack(qrwe_pl, mlxsw_sp_port->local_port,
			    false, rewrite_dscp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qrwe), qrwe_pl);
}

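/* Switch the port packet trust state (PCP vs. DSCP). DSCP rewrite is
 * enabled only while trusting DSCP. If enabling the rewrite fails,
 * the trust state is rolled back to its previous value.
 */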
static int
mlxsw_sp_port_dcb_toggle_trust(struct mlxsw_sp_port *mlxsw_sp_port,
			       enum mlxsw_reg_qpts_trust_state ts)
{
	bool rewrite_dscp = ts == MLXSW_REG_QPTS_TRUST_STATE_DSCP;
	int err;

	if (mlxsw_sp_port->dcb.trust_state == ts)
		return 0;

	err = mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port, ts);
	if (err)
		return err;

	err = mlxsw_sp_port_dcb_app_update_qrwe(mlxsw_sp_port, rewrite_dscp);
	if (err)
		goto err_update_qrwe;

	mlxsw_sp_port->dcb.trust_state = ts;
	return 0;

err_update_qrwe:
	mlxsw_sp_port_dcb_app_update_qpts(mlxsw_sp_port,
					  mlxsw_sp_port->dcb.trust_state);
	return err;
}

static int
mlxsw_sp_port_dcb_app_update_qpdp(struct mlxsw_sp_port *mlxsw_sp_port,
				  u8 default_prio)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qpdp_pl[MLXSW_REG_QPDP_LEN];

	mlxsw_reg_qpdp_pack(qpdp_pl, mlxsw_sp_port->local_port, default_prio);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdp), qpdp_pl);
}

static int
mlxsw_sp_port_dcb_app_update_qpdpm(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct dcb_ieee_app_dscp_map *map)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qpdpm_pl[MLXSW_REG_QPDPM_LEN];
	short int i;

	mlxsw_reg_qpdpm_pack(qpdpm_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < ARRAY_SIZE(map->map); ++i)
		mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdpm), qpdpm_pl);
}

static int
mlxsw_sp_port_dcb_app_update_qpdsm(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct dcb_ieee_app_prio_map *map)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qpdsm_pl[MLXSW_REG_QPDSM_LEN];
	short int i;

	mlxsw_reg_qpdsm_pack(qpdsm_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < ARRAY_SIZE(map->map); ++i)
		mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpdsm), qpdsm_pl);
}

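/* Reflect the current APP table in the device: program the port
 * default priority, the DSCP-to-priority map and the priority-to-DSCP
 * rewrite map, then trust DSCP if any DSCP APP entries exist, or fall
 * back to trusting PCP otherwise.
 */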
static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct dcb_ieee_app_prio_map prio_map;
	struct dcb_ieee_app_dscp_map dscp_map;
	u8 default_prio;
	bool have_dscp;
	int err;

	default_prio = mlxsw_sp_port_dcb_app_default_prio(mlxsw_sp_port);
	err = mlxsw_sp_port_dcb_app_update_qpdp(mlxsw_sp_port, default_prio);
	if (err) {
		netdev_err(mlxsw_sp_port->dev, "Couldn't configure port default priority\n");
		return err;
	}

	have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
							&prio_map);

	mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
					    &dscp_map);
	err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
						 &dscp_map);
	if (err) {
		netdev_err(mlxsw_sp_port->dev, "Couldn't configure priority map\n");
		return err;
	}

	err = mlxsw_sp_port_dcb_app_update_qpdsm(mlxsw_sp_port,
						 &prio_map);
	if (err) {
		netdev_err(mlxsw_sp_port->dev, "Couldn't configure DSCP rewrite map\n");
		return err;
	}

	if (!have_dscp) {
		err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
					MLXSW_REG_QPTS_TRUST_STATE_PCP);
		if (err)
			netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
		return err;
	}

	err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
					     MLXSW_REG_QPTS_TRUST_STATE_DSCP);
	if (err) {
		/* A failure to set trust DSCP means that the QPDPM and QPDSM
		 * maps installed above are not in effect. And since we are here
		 * attempting to set trust DSCP, we couldn't have attempted to
		 * switch trust to PCP. Thus no cleanup is necessary.
		 */
		netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L3\n");
		return err;
	}

	return 0;
}

static int mlxsw_sp_dcbnl_ieee_setapp(struct net_device *dev,
				      struct dcb_app *app)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_dcbnl_app_validate(dev, app);
	if (err)
		return err;

	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
	if (err)
		goto err_update;

	return 0;

err_update:
	dcb_ieee_delapp(dev, app);
	return err;
}

static int mlxsw_sp_dcbnl_ieee_delapp(struct net_device *dev,
				      struct dcb_app *app)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	err = mlxsw_sp_port_dcb_app_update(mlxsw_sp_port);
	if (err)
		netdev_err(dev, "Failed to update DCB APP configuration\n");
	return 0;
}

static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
					  struct ieee_maxrate *maxrate)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(maxrate, mlxsw_sp_port->dcb.maxrate, sizeof(*maxrate));

	return 0;
}

static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
					  struct ieee_maxrate *maxrate)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct ieee_maxrate *my_maxrate = mlxsw_sp_port->dcb.maxrate;
	int err, i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    maxrate->tc_maxrate[i], 0);
		if (err) {
			netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
			goto err_port_ets_maxrate_set;
		}
	}

	memcpy(mlxsw_sp_port->dcb.maxrate, maxrate, sizeof(*maxrate));

	return 0;

err_port_ets_maxrate_set:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					      MLXSW_REG_QEEC_HR_SUBGROUP,
					      i, 0,
					      my_maxrate->tc_maxrate[i], 0);
	return err;
}

static int mlxsw_sp_port_pfc_cnt_get(struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 prio)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct ieee_pfc *my_pfc = mlxsw_sp_port->dcb.pfc;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
			     MLXSW_REG_PPCNT_PRIO_CNT, prio);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	if (err)
		return err;

	my_pfc->requests[prio] = mlxsw_reg_ppcnt_tx_pause_get(ppcnt_pl);
	my_pfc->indications[prio] = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);

	return 0;
}

static int mlxsw_sp_dcbnl_ieee_getpfc(struct net_device *dev,
				      struct ieee_pfc *pfc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err, i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_pfc_cnt_get(mlxsw_sp_port, i);
		if (err) {
			netdev_err(dev, "Failed to get PFC count for priority %d\n",
				   i);
			return err;
		}
	}

	memcpy(pfc, mlxsw_sp_port->dcb.pfc, sizeof(*pfc));

	return 0;
}

static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct ieee_pfc *pfc)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
	mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

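/* PFC and link-level PAUSE are mutually exclusive on a port. Before
 * PFC is enabled, the headroom is resized to account for the PFC
 * delay allowance; if enabling PFC fails, the original headroom is
 * restored.
 */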
static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
				      struct ieee_pfc *pfc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int prio;
	int err;

	if (pause_en && pfc->pfc_en) {
		netdev_err(dev, "PAUSE frames already enabled on port\n");
		return -EINVAL;
	}

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	if (pfc->pfc_en)
		hdroom.delay_bytes = DIV_ROUND_UP(pfc->delay, BITS_PER_BYTE);
	else
		hdroom.delay_bytes = 0;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		hdroom.prios.prio[prio].lossy = !(pfc->pfc_en & BIT(prio));

	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom for PFC\n");
		return err;
	}

	err = mlxsw_sp_port_pfc_set(mlxsw_sp_port, pfc);
	if (err) {
		netdev_err(dev, "Failed to configure PFC\n");
		goto err_port_pfc_set;
	}

	memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
	mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;

	return 0;

err_port_pfc_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

static int mlxsw_sp_dcbnl_getbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom *hdroom = mlxsw_sp_port->hdroom;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int prio;
	int i;

	buf->total_size = 0;

	BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
	for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
		u32 bytes = mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->bufs.buf[i].size_cells);

		if (i < DCBX_MAX_BUFFERS)
			buf->buffer_size[i] = bytes;
		buf->total_size += bytes;
	}

	buf->total_size += mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->int_buf.size_cells);

	for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
		buf->prio2buffer[prio] = hdroom->prios.prio[prio].buf_idx;

	return 0;
}

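/* dcbnl_setbuffer is only honored when egress is configured using TC.
 * The requested per-buffer sizes and priority-to-buffer mapping are
 * recorded, and the headroom is recomputed and applied.
 */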
static int mlxsw_sp_dcbnl_setbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_hdroom hdroom;
	int prio;
	int i;

	hdroom = *mlxsw_sp_port->hdroom;

	if (hdroom.mode != MLXSW_SP_HDROOM_MODE_TC) {
		netdev_err(dev, "The use of dcbnl_setbuffer is only allowed if egress is configured using TC\n");
		return -EINVAL;
	}

	for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
		hdroom.prios.prio[prio].set_buf_idx = buf->prio2buffer[prio];

	BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
	for (i = 0; i < DCBX_MAX_BUFFERS; i++)
		hdroom.bufs.buf[i].set_size_cells = mlxsw_sp_bytes_cells(mlxsw_sp,
									 buf->buffer_size[i]);

	mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}

static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
	.ieee_getets		= mlxsw_sp_dcbnl_ieee_getets,
	.ieee_setets		= mlxsw_sp_dcbnl_ieee_setets,
	.ieee_getmaxrate	= mlxsw_sp_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate	= mlxsw_sp_dcbnl_ieee_setmaxrate,
	.ieee_getpfc		= mlxsw_sp_dcbnl_ieee_getpfc,
	.ieee_setpfc		= mlxsw_sp_dcbnl_ieee_setpfc,
	.ieee_setapp		= mlxsw_sp_dcbnl_ieee_setapp,
	.ieee_delapp		= mlxsw_sp_dcbnl_ieee_delapp,

	.getdcbx		= mlxsw_sp_dcbnl_getdcbx,
	.setdcbx		= mlxsw_sp_dcbnl_setdcbx,

	.dcbnl_getbuffer	= mlxsw_sp_dcbnl_getbuffer,
	.dcbnl_setbuffer	= mlxsw_sp_dcbnl_setbuffer,
};

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dcb.ets = kzalloc(sizeof(*mlxsw_sp_port->dcb.ets),
					 GFP_KERNEL);
	if (!mlxsw_sp_port->dcb.ets)
		return -ENOMEM;

	mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;

	return 0;
}

static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->dcb.ets);
}

static int mlxsw_sp_port_maxrate_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	mlxsw_sp_port->dcb.maxrate = kmalloc(sizeof(*mlxsw_sp_port->dcb.maxrate),
					     GFP_KERNEL);
	if (!mlxsw_sp_port->dcb.maxrate)
		return -ENOMEM;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_sp_port->dcb.maxrate->tc_maxrate[i] = MLXSW_REG_QEEC_MAS_DIS;

	return 0;
}

static void mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->dcb.maxrate);
}

static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dcb.pfc = kzalloc(sizeof(*mlxsw_sp_port->dcb.pfc),
					 GFP_KERNEL);
	if (!mlxsw_sp_port->dcb.pfc)
		return -ENOMEM;

	mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;

	return 0;
}

static void mlxsw_sp_port_pfc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->dcb.pfc);
}

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port);
	if (err)
		goto err_port_maxrate_init;
	err = mlxsw_sp_port_pfc_init(mlxsw_sp_port);
	if (err)
		goto err_port_pfc_init;

	mlxsw_sp_port->dcb.trust_state = MLXSW_REG_QPTS_TRUST_STATE_PCP;
	mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;

	return 0;

err_port_pfc_init:
	mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
err_port_maxrate_init:
	mlxsw_sp_port_ets_fini(mlxsw_sp_port);
	return err;
}

void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_pfc_fini(mlxsw_sp_port);
	mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
	mlxsw_sp_port_ets_fini(mlxsw_sp_port);
}