1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 
6 #include "hclge_cmd.h"
7 #include "hclge_main.h"
8 #include "hclge_tm.h"
9 
10 enum hclge_shaper_level {
11 	HCLGE_SHAPER_LVL_PRI	= 0,
12 	HCLGE_SHAPER_LVL_PG	= 1,
13 	HCLGE_SHAPER_LVL_PORT	= 2,
14 	HCLGE_SHAPER_LVL_QSET	= 3,
15 	HCLGE_SHAPER_LVL_CNT	= 4,
16 	HCLGE_SHAPER_LVL_VF	= 0,
17 	HCLGE_SHAPER_LVL_PF	= 1,
18 };
19 
20 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
21 #define HCLGE_TM_PFC_NUM_GET_PER_CMD	3
22 
23 #define HCLGE_SHAPER_BS_U_DEF	5
24 #define HCLGE_SHAPER_BS_S_DEF	20
25 
26 #define HCLGE_ETHER_MAX_RATE	100000
27 
/* hclge_shaper_para_calc: calculate the ir parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
31  * @ir_b: IR_B parameter of IR shaper
32  * @ir_u: IR_U parameter of IR shaper
33  * @ir_s: IR_S parameter of IR shaper
34  *
35  * the formula:
36  *
37  *		IR_b * (2 ^ IR_u) * 8
38  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
39  *		Tick * (2 ^ IR_s)
40  *
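 * For example (a worked sketch, not taken from a hardware spec): with
 * ir = 10000 Mbps at priority level (Tick = 6 * 256 = 1536), the code
 * below settles on ir_b = 240, ir_u = 3 and ir_s = 0, since
 * 240 * (2 ^ 3) * 8 / (1536 * (2 ^ 0)) * 1000 = 10000 Mbps.
 *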
 * @return: 0: calculation successful, negative: fail
42  */
43 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
44 				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
45 {
46 #define DIVISOR_CLK		(1000 * 8)
47 #define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)
48 
49 	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,        /* Priority level */
		6 * 32,         /* Priority group level */
52 		6 * 8,          /* Port level */
53 		6 * 256         /* Qset level */
54 	};
55 	u8 ir_u_calc = 0;
56 	u8 ir_s_calc = 0;
57 	u32 ir_calc;
58 	u32 tick;
59 
60 	/* Calc tick */
61 	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
62 	    ir > HCLGE_ETHER_MAX_RATE)
63 		return -EINVAL;
64 
65 	tick = tick_array[shaper_level];
66 
	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0;
	 * the formula reduces to:
70 	 *		126 * 1 * 8
71 	 * ir_calc = ---------------- * 1000
72 	 *		tick * 1
73 	 */
74 	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
75 
76 	if (ir_calc == ir) {
77 		*ir_b = 126;
78 		*ir_u = 0;
79 		*ir_s = 0;
80 
81 		return 0;
82 	} else if (ir_calc > ir) {
83 		/* Increasing the denominator to select ir_s value */
84 		while (ir_calc >= ir && ir) {
85 			ir_s_calc++;
86 			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
87 		}
88 
89 		*ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) /
90 			DIVISOR_CLK;
91 	} else {
92 		/* Increasing the numerator to select ir_u value */
93 		u32 numerator;
94 
95 		while (ir_calc < ir) {
96 			ir_u_calc++;
97 			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
98 			ir_calc = (numerator + (tick >> 1)) / tick;
99 		}
100 
101 		if (ir_calc == ir) {
102 			*ir_b = 126;
103 		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);

			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
106 		}
107 	}
108 
109 	*ir_u = ir_u_calc;
110 	*ir_s = ir_s_calc;
111 
112 	return 0;
113 }
114 
115 static int hclge_pfc_stats_get(struct hclge_dev *hdev,
116 			       enum hclge_opcode_type opcode, u64 *stats)
117 {
118 	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
119 	int ret, i, j;
120 
121 	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
122 	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
123 		return -EINVAL;
124 
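	/* Chain the descriptors: every descriptor except the last one sets
	 * the NEXT flag, so the firmware handles them as one query.
	 */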
125 	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
126 		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
127 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
128 	}
129 
130 	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
131 
132 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
133 	if (ret)
134 		return ret;
135 
136 	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
137 		struct hclge_pfc_stats_cmd *pfc_stats =
138 				(struct hclge_pfc_stats_cmd *)desc[i].data;
139 
140 		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;
142 
143 			if (index < HCLGE_MAX_TC_NUM)
144 				stats[index] =
145 					le64_to_cpu(pfc_stats->pkt_num[j]);
146 		}
147 	}
148 	return 0;
149 }
150 
151 int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
152 {
153 	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
154 }
155 
156 int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
157 {
158 	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
159 }
160 
161 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
162 {
163 	struct hclge_desc desc;
164 
165 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
166 
167 	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
168 		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
169 
170 	return hclge_cmd_send(&hdev->hw, &desc, 1);
171 }
172 
173 static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
174 				  u8 pfc_bitmap)
175 {
176 	struct hclge_desc desc;
177 	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
178 
179 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
180 
181 	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
182 	pfc->pri_en_bitmap = pfc_bitmap;
183 
184 	return hclge_cmd_send(&hdev->hw, &desc, 1);
185 }
186 
187 static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
188 				 u8 pause_trans_gap, u16 pause_trans_time)
189 {
190 	struct hclge_cfg_pause_param_cmd *pause_param;
191 	struct hclge_desc desc;
192 
193 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
194 
195 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
196 
197 	ether_addr_copy(pause_param->mac_addr, addr);
198 	ether_addr_copy(pause_param->mac_addr_extra, addr);
199 	pause_param->pause_trans_gap = pause_trans_gap;
200 	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
201 
202 	return hclge_cmd_send(&hdev->hw, &desc, 1);
203 }
204 
205 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
206 {
207 	struct hclge_cfg_pause_param_cmd *pause_param;
208 	struct hclge_desc desc;
209 	u16 trans_time;
210 	u8 trans_gap;
211 	int ret;
212 
213 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
214 
215 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
216 
217 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
218 	if (ret)
219 		return ret;
220 
221 	trans_gap = pause_param->pause_trans_gap;
222 	trans_time = le16_to_cpu(pause_param->pause_trans_time);
223 
224 	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
225 }
226 
227 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
228 {
229 	u8 tc;
230 
231 	tc = hdev->tm_info.prio_tc[pri_id];
232 
233 	if (tc >= hdev->tm_info.num_tc)
234 		return -EINVAL;
235 
	/* The register for priority has four bytes, the first byte covers
	 * priority 0 and priority 1: the higher 4 bits stand for priority 1
	 * while the lower 4 bits stand for priority 0, as below:
240 	 * first byte:	| pri_1 | pri_0 |
241 	 * second byte:	| pri_3 | pri_2 |
242 	 * third byte:	| pri_5 | pri_4 |
243 	 * fourth byte:	| pri_7 | pri_6 |
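	 *
	 * e.g. with prio_tc = {0, 0, 1, 1, 2, 2, 3, 3} (an illustrative
	 * mapping, not a required one), the four bytes would read
	 * 0x00, 0x11, 0x22 and 0x33.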
244 	 */
245 	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
246 
247 	return 0;
248 }
249 
250 static int hclge_up_to_tc_map(struct hclge_dev *hdev)
251 {
252 	struct hclge_desc desc;
253 	u8 *pri = (u8 *)desc.data;
254 	u8 pri_id;
255 	int ret;
256 
257 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
258 
259 	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
260 		ret = hclge_fill_pri_array(hdev, pri, pri_id);
261 		if (ret)
262 			return ret;
263 	}
264 
265 	return hclge_cmd_send(&hdev->hw, &desc, 1);
266 }
267 
268 static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
269 				      u8 pg_id, u8 pri_bit_map)
270 {
271 	struct hclge_pg_to_pri_link_cmd *map;
272 	struct hclge_desc desc;
273 
274 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
275 
276 	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
277 
278 	map->pg_id = pg_id;
279 	map->pri_bit_map = pri_bit_map;
280 
281 	return hclge_cmd_send(&hdev->hw, &desc, 1);
282 }
283 
284 static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
285 				      u16 qs_id, u8 pri)
286 {
287 	struct hclge_qs_to_pri_link_cmd *map;
288 	struct hclge_desc desc;
289 
290 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
291 
292 	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
293 
294 	map->qs_id = cpu_to_le16(qs_id);
295 	map->priority = pri;
296 	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
297 
298 	return hclge_cmd_send(&hdev->hw, &desc, 1);
299 }
300 
301 static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
302 				    u16 q_id, u16 qs_id)
303 {
304 	struct hclge_nq_to_qs_link_cmd *map;
305 	struct hclge_desc desc;
306 
307 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
308 
309 	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
310 
311 	map->nq_id = cpu_to_le16(q_id);
312 	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
313 
314 	return hclge_cmd_send(&hdev->hw, &desc, 1);
315 }
316 
317 static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
318 				  u8 dwrr)
319 {
320 	struct hclge_pg_weight_cmd *weight;
321 	struct hclge_desc desc;
322 
323 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
324 
325 	weight = (struct hclge_pg_weight_cmd *)desc.data;
326 
327 	weight->pg_id = pg_id;
328 	weight->dwrr = dwrr;
329 
330 	return hclge_cmd_send(&hdev->hw, &desc, 1);
331 }
332 
333 static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
334 				   u8 dwrr)
335 {
336 	struct hclge_priority_weight_cmd *weight;
337 	struct hclge_desc desc;
338 
339 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
340 
341 	weight = (struct hclge_priority_weight_cmd *)desc.data;
342 
343 	weight->pri_id = pri_id;
344 	weight->dwrr = dwrr;
345 
346 	return hclge_cmd_send(&hdev->hw, &desc, 1);
347 }
348 
349 static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
350 				  u8 dwrr)
351 {
352 	struct hclge_qs_weight_cmd *weight;
353 	struct hclge_desc desc;
354 
355 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
356 
357 	weight = (struct hclge_qs_weight_cmd *)desc.data;
358 
359 	weight->qs_id = cpu_to_le16(qs_id);
360 	weight->dwrr = dwrr;
361 
362 	return hclge_cmd_send(&hdev->hw, &desc, 1);
363 }
364 
365 static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
366 				      u8 bs_b, u8 bs_s)
367 {
368 	u32 shapping_para = 0;
369 
370 	hclge_tm_set_field(shapping_para, IR_B, ir_b);
371 	hclge_tm_set_field(shapping_para, IR_U, ir_u);
372 	hclge_tm_set_field(shapping_para, IR_S, ir_s);
373 	hclge_tm_set_field(shapping_para, BS_B, bs_b);
374 	hclge_tm_set_field(shapping_para, BS_S, bs_s);
375 
376 	return shapping_para;
377 }
378 
379 static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
380 				    enum hclge_shap_bucket bucket, u8 pg_id,
381 				    u32 shapping_para)
382 {
383 	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
384 	enum hclge_opcode_type opcode;
385 	struct hclge_desc desc;
386 
387 	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
388 		 HCLGE_OPC_TM_PG_C_SHAPPING;
389 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
390 
391 	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
392 
393 	shap_cfg_cmd->pg_id = pg_id;
394 
395 	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
396 
397 	return hclge_cmd_send(&hdev->hw, &desc, 1);
398 }
399 
400 static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
401 {
402 	struct hclge_port_shapping_cmd *shap_cfg_cmd;
403 	struct hclge_desc desc;
404 	u8 ir_u, ir_b, ir_s;
405 	u32 shapping_para;
406 	int ret;
407 
408 	ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
409 				     HCLGE_SHAPER_LVL_PORT,
410 				     &ir_b, &ir_u, &ir_s);
411 	if (ret)
412 		return ret;
413 
414 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
415 	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
416 
417 	shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
418 						   HCLGE_SHAPER_BS_U_DEF,
419 						   HCLGE_SHAPER_BS_S_DEF);
420 
421 	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
422 
423 	return hclge_cmd_send(&hdev->hw, &desc, 1);
424 }
425 
426 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
427 				     enum hclge_shap_bucket bucket, u8 pri_id,
428 				     u32 shapping_para)
429 {
430 	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
431 	enum hclge_opcode_type opcode;
432 	struct hclge_desc desc;
433 
434 	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
435 		 HCLGE_OPC_TM_PRI_C_SHAPPING;
436 
437 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
438 
439 	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
440 
441 	shap_cfg_cmd->pri_id = pri_id;
442 
443 	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
444 
445 	return hclge_cmd_send(&hdev->hw, &desc, 1);
446 }
447 
448 static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
449 {
450 	struct hclge_desc desc;
451 
452 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
453 
454 	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
455 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
456 	else
457 		desc.data[1] = 0;
458 
459 	desc.data[0] = cpu_to_le32(pg_id);
460 
461 	return hclge_cmd_send(&hdev->hw, &desc, 1);
462 }
463 
464 static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
465 {
466 	struct hclge_desc desc;
467 
468 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
469 
470 	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
471 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
472 	else
473 		desc.data[1] = 0;
474 
475 	desc.data[0] = cpu_to_le32(pri_id);
476 
477 	return hclge_cmd_send(&hdev->hw, &desc, 1);
478 }
479 
480 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
481 {
482 	struct hclge_desc desc;
483 
484 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
485 
486 	if (mode == HCLGE_SCH_MODE_DWRR)
487 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
488 	else
489 		desc.data[1] = 0;
490 
491 	desc.data[0] = cpu_to_le32(qs_id);
492 
493 	return hclge_cmd_send(&hdev->hw, &desc, 1);
494 }
495 
496 static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
497 			      u32 bit_map)
498 {
499 	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
500 	struct hclge_desc desc;
501 
502 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
503 				   false);
504 
505 	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
506 
507 	bp_to_qs_map_cmd->tc_id = tc;
508 	bp_to_qs_map_cmd->qs_group_id = grp_id;
509 	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
510 
511 	return hclge_cmd_send(&hdev->hw, &desc, 1);
512 }
513 
514 int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
515 {
516 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
517 	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
518 	struct hclge_dev *hdev = vport->back;
519 	struct hclge_desc desc;
520 	u8 ir_b, ir_u, ir_s;
521 	u32 shaper_para;
522 	int ret, i;
523 
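	/* A max_tx_rate of 0 is taken as "no limit", so the qset shaper is
	 * programmed with the port maximum rate instead.
	 */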
524 	if (!max_tx_rate)
525 		max_tx_rate = HCLGE_ETHER_MAX_RATE;
526 
527 	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
528 				     &ir_b, &ir_u, &ir_s);
529 	if (ret)
530 		return ret;
531 
532 	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
533 						 HCLGE_SHAPER_BS_U_DEF,
534 						 HCLGE_SHAPER_BS_S_DEF);
535 
536 	for (i = 0; i < kinfo->num_tc; i++) {
537 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
538 					   false);
539 
540 		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
541 		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
542 		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
543 
544 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
545 		if (ret) {
546 			dev_err(&hdev->pdev->dev,
547 				"vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
548 				vport->vport_id, shap_cfg_cmd->qs_id,
549 				max_tx_rate, ret);
550 			return ret;
551 		}
552 	}
553 
554 	return 0;
555 }
556 
557 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
558 {
559 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
560 	struct hclge_dev *hdev = vport->back;
561 	u16 max_rss_size;
562 	u8 i;
563 
	/* TC configuration is shared by the PF and VFs on one port, so only
	 * one TC is allowed per VF for simplicity. A VF's vport_id is
	 * non-zero.
	 */
567 	kinfo->num_tc = vport->vport_id ? 1 :
568 			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
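	/* The PF (vport 0) starts at qset 0 and owns one qset per TC; VF n
	 * (n >= 1) is limited to a single TC above, so it gets the single
	 * qset HNAE3_MAX_TC + n - 1.
	 */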
569 	vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
570 				(vport->vport_id ? (vport->vport_id - 1) : 0);
571 
572 	max_rss_size = min_t(u16, hdev->rss_size_max,
573 			     vport->alloc_tqps / kinfo->num_tc);
574 
575 	/* Set to user value, no larger than max_rss_size. */
576 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
577 	    kinfo->req_rss_size <= max_rss_size) {
578 		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
579 			 kinfo->rss_size, kinfo->req_rss_size);
580 		kinfo->rss_size = kinfo->req_rss_size;
581 	} else if (kinfo->rss_size > max_rss_size ||
582 		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* If the user has not requested an rss_size, also bound it by
		 * the number of valid MSI vectors so that TQPs and IRQs keep
		 * a one-to-one mapping by default.
		 */
587 		if (!kinfo->req_rss_size)
588 			max_rss_size = min_t(u16, max_rss_size,
589 					     (hdev->num_nic_msi - 1) /
590 					     kinfo->num_tc);
591 
592 		/* Set to the maximum specification value (max_rss_size). */
593 		kinfo->rss_size = max_rss_size;
594 	}
595 
596 	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
597 	vport->dwrr = 100;  /* 100 percent as init */
598 	vport->alloc_rss_size = kinfo->rss_size;
599 	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
600 
601 	for (i = 0; i < HNAE3_MAX_TC; i++) {
602 		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
603 			kinfo->tc_info[i].enable = true;
604 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
605 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
606 			kinfo->tc_info[i].tc = i;
607 		} else {
			/* Set to the default queue if the TC is disabled */
609 			kinfo->tc_info[i].enable = false;
610 			kinfo->tc_info[i].tqp_offset = 0;
611 			kinfo->tc_info[i].tqp_count = 1;
612 			kinfo->tc_info[i].tc = 0;
613 		}
614 	}
615 
616 	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
617 	       sizeof_field(struct hnae3_knic_private_info, prio_tc));
618 }
619 
620 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
621 {
622 	struct hclge_vport *vport = hdev->vport;
623 	u32 i;
624 
625 	for (i = 0; i < hdev->num_alloc_vport; i++) {
626 		hclge_tm_vport_tc_info_update(vport);
627 
628 		vport++;
629 	}
630 }
631 
632 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
633 {
634 	u8 i;
635 
636 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
637 		hdev->tm_info.tc_info[i].tc_id = i;
638 		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
639 		hdev->tm_info.tc_info[i].pgid = 0;
640 		hdev->tm_info.tc_info[i].bw_limit =
641 			hdev->tm_info.pg_info[0].bw_limit;
642 	}
643 
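	/* Map each user priority to the TC with the same index; priorities
	 * beyond the enabled TCs fall back to TC 0 (e.g. with 4 TCs, prio
	 * 4..7 all map to TC 0).
	 */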
644 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
645 		hdev->tm_info.prio_tc[i] =
646 			(i >= hdev->tm_info.num_tc) ? 0 : i;
647 
648 	/* DCB is enabled if we have more than 1 TC or pfc_en is
649 	 * non-zero.
650 	 */
651 	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
652 		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
653 	else
654 		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
655 }
656 
657 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
658 {
659 #define BW_PERCENT	100
660 
661 	u8 i;
662 
663 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
664 		int k;
665 
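		/* Only PG 0 carries DWRR weight (100%) by default; the other
		 * PGs are left at 0.
		 */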
666 		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
667 
668 		hdev->tm_info.pg_info[i].pg_id = i;
669 		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
670 
671 		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
672 
673 		if (i != 0)
674 			continue;
675 
676 		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
677 		for (k = 0; k < hdev->tm_info.num_tc; k++)
678 			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
679 	}
680 }
681 
682 static void hclge_pfc_info_init(struct hclge_dev *hdev)
683 {
684 	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
685 		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
686 			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but last mode is FC_PFC\n");
688 
689 		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
690 	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode while DCB is
		 * enabled, so that fc_mode can be restored to the correct
		 * value when DCB is disabled.
		 */
695 		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
696 		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
697 	}
698 }
699 
700 static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
701 {
702 	hclge_tm_pg_info_init(hdev);
703 
704 	hclge_tm_tc_info_init(hdev);
705 
706 	hclge_tm_vport_info_update(hdev);
707 
708 	hclge_pfc_info_init(hdev);
709 }
710 
711 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
712 {
713 	int ret;
714 	u32 i;
715 
716 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
717 		return 0;
718 
719 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
720 		/* Cfg mapping */
721 		ret = hclge_tm_pg_to_pri_map_cfg(
722 			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
723 		if (ret)
724 			return ret;
725 	}
726 
727 	return 0;
728 }
729 
730 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
731 {
732 	u8 ir_u, ir_b, ir_s;
733 	u32 shaper_para;
734 	int ret;
735 	u32 i;
736 
737 	/* Cfg pg schd */
738 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
739 		return 0;
740 
741 	/* Pg to pri */
742 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
743 		/* Calc shaper para */
744 		ret = hclge_shaper_para_calc(
745 					hdev->tm_info.pg_info[i].bw_limit,
746 					HCLGE_SHAPER_LVL_PG,
747 					&ir_b, &ir_u, &ir_s);
748 		if (ret)
749 			return ret;
750 
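		/* The calculated rate is applied only to the P bucket below;
		 * the C bucket is programmed with zero ir parameters.
		 */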
751 		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
752 							 HCLGE_SHAPER_BS_U_DEF,
753 							 HCLGE_SHAPER_BS_S_DEF);
754 		ret = hclge_tm_pg_shapping_cfg(hdev,
755 					       HCLGE_TM_SHAP_C_BUCKET, i,
756 					       shaper_para);
757 		if (ret)
758 			return ret;
759 
760 		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
761 							 HCLGE_SHAPER_BS_U_DEF,
762 							 HCLGE_SHAPER_BS_S_DEF);
763 		ret = hclge_tm_pg_shapping_cfg(hdev,
764 					       HCLGE_TM_SHAP_P_BUCKET, i,
765 					       shaper_para);
766 		if (ret)
767 			return ret;
768 	}
769 
770 	return 0;
771 }
772 
773 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
774 {
775 	int ret;
776 	u32 i;
777 
778 	/* cfg pg schd */
779 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
780 		return 0;
781 
782 	/* pg to prio */
783 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
784 		/* Cfg dwrr */
785 		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
786 		if (ret)
787 			return ret;
788 	}
789 
790 	return 0;
791 }
792 
793 static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
794 				   struct hclge_vport *vport)
795 {
796 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
797 	struct hnae3_queue **tqp = kinfo->tqp;
798 	struct hnae3_tc_info *v_tc_info;
799 	u32 i, j;
800 	int ret;
801 
802 	for (i = 0; i < kinfo->num_tc; i++) {
803 		v_tc_info = &kinfo->tc_info[i];
804 		for (j = 0; j < v_tc_info->tqp_count; j++) {
805 			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
806 
807 			ret = hclge_tm_q_to_qs_map_cfg(hdev,
808 						       hclge_get_queue_id(q),
809 						       vport->qs_offset + i);
810 			if (ret)
811 				return ret;
812 		}
813 	}
814 
815 	return 0;
816 }
817 
818 static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
819 {
820 	struct hclge_vport *vport = hdev->vport;
821 	int ret;
822 	u32 i, k;
823 
824 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
825 		/* Cfg qs -> pri mapping, one by one mapping */
826 		for (k = 0; k < hdev->num_alloc_vport; k++) {
827 			struct hnae3_knic_private_info *kinfo =
828 				&vport[k].nic.kinfo;
829 
830 			for (i = 0; i < kinfo->num_tc; i++) {
831 				ret = hclge_tm_qs_to_pri_map_cfg(
832 					hdev, vport[k].qs_offset + i, i);
833 				if (ret)
834 					return ret;
835 			}
836 		}
837 	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
838 		/* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
839 		for (k = 0; k < hdev->num_alloc_vport; k++)
840 			for (i = 0; i < HNAE3_MAX_TC; i++) {
841 				ret = hclge_tm_qs_to_pri_map_cfg(
842 					hdev, vport[k].qs_offset + i, k);
843 				if (ret)
844 					return ret;
845 			}
846 	} else {
847 		return -EINVAL;
848 	}
849 
850 	/* Cfg q -> qs mapping */
851 	for (i = 0; i < hdev->num_alloc_vport; i++) {
852 		ret = hclge_vport_q_to_qs_map(hdev, vport);
853 		if (ret)
854 			return ret;
855 
856 		vport++;
857 	}
858 
859 	return 0;
860 }
861 
862 static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
863 {
864 	u8 ir_u, ir_b, ir_s;
865 	u32 shaper_para;
866 	int ret;
867 	u32 i;
868 
869 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
870 		ret = hclge_shaper_para_calc(
871 					hdev->tm_info.tc_info[i].bw_limit,
872 					HCLGE_SHAPER_LVL_PRI,
873 					&ir_b, &ir_u, &ir_s);
874 		if (ret)
875 			return ret;
876 
877 		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
878 							 HCLGE_SHAPER_BS_U_DEF,
879 							 HCLGE_SHAPER_BS_S_DEF);
880 		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
881 						shaper_para);
882 		if (ret)
883 			return ret;
884 
885 		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
886 							 HCLGE_SHAPER_BS_U_DEF,
887 							 HCLGE_SHAPER_BS_S_DEF);
888 		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
889 						shaper_para);
890 		if (ret)
891 			return ret;
892 	}
893 
894 	return 0;
895 }
896 
897 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
898 {
899 	struct hclge_dev *hdev = vport->back;
900 	u8 ir_u, ir_b, ir_s;
901 	u32 shaper_para;
902 	int ret;
903 
904 	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
905 				     &ir_b, &ir_u, &ir_s);
906 	if (ret)
907 		return ret;
908 
909 	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
910 						 HCLGE_SHAPER_BS_U_DEF,
911 						 HCLGE_SHAPER_BS_S_DEF);
912 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
913 					vport->vport_id, shaper_para);
914 	if (ret)
915 		return ret;
916 
917 	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
918 						 HCLGE_SHAPER_BS_U_DEF,
919 						 HCLGE_SHAPER_BS_S_DEF);
920 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
921 					vport->vport_id, shaper_para);
922 	if (ret)
923 		return ret;
924 
925 	return 0;
926 }
927 
928 static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
929 {
930 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
931 	struct hclge_dev *hdev = vport->back;
932 	u8 ir_u, ir_b, ir_s;
933 	u32 i;
934 	int ret;
935 
936 	for (i = 0; i < kinfo->num_tc; i++) {
937 		ret = hclge_shaper_para_calc(
938 					hdev->tm_info.tc_info[i].bw_limit,
939 					HCLGE_SHAPER_LVL_QSET,
940 					&ir_b, &ir_u, &ir_s);
941 		if (ret)
942 			return ret;
943 	}
944 
945 	return 0;
946 }
947 
948 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
949 {
950 	struct hclge_vport *vport = hdev->vport;
951 	int ret;
952 	u32 i;
953 
954 	/* Need config vport shaper */
955 	for (i = 0; i < hdev->num_alloc_vport; i++) {
956 		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
957 		if (ret)
958 			return ret;
959 
960 		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
961 		if (ret)
962 			return ret;
963 
964 		vport++;
965 	}
966 
967 	return 0;
968 }
969 
970 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
971 {
972 	int ret;
973 
974 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
975 		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
976 		if (ret)
977 			return ret;
978 	} else {
979 		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
980 		if (ret)
981 			return ret;
982 	}
983 
984 	return 0;
985 }
986 
987 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
988 {
989 	struct hclge_vport *vport = hdev->vport;
990 	struct hclge_pg_info *pg_info;
991 	u8 dwrr;
992 	int ret;
993 	u32 i, k;
994 
995 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
996 		pg_info =
997 			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
998 		dwrr = pg_info->tc_dwrr[i];
999 
1000 		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
1001 		if (ret)
1002 			return ret;
1003 
1004 		for (k = 0; k < hdev->num_alloc_vport; k++) {
1005 			ret = hclge_tm_qs_weight_cfg(
1006 				hdev, vport[k].qs_offset + i,
1007 				vport[k].dwrr);
1008 			if (ret)
1009 				return ret;
1010 		}
1011 	}
1012 
1013 	return 0;
1014 }
1015 
1016 static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
1017 {
1018 #define DEFAULT_TC_WEIGHT	1
1019 #define DEFAULT_TC_OFFSET	14
1020 
1021 	struct hclge_ets_tc_weight_cmd *ets_weight;
1022 	struct hclge_desc desc;
1023 	unsigned int i;
1024 
1025 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
1026 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
1027 
1028 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1029 		struct hclge_pg_info *pg_info;
1030 
1031 		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
1032 
1033 		if (!(hdev->hw_tc_map & BIT(i)))
1034 			continue;
1035 
1036 		pg_info =
1037 			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
1038 		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
1039 	}
1040 
1041 	ets_weight->weight_offset = DEFAULT_TC_OFFSET;
1042 
1043 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1044 }
1045 
1046 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
1047 {
1048 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1049 	struct hclge_dev *hdev = vport->back;
1050 	int ret;
1051 	u8 i;
1052 
1053 	/* Vf dwrr */
1054 	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
1055 	if (ret)
1056 		return ret;
1057 
1058 	/* Qset dwrr */
1059 	for (i = 0; i < kinfo->num_tc; i++) {
1060 		ret = hclge_tm_qs_weight_cfg(
1061 			hdev, vport->qs_offset + i,
1062 			hdev->tm_info.pg_info[0].tc_dwrr[i]);
1063 		if (ret)
1064 			return ret;
1065 	}
1066 
1067 	return 0;
1068 }
1069 
1070 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
1071 {
1072 	struct hclge_vport *vport = hdev->vport;
1073 	int ret;
1074 	u32 i;
1075 
1076 	for (i = 0; i < hdev->num_alloc_vport; i++) {
1077 		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
1078 		if (ret)
1079 			return ret;
1080 
1081 		vport++;
1082 	}
1083 
1084 	return 0;
1085 }
1086 
1087 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
1088 {
1089 	int ret;
1090 
1091 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1092 		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
1093 		if (ret)
1094 			return ret;
1095 
1096 		if (!hnae3_dev_dcb_supported(hdev))
1097 			return 0;
1098 
1099 		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
1100 		if (ret == -EOPNOTSUPP) {
1101 			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
1103 				 hdev->fw_version);
1104 			ret = 0;
1105 		}
1106 
1107 		return ret;
1108 	} else {
1109 		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
1110 		if (ret)
1111 			return ret;
1112 	}
1113 
1114 	return 0;
1115 }
1116 
1117 static int hclge_tm_map_cfg(struct hclge_dev *hdev)
1118 {
1119 	int ret;
1120 
1121 	ret = hclge_up_to_tc_map(hdev);
1122 	if (ret)
1123 		return ret;
1124 
1125 	ret = hclge_tm_pg_to_pri_map(hdev);
1126 	if (ret)
1127 		return ret;
1128 
1129 	return hclge_tm_pri_q_qs_cfg(hdev);
1130 }
1131 
1132 static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
1133 {
1134 	int ret;
1135 
1136 	ret = hclge_tm_port_shaper_cfg(hdev);
1137 	if (ret)
1138 		return ret;
1139 
1140 	ret = hclge_tm_pg_shaper_cfg(hdev);
1141 	if (ret)
1142 		return ret;
1143 
1144 	return hclge_tm_pri_shaper_cfg(hdev);
1145 }
1146 
1147 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
1148 {
1149 	int ret;
1150 
1151 	ret = hclge_tm_pg_dwrr_cfg(hdev);
1152 	if (ret)
1153 		return ret;
1154 
1155 	return hclge_tm_pri_dwrr_cfg(hdev);
1156 }
1157 
1158 static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
1159 {
1160 	int ret;
1161 	u8 i;
1162 
	/* Only configured in TC-based scheduler mode */
1164 	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
1165 		return 0;
1166 
1167 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
1168 		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
1169 		if (ret)
1170 			return ret;
1171 	}
1172 
1173 	return 0;
1174 }
1175 
1176 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
1177 {
1178 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1179 	struct hclge_dev *hdev = vport->back;
1180 	int ret;
1181 	u8 i;
1182 
1183 	if (vport->vport_id >= HNAE3_MAX_TC)
1184 		return -EINVAL;
1185 
1186 	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
1187 	if (ret)
1188 		return ret;
1189 
1190 	for (i = 0; i < kinfo->num_tc; i++) {
1191 		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
1192 
1193 		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
1194 						sch_mode);
1195 		if (ret)
1196 			return ret;
1197 	}
1198 
1199 	return 0;
1200 }
1201 
1202 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
1203 {
1204 	struct hclge_vport *vport = hdev->vport;
1205 	int ret;
1206 	u8 i, k;
1207 
1208 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1209 		for (i = 0; i < hdev->tm_info.num_tc; i++) {
1210 			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
1211 			if (ret)
1212 				return ret;
1213 
1214 			for (k = 0; k < hdev->num_alloc_vport; k++) {
1215 				ret = hclge_tm_qs_schd_mode_cfg(
1216 					hdev, vport[k].qs_offset + i,
1217 					HCLGE_SCH_MODE_DWRR);
1218 				if (ret)
1219 					return ret;
1220 			}
1221 		}
1222 	} else {
1223 		for (i = 0; i < hdev->num_alloc_vport; i++) {
1224 			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
1225 			if (ret)
1226 				return ret;
1227 
1228 			vport++;
1229 		}
1230 	}
1231 
1232 	return 0;
1233 }
1234 
1235 static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
1236 {
1237 	int ret;
1238 
1239 	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
1240 	if (ret)
1241 		return ret;
1242 
1243 	return hclge_tm_lvl34_schd_mode_cfg(hdev);
1244 }
1245 
1246 int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
1247 {
1248 	int ret;
1249 
1250 	/* Cfg tm mapping  */
1251 	ret = hclge_tm_map_cfg(hdev);
1252 	if (ret)
1253 		return ret;
1254 
1255 	/* Cfg tm shaper */
1256 	ret = hclge_tm_shaper_cfg(hdev);
1257 	if (ret)
1258 		return ret;
1259 
1260 	/* Cfg dwrr */
1261 	ret = hclge_tm_dwrr_cfg(hdev);
1262 	if (ret)
1263 		return ret;
1264 
1265 	/* Cfg schd mode for each level schd */
1266 	return hclge_tm_schd_mode_hw(hdev);
1267 }
1268 
1269 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
1270 {
1271 	struct hclge_mac *mac = &hdev->hw.mac;
1272 
1273 	return hclge_pause_param_cfg(hdev, mac->mac_addr,
1274 				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1275 				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1276 }
1277 
1278 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1279 {
1280 	u8 enable_bitmap = 0;
1281 
1282 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
1283 		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
1284 				HCLGE_RX_MAC_PAUSE_EN_MSK;
1285 
1286 	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
1287 				      hdev->tm_info.pfc_en);
1288 }
1289 
/* Each TC has 1024 queue sets for backpressure, divided into 32 groups
 * of 32 queue sets each; each group can be represented by a u32 bitmap.
 */
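/* For example, assuming the group and sub-group masks split qs_id into
 * bits [9:5] and [4:0]: qs_id 100 lands in group 3 as bit 4 of that
 * group's bitmap, since 100 = 3 * 32 + 4.
 */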
1294 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1295 {
1296 	int i;
1297 
1298 	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
1299 		u32 qs_bitmap = 0;
1300 		int k, ret;
1301 
1302 		for (k = 0; k < hdev->num_alloc_vport; k++) {
1303 			struct hclge_vport *vport = &hdev->vport[k];
1304 			u16 qs_id = vport->qs_offset + tc;
1305 			u8 grp, sub_grp;
1306 
1307 			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
1308 					      HCLGE_BP_GRP_ID_S);
1309 			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
1310 						  HCLGE_BP_SUB_GRP_ID_S);
1311 			if (i == grp)
1312 				qs_bitmap |= (1 << sub_grp);
1313 		}
1314 
1315 		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
1316 		if (ret)
1317 			return ret;
1318 	}
1319 
1320 	return 0;
1321 }
1322 
1323 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
1324 {
1325 	bool tx_en, rx_en;
1326 
1327 	switch (hdev->tm_info.fc_mode) {
1328 	case HCLGE_FC_NONE:
1329 		tx_en = false;
1330 		rx_en = false;
1331 		break;
1332 	case HCLGE_FC_RX_PAUSE:
1333 		tx_en = false;
1334 		rx_en = true;
1335 		break;
1336 	case HCLGE_FC_TX_PAUSE:
1337 		tx_en = true;
1338 		rx_en = false;
1339 		break;
1340 	case HCLGE_FC_FULL:
1341 		tx_en = true;
1342 		rx_en = true;
1343 		break;
1344 	case HCLGE_FC_PFC:
1345 		tx_en = false;
1346 		rx_en = false;
1347 		break;
1348 	default:
1349 		tx_en = true;
1350 		rx_en = true;
1351 	}
1352 
1353 	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
1354 }
1355 
1356 static int hclge_tm_bp_setup(struct hclge_dev *hdev)
1357 {
1358 	int ret = 0;
1359 	int i;
1360 
1361 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
1362 		ret = hclge_bp_setup_hw(hdev, i);
1363 		if (ret)
1364 			return ret;
1365 	}
1366 
1367 	return ret;
1368 }
1369 
1370 int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
1371 {
1372 	int ret;
1373 
1374 	ret = hclge_pause_param_setup_hw(hdev);
1375 	if (ret)
1376 		return ret;
1377 
1378 	ret = hclge_mac_pause_setup_hw(hdev);
1379 	if (ret)
1380 		return ret;
1381 
	/* Only DCB-capable devices support qset back pressure and pfc cmd */
1383 	if (!hnae3_dev_dcb_supported(hdev))
1384 		return 0;
1385 
	/* The GE MAC does not support PFC. When the driver is initializing
	 * and the MAC is in GE mode, ignore the error here; otherwise
	 * initialization will fail.
	 */
1390 	ret = hclge_pfc_setup_hw(hdev);
1391 	if (init && ret == -EOPNOTSUPP)
1392 		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
1393 	else if (ret) {
1394 		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
1395 			ret);
1396 		return ret;
1397 	}
1398 
1399 	return hclge_tm_bp_setup(hdev);
1400 }
1401 
1402 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1403 {
1404 	struct hclge_vport *vport = hdev->vport;
1405 	struct hnae3_knic_private_info *kinfo;
1406 	u32 i, k;
1407 
1408 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1409 		hdev->tm_info.prio_tc[i] = prio_tc[i];
1410 
1411 		for (k = 0;  k < hdev->num_alloc_vport; k++) {
1412 			kinfo = &vport[k].nic.kinfo;
1413 			kinfo->prio_tc[i] = prio_tc[i];
1414 		}
1415 	}
1416 }
1417 
1418 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
1419 {
1420 	u8 bit_map = 0;
1421 	u8 i;
1422 
1423 	hdev->tm_info.num_tc = num_tc;
1424 
1425 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1426 		bit_map |= BIT(i);
1427 
1428 	if (!bit_map) {
1429 		bit_map = 1;
1430 		hdev->tm_info.num_tc = 1;
1431 	}
1432 
1433 	hdev->hw_tc_map = bit_map;
1434 
1435 	hclge_tm_schd_info_init(hdev);
1436 }
1437 
1438 void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
1439 {
1440 	/* DCB is enabled if we have more than 1 TC or pfc_en is
1441 	 * non-zero.
1442 	 */
1443 	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
1444 		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
1445 	else
1446 		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
1447 
1448 	hclge_pfc_info_init(hdev);
1449 }
1450 
1451 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
1452 {
1453 	int ret;
1454 
1455 	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
1456 	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
1457 		return -ENOTSUPP;
1458 
1459 	ret = hclge_tm_schd_setup_hw(hdev);
1460 	if (ret)
1461 		return ret;
1462 
1463 	ret = hclge_pause_setup_hw(hdev, init);
1464 	if (ret)
1465 		return ret;
1466 
1467 	return 0;
1468 }
1469 
1470 int hclge_tm_schd_init(struct hclge_dev *hdev)
1471 {
1472 	/* fc_mode is HCLGE_FC_FULL on reset */
1473 	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
1474 	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
1475 
1476 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
1477 	    hdev->tm_info.num_pg != 1)
1478 		return -EINVAL;
1479 
1480 	hclge_tm_schd_info_init(hdev);
1481 
1482 	return hclge_tm_init_hw(hdev, true);
1483 }
1484 
1485 int hclge_tm_vport_map_update(struct hclge_dev *hdev)
1486 {
1487 	struct hclge_vport *vport = hdev->vport;
1488 	int ret;
1489 
1490 	hclge_tm_vport_tc_info_update(vport);
1491 
1492 	ret = hclge_vport_q_to_qs_map(hdev, vport);
1493 	if (ret)
1494 		return ret;
1495 
1496 	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
1497 		return 0;
1498 
1499 	return hclge_tm_bp_setup(hdev);
1500 }
1501