xref: /freebsd/sys/dev/ice/ice_sched.c (revision e17f5b1d)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2020, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 /*$FreeBSD$*/
32 
33 #include "ice_sched.h"
34 
35 /**
36  * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
37  * @pi: port information structure
38  * @info: Scheduler element information from firmware
39  *
40  * This function inserts the root node of the scheduling tree topology
41  * to the SW DB.
42  */
43 static enum ice_status
44 ice_sched_add_root_node(struct ice_port_info *pi,
45 			struct ice_aqc_txsched_elem_data *info)
46 {
47 	struct ice_sched_node *root;
48 	struct ice_hw *hw;
49 
50 	if (!pi)
51 		return ICE_ERR_PARAM;
52 
53 	hw = pi->hw;
54 
55 	root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root));
56 	if (!root)
57 		return ICE_ERR_NO_MEMORY;
58 
59 	/* coverity[suspicious_sizeof] */
60 	root->children = (struct ice_sched_node **)
61 		ice_calloc(hw, hw->max_children[0], sizeof(*root));
62 	if (!root->children) {
63 		ice_free(hw, root);
64 		return ICE_ERR_NO_MEMORY;
65 	}
66 
67 	ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA);
68 	pi->root = root;
69 	return ICE_SUCCESS;
70 }
71 
72 /**
73  * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
74  * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
75  * @teid: node TEID to search
76  *
77  * This function searches for a node matching the TEID in the scheduling tree
78  * from the SW DB. The search is recursive and is restricted by the number of
79  * layers it has searched through; stopping at the max supported layer.
80  *
81  * This function needs to be called when holding the port_info->sched_lock
82  */
83 struct ice_sched_node *
84 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
85 {
86 	u16 i;
87 
88 	/* The TEID is same as that of the start_node */
89 	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
90 		return start_node;
91 
92 	/* The node has no children or is at the max layer */
93 	if (!start_node->num_children ||
94 	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
95 	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
96 		return NULL;
97 
98 	/* Check if TEID matches to any of the children nodes */
99 	for (i = 0; i < start_node->num_children; i++)
100 		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
101 			return start_node->children[i];
102 
103 	/* Search within each child's sub-tree */
104 	for (i = 0; i < start_node->num_children; i++) {
105 		struct ice_sched_node *tmp;
106 
107 		tmp = ice_sched_find_node_by_teid(start_node->children[i],
108 						  teid);
109 		if (tmp)
110 			return tmp;
111 	}
112 
113 	return NULL;
114 }
115 
116 /**
117  * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
118  * @hw: pointer to the HW struct
119  * @cmd_opc: cmd opcode
120  * @elems_req: number of elements to request
121  * @buf: pointer to buffer
122  * @buf_size: buffer size in bytes
123  * @elems_resp: returns total number of elements response
124  * @cd: pointer to command details structure or NULL
125  *
126  * This function sends a scheduling elements cmd (cmd_opc)
127  */
128 static enum ice_status
129 ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
130 			    u16 elems_req, void *buf, u16 buf_size,
131 			    u16 *elems_resp, struct ice_sq_cd *cd)
132 {
133 	struct ice_aqc_sched_elem_cmd *cmd;
134 	struct ice_aq_desc desc;
135 	enum ice_status status;
136 
137 	cmd = &desc.params.sched_elem_cmd;
138 	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
139 	cmd->num_elem_req = CPU_TO_LE16(elems_req);
140 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
141 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
142 	if (!status && elems_resp)
143 		*elems_resp = LE16_TO_CPU(cmd->num_elem_resp);
144 
145 	return status;
146 }
147 
148 /**
149  * ice_aq_query_sched_elems - query scheduler elements
150  * @hw: pointer to the HW struct
151  * @elems_req: number of elements to query
152  * @buf: pointer to buffer
153  * @buf_size: buffer size in bytes
154  * @elems_ret: returns total number of elements returned
155  * @cd: pointer to command details structure or NULL
156  *
157  * Query scheduling elements (0x0404)
158  */
159 enum ice_status
160 ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
161 			 struct ice_aqc_get_elem *buf, u16 buf_size,
162 			 u16 *elems_ret, struct ice_sq_cd *cd)
163 {
164 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
165 					   elems_req, (void *)buf, buf_size,
166 					   elems_ret, cd);
167 }
168 
169 /**
170  * ice_sched_add_node - Insert the Tx scheduler node in SW DB
171  * @pi: port information structure
172  * @layer: Scheduler layer of the node
173  * @info: Scheduler element information from firmware
174  *
175  * This function inserts a scheduler node to the SW DB.
176  */
177 enum ice_status
178 ice_sched_add_node(struct ice_port_info *pi, u8 layer,
179 		   struct ice_aqc_txsched_elem_data *info)
180 {
181 	struct ice_sched_node *parent;
182 	struct ice_aqc_get_elem elem;
183 	struct ice_sched_node *node;
184 	enum ice_status status;
185 	struct ice_hw *hw;
186 
187 	if (!pi)
188 		return ICE_ERR_PARAM;
189 
190 	hw = pi->hw;
191 
192 	/* A valid parent node should be there */
193 	parent = ice_sched_find_node_by_teid(pi->root,
194 					     LE32_TO_CPU(info->parent_teid));
195 	if (!parent) {
196 		ice_debug(hw, ICE_DBG_SCHED,
197 			  "Parent Node not found for parent_teid=0x%x\n",
198 			  LE32_TO_CPU(info->parent_teid));
199 		return ICE_ERR_PARAM;
200 	}
201 
202 	/* query the current node information from FW  before additing it
203 	 * to the SW DB
204 	 */
205 	status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
206 	if (status)
207 		return status;
208 	node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
209 	if (!node)
210 		return ICE_ERR_NO_MEMORY;
211 	if (hw->max_children[layer]) {
212 		/* coverity[suspicious_sizeof] */
213 		node->children = (struct ice_sched_node **)
214 			ice_calloc(hw, hw->max_children[layer], sizeof(*node));
215 		if (!node->children) {
216 			ice_free(hw, node);
217 			return ICE_ERR_NO_MEMORY;
218 		}
219 	}
220 
221 	node->in_use = true;
222 	node->parent = parent;
223 	node->tx_sched_layer = layer;
224 	parent->children[parent->num_children++] = node;
225 	node->info = elem.generic[0];
226 	return ICE_SUCCESS;
227 }
228 
229 /**
230  * ice_aq_delete_sched_elems - delete scheduler elements
231  * @hw: pointer to the HW struct
232  * @grps_req: number of groups to delete
233  * @buf: pointer to buffer
234  * @buf_size: buffer size in bytes
235  * @grps_del: returns total number of elements deleted
236  * @cd: pointer to command details structure or NULL
237  *
238  * Delete scheduling elements (0x040F)
239  */
240 static enum ice_status
241 ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
242 			  struct ice_aqc_delete_elem *buf, u16 buf_size,
243 			  u16 *grps_del, struct ice_sq_cd *cd)
244 {
245 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
246 					   grps_req, (void *)buf, buf_size,
247 					   grps_del, cd);
248 }
249 
250 /**
251  * ice_sched_remove_elems - remove nodes from HW
252  * @hw: pointer to the HW struct
253  * @parent: pointer to the parent node
254  * @num_nodes: number of nodes
255  * @node_teids: array of node teids to be deleted
256  *
257  * This function remove nodes from HW
258  */
259 static enum ice_status
260 ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
261 		       u16 num_nodes, u32 *node_teids)
262 {
263 	struct ice_aqc_delete_elem *buf;
264 	u16 i, num_groups_removed = 0;
265 	enum ice_status status;
266 	u16 buf_size;
267 
268 	buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
269 	buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
270 	if (!buf)
271 		return ICE_ERR_NO_MEMORY;
272 
273 	buf->hdr.parent_teid = parent->info.node_teid;
274 	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
275 	for (i = 0; i < num_nodes; i++)
276 		buf->teid[i] = CPU_TO_LE32(node_teids[i]);
277 
278 	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
279 					   &num_groups_removed, NULL);
280 	if (status != ICE_SUCCESS || num_groups_removed != 1)
281 		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
282 			  hw->adminq.sq_last_status);
283 
284 	ice_free(hw, buf);
285 	return status;
286 }
287 
288 /**
289  * ice_sched_get_first_node - get the first node of the given layer
290  * @pi: port information structure
291  * @parent: pointer the base node of the subtree
292  * @layer: layer number
293  *
294  * This function retrieves the first node of the given layer from the subtree
295  */
296 static struct ice_sched_node *
297 ice_sched_get_first_node(struct ice_port_info *pi,
298 			 struct ice_sched_node *parent, u8 layer)
299 {
300 	return pi->sib_head[parent->tc_num][layer];
301 }
302 
303 /**
304  * ice_sched_get_tc_node - get pointer to TC node
305  * @pi: port information structure
306  * @tc: TC number
307  *
308  * This function returns the TC node pointer
309  */
310 struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
311 {
312 	u8 i;
313 
314 	if (!pi || !pi->root)
315 		return NULL;
316 	for (i = 0; i < pi->root->num_children; i++)
317 		if (pi->root->children[i]->tc_num == tc)
318 			return pi->root->children[i];
319 	return NULL;
320 }
321 
/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = LE32_TO_CPU(node->info.node_teid);

		/* ask FW to delete the element; SW bookkeeping follows */
		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent: shift remaining children down one
		 * slot to fill the hole left by this node
		 */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* unlink this node from its layer's singly linked sibling
		 * list, walking from the layer's first node
		 */
		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		ice_free(hw, node->children);
	ice_free(hw, node);
}
388 
389 /**
390  * ice_aq_get_dflt_topo - gets default scheduler topology
391  * @hw: pointer to the HW struct
392  * @lport: logical port number
393  * @buf: pointer to buffer
394  * @buf_size: buffer size in bytes
395  * @num_branches: returns total number of queue to port branches
396  * @cd: pointer to command details structure or NULL
397  *
398  * Get default scheduler topology (0x400)
399  */
400 static enum ice_status
401 ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
402 		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
403 		     u8 *num_branches, struct ice_sq_cd *cd)
404 {
405 	struct ice_aqc_get_topo *cmd;
406 	struct ice_aq_desc desc;
407 	enum ice_status status;
408 
409 	cmd = &desc.params.get_topo;
410 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
411 	cmd->port_num = lport;
412 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
413 	if (!status && num_branches)
414 		*num_branches = cmd->num_branches;
415 
416 	return status;
417 }
418 
419 /**
420  * ice_aq_add_sched_elems - adds scheduling element
421  * @hw: pointer to the HW struct
422  * @grps_req: the number of groups that are requested to be added
423  * @buf: pointer to buffer
424  * @buf_size: buffer size in bytes
425  * @grps_added: returns total number of groups added
426  * @cd: pointer to command details structure or NULL
427  *
428  * Add scheduling elements (0x0401)
429  */
430 static enum ice_status
431 ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
432 		       struct ice_aqc_add_elem *buf, u16 buf_size,
433 		       u16 *grps_added, struct ice_sq_cd *cd)
434 {
435 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
436 					   grps_req, (void *)buf, buf_size,
437 					   grps_added, cd);
438 }
439 
440 /**
441  * ice_aq_cfg_sched_elems - configures scheduler elements
442  * @hw: pointer to the HW struct
443  * @elems_req: number of elements to configure
444  * @buf: pointer to buffer
445  * @buf_size: buffer size in bytes
446  * @elems_cfgd: returns total number of elements configured
447  * @cd: pointer to command details structure or NULL
448  *
449  * Configure scheduling elements (0x0403)
450  */
451 static enum ice_status
452 ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
453 		       struct ice_aqc_conf_elem *buf, u16 buf_size,
454 		       u16 *elems_cfgd, struct ice_sq_cd *cd)
455 {
456 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
457 					   elems_req, (void *)buf, buf_size,
458 					   elems_cfgd, cd);
459 }
460 
461 /**
462  * ice_aq_move_sched_elems - move scheduler elements
463  * @hw: pointer to the HW struct
464  * @grps_req: number of groups to move
465  * @buf: pointer to buffer
466  * @buf_size: buffer size in bytes
467  * @grps_movd: returns total number of groups moved
468  * @cd: pointer to command details structure or NULL
469  *
470  * Move scheduling elements (0x0408)
471  */
472 static enum ice_status
473 ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
474 			struct ice_aqc_move_elem *buf, u16 buf_size,
475 			u16 *grps_movd, struct ice_sq_cd *cd)
476 {
477 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
478 					   grps_req, (void *)buf, buf_size,
479 					   grps_movd, cd);
480 }
481 
482 /**
483  * ice_aq_suspend_sched_elems - suspend scheduler elements
484  * @hw: pointer to the HW struct
485  * @elems_req: number of elements to suspend
486  * @buf: pointer to buffer
487  * @buf_size: buffer size in bytes
488  * @elems_ret: returns total number of elements suspended
489  * @cd: pointer to command details structure or NULL
490  *
491  * Suspend scheduling elements (0x0409)
492  */
493 static enum ice_status
494 ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
495 			   struct ice_aqc_suspend_resume_elem *buf,
496 			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
497 {
498 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
499 					   elems_req, (void *)buf, buf_size,
500 					   elems_ret, cd);
501 }
502 
503 /**
504  * ice_aq_resume_sched_elems - resume scheduler elements
505  * @hw: pointer to the HW struct
506  * @elems_req: number of elements to resume
507  * @buf: pointer to buffer
508  * @buf_size: buffer size in bytes
509  * @elems_ret: returns total number of elements resumed
510  * @cd: pointer to command details structure or NULL
511  *
512  * resume scheduling elements (0x040A)
513  */
514 static enum ice_status
515 ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
516 			  struct ice_aqc_suspend_resume_elem *buf,
517 			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
518 {
519 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
520 					   elems_req, (void *)buf, buf_size,
521 					   elems_ret, cd);
522 }
523 
524 /**
525  * ice_aq_query_sched_res - query scheduler resource
526  * @hw: pointer to the HW struct
527  * @buf_size: buffer size in bytes
528  * @buf: pointer to buffer
529  * @cd: pointer to command details structure or NULL
530  *
531  * Query scheduler resource allocation (0x0412)
532  */
533 static enum ice_status
534 ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
535 		       struct ice_aqc_query_txsched_res_resp *buf,
536 		       struct ice_sq_cd *cd)
537 {
538 	struct ice_aq_desc desc;
539 
540 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
541 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
542 }
543 
544 /**
545  * ice_sched_suspend_resume_elems - suspend or resume HW nodes
546  * @hw: pointer to the HW struct
547  * @num_nodes: number of nodes
548  * @node_teids: array of node teids to be suspended or resumed
549  * @suspend: true means suspend / false means resume
550  *
551  * This function suspends or resumes HW nodes
552  */
553 static enum ice_status
554 ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
555 			       bool suspend)
556 {
557 	struct ice_aqc_suspend_resume_elem *buf;
558 	u16 i, buf_size, num_elem_ret = 0;
559 	enum ice_status status;
560 
561 	buf_size = sizeof(*buf) * num_nodes;
562 	buf = (struct ice_aqc_suspend_resume_elem *)
563 		ice_malloc(hw, buf_size);
564 	if (!buf)
565 		return ICE_ERR_NO_MEMORY;
566 
567 	for (i = 0; i < num_nodes; i++)
568 		buf->teid[i] = CPU_TO_LE32(node_teids[i]);
569 
570 	if (suspend)
571 		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
572 						    buf_size, &num_elem_ret,
573 						    NULL);
574 	else
575 		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
576 						   buf_size, &num_elem_ret,
577 						   NULL);
578 	if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
579 		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
580 
581 	ice_free(hw, buf);
582 	return status;
583 }
584 
/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 *
 * Ensures the VSI's per-TC LAN queue context array can hold at least
 * @new_numqs entries: allocates it on first use, or grows it (preserving
 * existing entries) when @new_numqs exceeds the current count. The array
 * is never shrunk here.
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!vsi_ctx->lan_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return ICE_SUCCESS;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = (struct ice_q_ctx *)
			ice_calloc(hw, new_numqs, sizeof(*q_ctx));
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		/* copy the old contexts into the new, larger array before
		 * releasing the old allocation
		 */
		ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
			   prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
		ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return ICE_SUCCESS;
}
626 
627 /**
628  * ice_aq_rl_profile - performs a rate limiting task
629  * @hw: pointer to the HW struct
630  * @opcode:opcode for add, query, or remove profile(s)
631  * @num_profiles: the number of profiles
632  * @buf: pointer to buffer
633  * @buf_size: buffer size in bytes
634  * @num_processed: number of processed add or remove profile(s) to return
635  * @cd: pointer to command details structure
636  *
637  * Rl profile function to add, query, or remove profile(s)
638  */
639 static enum ice_status
640 ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
641 		  u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
642 		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
643 {
644 	struct ice_aqc_rl_profile *cmd;
645 	struct ice_aq_desc desc;
646 	enum ice_status status;
647 
648 	cmd = &desc.params.rl_profile;
649 
650 	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
651 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
652 	cmd->num_profiles = CPU_TO_LE16(num_profiles);
653 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
654 	if (!status && num_processed)
655 		*num_processed = LE16_TO_CPU(cmd->num_processed);
656 	return status;
657 }
658 
659 /**
660  * ice_aq_add_rl_profile - adds rate limiting profile(s)
661  * @hw: pointer to the HW struct
662  * @num_profiles: the number of profile(s) to be add
663  * @buf: pointer to buffer
664  * @buf_size: buffer size in bytes
665  * @num_profiles_added: total number of profiles added to return
666  * @cd: pointer to command details structure
667  *
668  * Add RL profile (0x0410)
669  */
670 static enum ice_status
671 ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
672 		      struct ice_aqc_rl_profile_generic_elem *buf,
673 		      u16 buf_size, u16 *num_profiles_added,
674 		      struct ice_sq_cd *cd)
675 {
676 	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
677 				 num_profiles, buf,
678 				 buf_size, num_profiles_added, cd);
679 }
680 
681 /**
682  * ice_aq_query_rl_profile - query rate limiting profile(s)
683  * @hw: pointer to the HW struct
684  * @num_profiles: the number of profile(s) to query
685  * @buf: pointer to buffer
686  * @buf_size: buffer size in bytes
687  * @cd: pointer to command details structure
688  *
689  * Query RL profile (0x0411)
690  */
691 enum ice_status
692 ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
693 			struct ice_aqc_rl_profile_generic_elem *buf,
694 			u16 buf_size, struct ice_sq_cd *cd)
695 {
696 	return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
697 				 num_profiles, buf, buf_size, NULL, cd);
698 }
699 
700 /**
701  * ice_aq_remove_rl_profile - removes RL profile(s)
702  * @hw: pointer to the HW struct
703  * @num_profiles: the number of profile(s) to remove
704  * @buf: pointer to buffer
705  * @buf_size: buffer size in bytes
706  * @num_profiles_removed: total number of profiles removed to return
707  * @cd: pointer to command details structure or NULL
708  *
709  * Remove RL profile (0x0415)
710  */
711 static enum ice_status
712 ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
713 			 struct ice_aqc_rl_profile_generic_elem *buf,
714 			 u16 buf_size, u16 *num_profiles_removed,
715 			 struct ice_sq_cd *cd)
716 {
717 	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
718 				 num_profiles, buf,
719 				 buf_size, num_profiles_removed, cd);
720 }
721 
722 /**
723  * ice_sched_del_rl_profile - remove RL profile
724  * @hw: pointer to the HW struct
725  * @rl_info: rate limit profile information
726  *
727  * If the profile ID is not referenced anymore, it removes profile ID with
728  * its associated parameters from HW DB,and locally. The caller needs to
729  * hold scheduler lock.
730  */
731 static enum ice_status
732 ice_sched_del_rl_profile(struct ice_hw *hw,
733 			 struct ice_aqc_rl_profile_info *rl_info)
734 {
735 	struct ice_aqc_rl_profile_generic_elem *buf;
736 	u16 num_profiles_removed;
737 	enum ice_status status;
738 	u16 num_profiles = 1;
739 
740 	if (rl_info->prof_id_ref != 0)
741 		return ICE_ERR_IN_USE;
742 
743 	/* Safe to remove profile ID */
744 	buf = (struct ice_aqc_rl_profile_generic_elem *)
745 		&rl_info->profile;
746 	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
747 					  &num_profiles_removed, NULL);
748 	if (status || num_profiles_removed != num_profiles)
749 		return ICE_ERR_CFG;
750 
751 	/* Delete stale entry now */
752 	LIST_DEL(&rl_info->list_entry);
753 	ice_free(hw, rl_info);
754 	return status;
755 }
756 
/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profiles from HW as well as from SW DB,
 * walking every scheduler layer's profile list. Reference counts are
 * forced to zero so each profile becomes removable.
 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
	u16 ln;

	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		/* _SAFE variant: the entry is deleted while iterating */
		LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
					 &pi->rl_prof_list[ln],
					 ice_aqc_rl_profile_info, list_entry) {
			struct ice_hw *hw = pi->hw;
			enum ice_status status;

			/* drop references so the delete helper will proceed */
			rl_prof_elem->prof_id_ref = 0;
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status) {
				ice_debug(hw, ICE_DBG_SCHED,
					  "Remove rl profile failed\n");
				/* On error, free mem required; the helper
				 * only frees the entry on success
				 */
				LIST_DEL(&rl_prof_elem->list_entry);
				ice_free(hw, rl_prof_elem);
			}
		}
	}
}
789 
/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes the aggregator list and frees up aggregator
 * related memory previously allocated, including each aggregator's
 * per-VSI sub-list.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	/* _SAFE variants throughout: entries are deleted while iterating */
	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list,
				 ice_sched_agg_info,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		/* free the aggregator's VSI entries before the aggregator */
		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry) {
			LIST_DEL(&agg_vsi_info->list_entry);
			ice_free(hw, agg_vsi_info);
		}
		LIST_DEL(&agg_info->list_entry);
		ice_free(hw, agg_info);
	}
}
818 
819 /**
820  * ice_sched_clear_tx_topo - clears the schduler tree nodes
821  * @pi: port information structure
822  *
823  * This function removes all the nodes from HW as well as from SW DB.
824  */
825 static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
826 {
827 	if (!pi)
828 		return;
829 	/* remove RL profiles related lists */
830 	ice_sched_clear_rl_prof(pi);
831 	if (pi->root) {
832 		ice_free_sched_node(pi, pi->root);
833 		pi->root = NULL;
834 	}
835 }
836 
837 /**
838  * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
839  * @pi: port information structure
840  *
841  * Cleanup scheduling elements from SW DB
842  */
843 void ice_sched_clear_port(struct ice_port_info *pi)
844 {
845 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
846 		return;
847 
848 	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
849 	ice_acquire_lock(&pi->sched_lock);
850 	ice_sched_clear_tx_topo(pi);
851 	ice_release_lock(&pi->sched_lock);
852 	ice_destroy_lock(&pi->sched_lock);
853 }
854 
855 /**
856  * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
857  * @hw: pointer to the HW struct
858  *
859  * Cleanup scheduling elements from SW DB for all the ports
860  */
861 void ice_sched_cleanup_all(struct ice_hw *hw)
862 {
863 	if (!hw)
864 		return;
865 
866 	if (hw->layer_info) {
867 		ice_free(hw, hw->layer_info);
868 		hw->layer_info = NULL;
869 	}
870 
871 	ice_sched_clear_port(hw->port_info);
872 
873 	hw->num_tx_sched_layers = 0;
874 	hw->num_tx_sched_phys_layers = 0;
875 	hw->flattened_layers = 0;
876 	hw->max_cgds = 0;
877 }
878 
879 /**
880  * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
881  * @hw: pointer to the HW struct
882  * @num_l2_nodes: the number of L2 nodes whose CGDs to configure
883  * @buf: pointer to buffer
884  * @buf_size: buffer size in bytes
885  * @cd: pointer to command details structure or NULL
886  *
887  * Configure L2 Node CGD (0x0414)
888  */
889 enum ice_status
890 ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
891 		       struct ice_aqc_cfg_l2_node_cgd_data *buf,
892 		       u16 buf_size, struct ice_sq_cd *cd)
893 {
894 	struct ice_aqc_cfg_l2_node_cgd *cmd;
895 	struct ice_aq_desc desc;
896 
897 	cmd = &desc.params.cfg_l2_node_cgd;
898 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd);
899 	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
900 
901 	cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes);
902 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
903 }
904 
/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 *
 * This function adds nodes to HW as well as to SW DB for a given layer.
 * All @num_nodes elements are created under the same @parent with default
 * CIR/EIR bandwidth profiles. On success *num_nodes_added is set to
 * @num_nodes and *first_node_teid receives the FW-assigned TEID of the
 * first new node; the new nodes are also linked into the per-TC sibling
 * list for @layer.
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 buf_size;
	u32 teid;

	/* the struct already holds one element; size for num_nodes - 1 more */
	buf_size = ice_struct_size(buf, generic, num_nodes - 1);
	buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		/* generic SE node with default rate-limiting profiles */
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
	}

	/* all nodes are submitted to FW as a single group (branch) */
	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status != ICE_SUCCESS || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		ice_free(hw, buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		/* FW wrote the TEID it assigned back into the buffer */
		teid = LE32_TO_CPU(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			/* append at the tail of this layer's sibling list */
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head if this is the layer's first */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	ice_free(hw, buf);
	return status;
}
1005 
/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer. It fills whatever room is
 * left under @parent first, then recurses onto @parent's sibling for the
 * remainder. On return, *num_nodes_added holds the number of nodes
 * actually created (may be fewer than @num_nodes on error) and
 * *first_node_teid the TEID of the first node created.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes, max_child_nodes;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 num_added = 0;
	u32 temp;

	*num_nodes_added = 0;

	/* nothing requested: trivially successful */
	if (!num_nodes)
		return status;

	if (!parent || layer < hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes = hw->max_children[parent->tx_sched_layer];

	/* current number of children + required nodes exceed max children ? */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status != ICE_SUCCESS)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node TEID memory if the first node was
		 * added already in the above call. Instead send some temp
		 * memory for all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;

		/* This parent is full, try the next sibling */
		parent = parent->sibling;

		/* this recursion is intentional, for 1024 queues
		 * per VSI, it goes max of 16 iterations.
		 * 1024 / 8 = 128 layer 8 nodes
		 * 128 /8 = 16 (add 8 nodes per iteration)
		 */
		/* a NULL sibling is caught by the !parent check above */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	/* parent has room for every requested node; add them in one shot */
	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}
1094 
1095 /**
1096  * ice_sched_get_qgrp_layer - get the current queue group layer number
1097  * @hw: pointer to the HW struct
1098  *
1099  * This function returns the current queue group layer number
1100  */
1101 static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
1102 {
1103 	/* It's always total layers - 1, the array is 0 relative so -2 */
1104 	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1105 }
1106 
1107 /**
1108  * ice_sched_get_vsi_layer - get the current VSI layer number
1109  * @hw: pointer to the HW struct
1110  *
1111  * This function returns the current VSI layer number
1112  */
1113 static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
1114 {
1115 	/* Num Layers       VSI layer
1116 	 *     9               6
1117 	 *     7               4
1118 	 *     5 or less       sw_entry_point_layer
1119 	 */
1120 	/* calculate the VSI layer based on number of layers. */
1121 	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
1122 		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
1123 
1124 		if (layer > hw->sw_entry_point_layer)
1125 			return layer;
1126 	}
1127 	return hw->sw_entry_point_layer;
1128 }
1129 
1130 /**
1131  * ice_sched_get_agg_layer - get the current aggregator layer number
1132  * @hw: pointer to the HW struct
1133  *
1134  * This function returns the current aggregator layer number
1135  */
1136 static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
1137 {
1138 	/* Num Layers       aggregator layer
1139 	 *     9               4
1140 	 *     7 or less       sw_entry_point_layer
1141 	 */
1142 	/* calculate the aggregator layer based on number of layers. */
1143 	if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
1144 		u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
1145 
1146 		if (layer > hw->sw_entry_point_layer)
1147 			return layer;
1148 	}
1149 	return hw->sw_entry_point_layer;
1150 }
1151 
1152 /**
1153  * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1154  * @pi: port information structure
1155  *
1156  * This function removes the leaf node that was created by the FW
1157  * during initialization
1158  */
1159 static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
1160 {
1161 	struct ice_sched_node *node;
1162 
1163 	node = pi->root;
1164 	while (node) {
1165 		if (!node->num_children)
1166 			break;
1167 		node = node->children[0];
1168 	}
1169 	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
1170 		u32 teid = LE32_TO_CPU(node->info.node_teid);
1171 		enum ice_status status;
1172 
1173 		/* remove the default leaf node */
1174 		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
1175 		if (!status)
1176 			ice_free_sched_node(pi, node);
1177 	}
1178 }
1179 
1180 /**
1181  * ice_sched_rm_dflt_nodes - free the default nodes in the tree
1182  * @pi: port information structure
1183  *
1184  * This function frees all the nodes except root and TC that were created by
1185  * the FW during initialization
1186  */
1187 static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
1188 {
1189 	struct ice_sched_node *node;
1190 
1191 	ice_rm_dflt_leaf_node(pi);
1192 
1193 	/* remove the default nodes except TC and root nodes */
1194 	node = pi->root;
1195 	while (node) {
1196 		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
1197 		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
1198 		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
1199 			ice_free_sched_node(pi, node);
1200 			break;
1201 		}
1202 
1203 		if (!node->num_children)
1204 			break;
1205 		node = node->children[0];
1206 	}
1207 }
1208 
/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB. On any failure the partially-built tree rooted at pi->root is
 * freed before returning.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw,
							 ICE_AQ_MAX_BUF_LEN);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = LE16_TO_CPU(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = LE16_TO_CPU(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			/* NOTE(review): the entry-point type is read from
			 * branch 0 (buf[0]) while nodes are added from
			 * buf[i]; presumably the SW entry point layer is
			 * identical across branches -- confirm
			 */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	ice_init_lock(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(&pi->rl_prof_list[i]);

err_init_port:
	/* on failure, tear down whatever portion of the tree was built */
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	ice_free(hw, buf);
	return status;
}
1313 
1314 /**
1315  * ice_sched_get_node - Get the struct ice_sched_node for given TEID
1316  * @pi: port information structure
1317  * @teid: Scheduler node TEID
1318  *
1319  * This function retrieves the ice_sched_node struct for given TEID from
1320  * the SW DB and returns it to the caller.
1321  */
1322 struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
1323 {
1324 	struct ice_sched_node *node;
1325 
1326 	if (!pi)
1327 		return NULL;
1328 
1329 	/* Find the node starting from root */
1330 	ice_acquire_lock(&pi->sched_lock);
1331 	node = ice_sched_find_node_by_teid(pi->root, teid);
1332 	ice_release_lock(&pi->sched_lock);
1333 
1334 	if (!node)
1335 		ice_debug(pi->hw, ICE_DBG_SCHED,
1336 			  "Node not found for teid=0x%x\n", teid);
1337 
1338 	return node;
1339 }
1340 
/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct.
 * No-op (success) if the layer info was already cached. Populates
 * num_tx_sched_layers, num_tx_sched_phys_layers, flattened_layers,
 * max_cgds, max_children[] and layer_info.
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = ICE_SUCCESS;
	__le16 max_sibl;
	u8 i;

	/* already queried once; keep the cached copy */
	if (hw->layer_info)
		return status;

	buf = (struct ice_aqc_query_txsched_res_resp *)
		ice_malloc(hw, sizeof(*buf));
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		LE16_TO_CPU(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
		/* [i + 1] is intentional: see the comment above */
		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
		hw->max_children[i] = LE16_TO_CPU(max_sibl);
	}

	/* keep a private copy of the per-layer properties */
	hw->layer_info = (struct ice_aqc_layer_props *)
			 ice_memdup(hw, buf->layer_props,
				    (hw->num_tx_sched_layers *
				     sizeof(*hw->layer_info)),
				    ICE_DMA_TO_DMA);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	ice_free(hw, buf);
	return status;
}
1398 
1399 /**
1400  * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
1401  * @hw: pointer to the HW struct
1402  *
1403  * Determine the PSM clock frequency and store in HW struct
1404  */
1405 void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
1406 {
1407 	u32 val, clk_src;
1408 
1409 	val = rd32(hw, GLGEN_CLKSTAT_SRC);
1410 	clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
1411 		GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
1412 
1413 #define PSM_CLK_SRC_367_MHZ 0x0
1414 #define PSM_CLK_SRC_416_MHZ 0x1
1415 #define PSM_CLK_SRC_446_MHZ 0x2
1416 #define PSM_CLK_SRC_390_MHZ 0x3
1417 
1418 	switch (clk_src) {
1419 	case PSM_CLK_SRC_367_MHZ:
1420 		hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
1421 		break;
1422 	case PSM_CLK_SRC_416_MHZ:
1423 		hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
1424 		break;
1425 	case PSM_CLK_SRC_446_MHZ:
1426 		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
1427 		break;
1428 	case PSM_CLK_SRC_390_MHZ:
1429 		hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
1430 		break;
1431 	default:
1432 		ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
1433 			  clk_src);
1434 		/* fall back to a safe default */
1435 		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
1436 	}
1437 }
1438 
1439 /**
1440  * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1441  * @hw: pointer to the HW struct
1442  * @base: pointer to the base node
1443  * @node: pointer to the node to search
1444  *
1445  * This function checks whether a given node is part of the base node
1446  * subtree or not
1447  */
1448 bool
1449 ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1450 			       struct ice_sched_node *node)
1451 {
1452 	u8 i;
1453 
1454 	for (i = 0; i < base->num_children; i++) {
1455 		struct ice_sched_node *child = base->children[i];
1456 
1457 		if (node == child)
1458 			return true;
1459 
1460 		if (child->tx_sched_layer > node->tx_sched_layer)
1461 			return false;
1462 
1463 		/* this recursion is intentional, and wouldn't
1464 		 * go more than 8 calls
1465 		 */
1466 		if (ice_sched_find_node_in_subtree(hw, child, node))
1467 			return true;
1468 	}
1469 	return false;
1470 }
1471 
/**
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node by scanning
 * the queue group layer siblings of the VSI's subtree. Returns NULL when
 * the VSI context is missing, the VSI has no node on this TC, or every
 * matching queue group owned by @owner is already full.
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node = NULL;
	struct ice_vsi_ctx *vsi_ctx;
	u16 max_children;
	u8 qgrp_layer;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* validate invalid VSI ID */
	if (!vsi_node)
		goto lan_q_exit;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			/* stop at the first group with room and a matching
			 * owner; otherwise keep scanning the siblings
			 */
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

lan_q_exit:
	return qgrp_node;
}
1515 
1516 /**
1517  * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
1518  * @pi: pointer to the port information structure
1519  * @tc_node: pointer to the TC node
1520  * @vsi_handle: software VSI handle
1521  *
1522  * This function retrieves a VSI node for a given VSI ID from a given
1523  * TC branch
1524  */
1525 struct ice_sched_node *
1526 ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1527 		       u16 vsi_handle)
1528 {
1529 	struct ice_sched_node *node;
1530 	u8 vsi_layer;
1531 
1532 	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
1533 	node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
1534 
1535 	/* Check whether it already exists */
1536 	while (node) {
1537 		if (node->vsi_handle == vsi_handle)
1538 			return node;
1539 		node = node->sibling;
1540 	}
1541 
1542 	return node;
1543 }
1544 
1545 /**
1546  * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
1547  * @pi: pointer to the port information structure
1548  * @tc_node: pointer to the TC node
1549  * @agg_id: aggregator ID
1550  *
1551  * This function retrieves an aggregator node for a given aggregator ID from
1552  * a given TC branch
1553  */
1554 static struct ice_sched_node *
1555 ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1556 		       u32 agg_id)
1557 {
1558 	struct ice_sched_node *node;
1559 	struct ice_hw *hw = pi->hw;
1560 	u8 agg_layer;
1561 
1562 	if (!hw)
1563 		return NULL;
1564 	agg_layer = ice_sched_get_agg_layer(hw);
1565 	node = ice_sched_get_first_node(pi, tc_node, agg_layer);
1566 
1567 	/* Check whether it already exists */
1568 	while (node) {
1569 		if (node->agg_id == agg_id)
1570 			return node;
1571 		node = node->sibling;
1572 	}
1573 
1574 	return node;
1575 }
1576 
1577 /**
1578  * ice_sched_check_node - Compare node parameters between SW DB and HW DB
1579  * @hw: pointer to the HW struct
1580  * @node: pointer to the ice_sched_node struct
1581  *
1582  * This function queries and compares the HW element with SW DB node parameters
1583  */
1584 static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
1585 {
1586 	struct ice_aqc_get_elem buf;
1587 	enum ice_status status;
1588 	u32 node_teid;
1589 
1590 	node_teid = LE32_TO_CPU(node->info.node_teid);
1591 	status = ice_sched_query_elem(hw, node_teid, &buf);
1592 	if (status != ICE_SUCCESS)
1593 		return false;
1594 
1595 	if (memcmp(buf.generic, &node->info, sizeof(*buf.generic))) {
1596 		ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
1597 			  node_teid);
1598 		return false;
1599 	}
1600 
1601 	return true;
1602 }
1603 
1604 /**
1605  * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1606  * @hw: pointer to the HW struct
1607  * @num_qs: number of queues
1608  * @num_nodes: num nodes array
1609  *
1610  * This function calculates the number of VSI child nodes based on the
1611  * number of queues.
1612  */
1613 static void
1614 ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1615 {
1616 	u16 num = num_qs;
1617 	u8 i, qgl, vsil;
1618 
1619 	qgl = ice_sched_get_qgrp_layer(hw);
1620 	vsil = ice_sched_get_vsi_layer(hw);
1621 
1622 	/* calculate num nodes from queue group to VSI layer */
1623 	for (i = qgl; i > vsil; i--) {
1624 		/* round to the next integer if there is a remainder */
1625 		num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]);
1626 
1627 		/* need at least one node */
1628 		num_nodes[i] = num ? num : 1;
1629 	}
1630 }
1631 
/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * LAN and RDMA separately. Nodes are added layer by layer from just below
 * the VSI layer down to the queue group layer; each layer's newly added
 * nodes (or an existing first child) serve as parents for the next layer.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	for (i = vsil + 1; i <= qgl; i++) {
		/* no parent at this layer means the tree is inconsistent */
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		/* a partial add is also treated as a failure */
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* mark the new node and its siblings with the owner */
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			/* nothing added (num_nodes[i] was 0): descend via
			 * the existing first child
			 */
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}
1687 
/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @pi: pointer to the port info structure
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 * layers. It walks upward from the VSI layer to the SW entry point layer;
 * once an existing intermediate node with spare capacity is found, no
 * further layers above it need new nodes.
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(pi, tc_node, (u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children <
				    pi->hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* tree has one intermediate node to add this new VSI.
			 * So no need to calculate supported nodes for below
			 * layers.
			 */
			if (node)
				break;
			/* all the nodes are full, allocate a new one */
			num_nodes[i]++;
		}
}
1736 
/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers. Layers are
 * processed top-down from the SW entry point to the VSI layer; each
 * layer's new node (or the existing first child) becomes the parent for
 * the next layer. The node created at the VSI layer is tagged with
 * @vsi_handle.
 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		/* a partial add is also treated as a failure */
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		/* the VSI-layer node carries the software VSI handle */
		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return ICE_SUCCESS;
}
1787 
1788 /**
1789  * ice_sched_add_vsi_to_topo - add a new VSI into tree
1790  * @pi: port information structure
1791  * @vsi_handle: software VSI handle
1792  * @tc: TC number
1793  *
1794  * This function adds a new VSI into scheduler tree
1795  */
1796 static enum ice_status
1797 ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1798 {
1799 	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1800 	struct ice_sched_node *tc_node;
1801 
1802 	tc_node = ice_sched_get_tc_node(pi, tc);
1803 	if (!tc_node)
1804 		return ICE_ERR_PARAM;
1805 
1806 	/* calculate number of supported nodes needed for this VSI */
1807 	ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
1808 
1809 	/* add VSI supported nodes to TC subtree */
1810 	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1811 					       num_nodes);
1812 }
1813 
/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues.
 * The tree only ever grows: when @new_numqs is less than or equal to the
 * previously configured count, nothing is changed and success is returned.
 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	prev_numqs = vsi_ctx->sched.max_lanq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
	/* make sure the LAN queue contexts can hold the new count */
	status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
	if (status)
		return status;

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the max number of queue configuration all the time. Update the
	 * tree only if number of queues > previous number of queues. This may
	 * leave some extra nodes in the tree if number of queues < previous
	 * number but that wouldn't harm anything. Removing those extra nodes
	 * may complicate the code if those nodes are part of SRL or
	 * individually rate limited.
	 */
	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
					       new_num_nodes, owner);
	if (status)
		return status;
	/* record the new high-water mark only after the nodes were added */
	vsi_ctx->sched.max_lanq[tc] = new_numqs;

	return ICE_SUCCESS;
}
1873 
/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: LAN or RDMA
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;

	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);

	/* suspend the VSI if TC is not enabled */
	if (!enable) {
		/* suspend only a VSI node that exists and is in use */
		if (vsi_node && vsi_node->in_use) {
			u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		/* re-fetch the node that was just created */
		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}
1954 
1955 /**
1956  * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
1957  * @pi: port information structure
1958  * @vsi_handle: software VSI handle
1959  *
1960  * This function removes single aggregator VSI info entry from
1961  * aggregator list.
1962  */
1963 static void
1964 ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
1965 {
1966 	struct ice_sched_agg_info *agg_info;
1967 	struct ice_sched_agg_info *atmp;
1968 
1969 	LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list,
1970 				 ice_sched_agg_info,
1971 				 list_entry) {
1972 		struct ice_sched_agg_vsi_info *agg_vsi_info;
1973 		struct ice_sched_agg_vsi_info *vtmp;
1974 
1975 		LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
1976 					 &agg_info->agg_vsi_list,
1977 					 ice_sched_agg_vsi_info, list_entry)
1978 			if (agg_vsi_info->vsi_handle == vsi_handle) {
1979 				LIST_DEL(&agg_vsi_info->list_entry);
1980 				ice_free(pi->hw, agg_vsi_info);
1981 				return;
1982 			}
1983 	}
1984 }
1985 
1986 /**
1987  * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
1988  * @node: pointer to the sub-tree node
1989  *
1990  * This function checks for a leaf node presence in a given sub-tree node.
1991  */
1992 static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
1993 {
1994 	u8 i;
1995 
1996 	for (i = 0; i < node->num_children; i++)
1997 		if (ice_sched_is_leaf_node_present(node->children[i]))
1998 			return true;
1999 	/* check for a leaf node */
2000 	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
2001 }
2002 
2003 /**
2004  * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
2005  * @pi: port information structure
2006  * @vsi_handle: software VSI handle
2007  * @owner: LAN or RDMA
2008  *
2009  * This function removes the VSI and its LAN or RDMA children nodes from the
2010  * scheduler tree.
2011  */
2012 static enum ice_status
2013 ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
2014 {
2015 	enum ice_status status = ICE_ERR_PARAM;
2016 	struct ice_vsi_ctx *vsi_ctx;
2017 	u8 i;
2018 
2019 	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
2020 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2021 		return status;
2022 	ice_acquire_lock(&pi->sched_lock);
2023 	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
2024 	if (!vsi_ctx)
2025 		goto exit_sched_rm_vsi_cfg;
2026 
2027 	ice_for_each_traffic_class(i) {
2028 		struct ice_sched_node *vsi_node, *tc_node;
2029 		u8 j = 0;
2030 
2031 		tc_node = ice_sched_get_tc_node(pi, i);
2032 		if (!tc_node)
2033 			continue;
2034 
2035 		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
2036 		if (!vsi_node)
2037 			continue;
2038 
2039 		if (ice_sched_is_leaf_node_present(vsi_node)) {
2040 			ice_debug(pi->hw, ICE_DBG_SCHED,
2041 				  "VSI has leaf nodes in TC %d\n", i);
2042 			status = ICE_ERR_IN_USE;
2043 			goto exit_sched_rm_vsi_cfg;
2044 		}
2045 		while (j < vsi_node->num_children) {
2046 			if (vsi_node->children[j]->owner == owner) {
2047 				ice_free_sched_node(pi, vsi_node->children[j]);
2048 
2049 				/* reset the counter again since the num
2050 				 * children will be updated after node removal
2051 				 */
2052 				j = 0;
2053 			} else {
2054 				j++;
2055 			}
2056 		}
2057 		/* remove the VSI if it has no children */
2058 		if (!vsi_node->num_children) {
2059 			ice_free_sched_node(pi, vsi_node);
2060 			vsi_ctx->sched.vsi_node[i] = NULL;
2061 
2062 			/* clean up aggregator related VSI info if any */
2063 			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
2064 		}
2065 		if (owner == ICE_SCHED_NODE_OWNER_LAN)
2066 			vsi_ctx->sched.max_lanq[i] = 0;
2067 	}
2068 	status = ICE_SUCCESS;
2069 
2070 exit_sched_rm_vsi_cfg:
2071 	ice_release_lock(&pi->sched_lock);
2072 	return status;
2073 }
2074 
2075 /**
2076  * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
2077  * @pi: port information structure
2078  * @vsi_handle: software VSI handle
2079  *
2080  * This function clears the VSI and its LAN children nodes from scheduler tree
2081  * for all TCs.
2082  */
2083 enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
2084 {
2085 	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
2086 }
2087 
2088 /**
2089  * ice_sched_is_tree_balanced - Check tree nodes are identical or not
2090  * @hw: pointer to the HW struct
2091  * @node: pointer to the ice_sched_node struct
2092  *
2093  * This function compares all the nodes for a given tree against HW DB nodes
2094  * This function needs to be called with the port_info->sched_lock held
2095  */
2096 bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
2097 {
2098 	u8 i;
2099 
2100 	/* start from the leaf node */
2101 	for (i = 0; i < node->num_children; i++)
2102 		/* Fail if node doesn't match with the SW DB
2103 		 * this recursion is intentional, and wouldn't
2104 		 * go more than 9 calls
2105 		 */
2106 		if (!ice_sched_is_tree_balanced(hw, node->children[i]))
2107 			return false;
2108 
2109 	return ice_sched_check_node(hw, node);
2110 }
2111 
2112 /**
2113  * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID
2114  * @hw: pointer to the HW struct
2115  * @node_teid: node TEID
2116  * @buf: pointer to buffer
2117  * @buf_size: buffer size in bytes
2118  * @cd: pointer to command details structure or NULL
2119  *
2120  * This function retrieves the tree topology from the firmware for a given
2121  * node TEID to the root node.
2122  */
2123 enum ice_status
2124 ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
2125 			  struct ice_aqc_get_elem *buf, u16 buf_size,
2126 			  struct ice_sq_cd *cd)
2127 {
2128 	struct ice_aqc_query_node_to_root *cmd;
2129 	struct ice_aq_desc desc;
2130 
2131 	cmd = &desc.params.query_node_to_root;
2132 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
2133 	cmd->teid = CPU_TO_LE32(node_teid);
2134 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2135 }
2136 
2137 /**
2138  * ice_get_agg_info - get the aggregator ID
2139  * @hw: pointer to the hardware structure
2140  * @agg_id: aggregator ID
2141  *
2142  * This function validates aggregator ID. The function returns info if
2143  * aggregator ID is present in list otherwise it returns null.
2144  */
2145 static struct ice_sched_agg_info*
2146 ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
2147 {
2148 	struct ice_sched_agg_info *agg_info;
2149 
2150 	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
2151 			    list_entry)
2152 		if (agg_info->agg_id == agg_id)
2153 			return agg_info;
2154 
2155 	return NULL;
2156 }
2157 
2158 /**
2159  * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
2160  * @hw: pointer to the HW struct
2161  * @node: pointer to a child node
2162  * @num_nodes: num nodes count array
2163  *
2164  * This function walks through the aggregator subtree to find a free parent
2165  * node
2166  */
2167 static struct ice_sched_node *
2168 ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
2169 			      u16 *num_nodes)
2170 {
2171 	u8 l = node->tx_sched_layer;
2172 	u8 vsil, i;
2173 
2174 	vsil = ice_sched_get_vsi_layer(hw);
2175 
2176 	/* Is it VSI parent layer ? */
2177 	if (l == vsil - 1)
2178 		return (node->num_children < hw->max_children[l]) ? node : NULL;
2179 
2180 	/* We have intermediate nodes. Let's walk through the subtree. If the
2181 	 * intermediate node has space to add a new node then clear the count
2182 	 */
2183 	if (node->num_children < hw->max_children[l])
2184 		num_nodes[l] = 0;
2185 	/* The below recursive call is intentional and wouldn't go more than
2186 	 * 2 or 3 iterations.
2187 	 */
2188 
2189 	for (i = 0; i < node->num_children; i++) {
2190 		struct ice_sched_node *parent;
2191 
2192 		parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
2193 						       num_nodes);
2194 		if (parent)
2195 			return parent;
2196 	}
2197 
2198 	return NULL;
2199 }
2200 
2201 /**
2202  * ice_sched_update_parent - update the new parent in SW DB
2203  * @new_parent: pointer to a new parent node
2204  * @node: pointer to a child node
2205  *
2206  * This function removes the child from the old parent and adds it to a new
2207  * parent
2208  */
2209 static void
2210 ice_sched_update_parent(struct ice_sched_node *new_parent,
2211 			struct ice_sched_node *node)
2212 {
2213 	struct ice_sched_node *old_parent;
2214 	u8 i, j;
2215 
2216 	old_parent = node->parent;
2217 
2218 	/* update the old parent children */
2219 	for (i = 0; i < old_parent->num_children; i++)
2220 		if (old_parent->children[i] == node) {
2221 			for (j = i + 1; j < old_parent->num_children; j++)
2222 				old_parent->children[j - 1] =
2223 					old_parent->children[j];
2224 			old_parent->num_children--;
2225 			break;
2226 		}
2227 
2228 	/* now move the node to a new parent */
2229 	new_parent->children[new_parent->num_children++] = node;
2230 	node->parent = new_parent;
2231 	node->info.parent_teid = new_parent->info.node_teid;
2232 }
2233 
2234 /**
2235  * ice_sched_move_nodes - move child nodes to a given parent
2236  * @pi: port information structure
2237  * @parent: pointer to parent node
2238  * @num_items: number of child nodes to be moved
2239  * @list: pointer to child node teids
2240  *
2241  * This function move the child nodes to a given parent.
2242  */
2243 static enum ice_status
2244 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
2245 		     u16 num_items, u32 *list)
2246 {
2247 	enum ice_status status = ICE_SUCCESS;
2248 	struct ice_aqc_move_elem *buf;
2249 	struct ice_sched_node *node;
2250 	u16 i, grps_movd = 0;
2251 	struct ice_hw *hw;
2252 
2253 	hw = pi->hw;
2254 
2255 	if (!parent || !num_items)
2256 		return ICE_ERR_PARAM;
2257 
2258 	/* Does parent have enough space */
2259 	if (parent->num_children + num_items >=
2260 	    hw->max_children[parent->tx_sched_layer])
2261 		return ICE_ERR_AQ_FULL;
2262 
2263 	buf = (struct ice_aqc_move_elem *)ice_malloc(hw, sizeof(*buf));
2264 	if (!buf)
2265 		return ICE_ERR_NO_MEMORY;
2266 
2267 	for (i = 0; i < num_items; i++) {
2268 		node = ice_sched_find_node_by_teid(pi->root, list[i]);
2269 		if (!node) {
2270 			status = ICE_ERR_PARAM;
2271 			goto move_err_exit;
2272 		}
2273 
2274 		buf->hdr.src_parent_teid = node->info.parent_teid;
2275 		buf->hdr.dest_parent_teid = parent->info.node_teid;
2276 		buf->teid[0] = node->info.node_teid;
2277 		buf->hdr.num_elems = CPU_TO_LE16(1);
2278 		status = ice_aq_move_sched_elems(hw, 1, buf, sizeof(*buf),
2279 						 &grps_movd, NULL);
2280 		if (status && grps_movd != 1) {
2281 			status = ICE_ERR_CFG;
2282 			goto move_err_exit;
2283 		}
2284 
2285 		/* update the SW DB */
2286 		ice_sched_update_parent(parent, node);
2287 	}
2288 
2289 move_err_exit:
2290 	ice_free(hw, buf);
2291 	return status;
2292 }
2293 
2294 /**
2295  * ice_sched_move_vsi_to_agg - move VSI to aggregator node
2296  * @pi: port information structure
2297  * @vsi_handle: software VSI handle
2298  * @agg_id: aggregator ID
2299  * @tc: TC number
2300  *
2301  * This function moves a VSI to an aggregator node or its subtree.
2302  * Intermediate nodes may be created if required.
2303  */
2304 static enum ice_status
2305 ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
2306 			  u8 tc)
2307 {
2308 	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
2309 	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
2310 	u32 first_node_teid, vsi_teid;
2311 	enum ice_status status;
2312 	u16 num_nodes_added;
2313 	u8 aggl, vsil, i;
2314 
2315 	tc_node = ice_sched_get_tc_node(pi, tc);
2316 	if (!tc_node)
2317 		return ICE_ERR_CFG;
2318 
2319 	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2320 	if (!agg_node)
2321 		return ICE_ERR_DOES_NOT_EXIST;
2322 
2323 	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
2324 	if (!vsi_node)
2325 		return ICE_ERR_DOES_NOT_EXIST;
2326 
2327 	aggl = ice_sched_get_agg_layer(pi->hw);
2328 	vsil = ice_sched_get_vsi_layer(pi->hw);
2329 
2330 	/* set intermediate node count to 1 between aggregator and VSI layers */
2331 	for (i = aggl + 1; i < vsil; i++)
2332 		num_nodes[i] = 1;
2333 
2334 	/* Check if the aggregator subtree has any free node to add the VSI */
2335 	for (i = 0; i < agg_node->num_children; i++) {
2336 		parent = ice_sched_get_free_vsi_parent(pi->hw,
2337 						       agg_node->children[i],
2338 						       num_nodes);
2339 		if (parent)
2340 			goto move_nodes;
2341 	}
2342 
2343 	/* add new nodes */
2344 	parent = agg_node;
2345 	for (i = aggl + 1; i < vsil; i++) {
2346 		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
2347 						      num_nodes[i],
2348 						      &first_node_teid,
2349 						      &num_nodes_added);
2350 		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
2351 			return ICE_ERR_CFG;
2352 
2353 		/* The newly added node can be a new parent for the next
2354 		 * layer nodes
2355 		 */
2356 		if (num_nodes_added)
2357 			parent = ice_sched_find_node_by_teid(tc_node,
2358 							     first_node_teid);
2359 		else
2360 			parent = parent->children[0];
2361 
2362 		if (!parent)
2363 			return ICE_ERR_CFG;
2364 	}
2365 
2366 move_nodes:
2367 	vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid);
2368 	return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
2369 }
2370 
2371 /**
2372  * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
2373  * @pi: port information structure
2374  * @agg_info: aggregator info
2375  * @tc: traffic class number
2376  * @rm_vsi_info: true or false
2377  *
2378  * This function move all the VSI(s) to the default aggregator and delete
2379  * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The
2380  * caller holds the scheduler lock.
2381  */
2382 static enum ice_status
2383 ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
2384 			     struct ice_sched_agg_info *agg_info, u8 tc,
2385 			     bool rm_vsi_info)
2386 {
2387 	struct ice_sched_agg_vsi_info *agg_vsi_info;
2388 	struct ice_sched_agg_vsi_info *tmp;
2389 	enum ice_status status = ICE_SUCCESS;
2390 
2391 	LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
2392 				 ice_sched_agg_vsi_info, list_entry) {
2393 		u16 vsi_handle = agg_vsi_info->vsi_handle;
2394 
2395 		/* Move VSI to default aggregator */
2396 		if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
2397 			continue;
2398 
2399 		status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
2400 						   ICE_DFLT_AGG_ID, tc);
2401 		if (status)
2402 			break;
2403 
2404 		ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
2405 		if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
2406 			LIST_DEL(&agg_vsi_info->list_entry);
2407 			ice_free(pi->hw, agg_vsi_info);
2408 		}
2409 	}
2410 
2411 	return status;
2412 }
2413 
2414 /**
2415  * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
2416  * @pi: port information structure
2417  * @node: node pointer
2418  *
2419  * This function checks whether the aggregator is attached with any VSI or not.
2420  */
2421 static bool
2422 ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
2423 {
2424 	u8 vsil, i;
2425 
2426 	vsil = ice_sched_get_vsi_layer(pi->hw);
2427 	if (node->tx_sched_layer < vsil - 1) {
2428 		for (i = 0; i < node->num_children; i++)
2429 			if (ice_sched_is_agg_inuse(pi, node->children[i]))
2430 				return true;
2431 		return false;
2432 	} else {
2433 		return node->num_children ? true : false;
2434 	}
2435 }
2436 
2437 /**
2438  * ice_sched_rm_agg_cfg - remove the aggregator node
2439  * @pi: port information structure
2440  * @agg_id: aggregator ID
2441  * @tc: TC number
2442  *
2443  * This function removes the aggregator node and intermediate nodes if any
2444  * from the given TC
2445  */
2446 static enum ice_status
2447 ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2448 {
2449 	struct ice_sched_node *tc_node, *agg_node;
2450 	struct ice_hw *hw = pi->hw;
2451 
2452 	tc_node = ice_sched_get_tc_node(pi, tc);
2453 	if (!tc_node)
2454 		return ICE_ERR_CFG;
2455 
2456 	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2457 	if (!agg_node)
2458 		return ICE_ERR_DOES_NOT_EXIST;
2459 
2460 	/* Can't remove the aggregator node if it has children */
2461 	if (ice_sched_is_agg_inuse(pi, agg_node))
2462 		return ICE_ERR_IN_USE;
2463 
2464 	/* need to remove the whole subtree if aggregator node is the
2465 	 * only child.
2466 	 */
2467 	while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
2468 		struct ice_sched_node *parent = agg_node->parent;
2469 
2470 		if (!parent)
2471 			return ICE_ERR_CFG;
2472 
2473 		if (parent->num_children > 1)
2474 			break;
2475 
2476 		agg_node = parent;
2477 	}
2478 
2479 	ice_free_sched_node(pi, agg_node);
2480 	return ICE_SUCCESS;
2481 }
2482 
2483 /**
2484  * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
2485  * @pi: port information structure
2486  * @agg_info: aggregator ID
2487  * @tc: TC number
2488  * @rm_vsi_info: bool value true or false
2489  *
2490  * This function removes aggregator reference to VSI of given TC. It removes
2491  * the aggregator configuration completely for requested TC. The caller needs
2492  * to hold the scheduler lock.
2493  */
2494 static enum ice_status
2495 ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
2496 		  u8 tc, bool rm_vsi_info)
2497 {
2498 	enum ice_status status = ICE_SUCCESS;
2499 
2500 	/* If nothing to remove - return success */
2501 	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2502 		goto exit_rm_agg_cfg_tc;
2503 
2504 	status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
2505 	if (status)
2506 		goto exit_rm_agg_cfg_tc;
2507 
2508 	/* Delete aggregator node(s) */
2509 	status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
2510 	if (status)
2511 		goto exit_rm_agg_cfg_tc;
2512 
2513 	ice_clear_bit(tc, agg_info->tc_bitmap);
2514 exit_rm_agg_cfg_tc:
2515 	return status;
2516 }
2517 
2518 /**
2519  * ice_save_agg_tc_bitmap - save aggregator TC bitmap
2520  * @pi: port information structure
2521  * @agg_id: aggregator ID
2522  * @tc_bitmap: 8 bits TC bitmap
2523  *
2524  * Save aggregator TC bitmap. This function needs to be called with scheduler
2525  * lock held.
2526  */
2527 static enum ice_status
2528 ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
2529 		       ice_bitmap_t *tc_bitmap)
2530 {
2531 	struct ice_sched_agg_info *agg_info;
2532 
2533 	agg_info = ice_get_agg_info(pi->hw, agg_id);
2534 	if (!agg_info)
2535 		return ICE_ERR_PARAM;
2536 	ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
2537 		      ICE_MAX_TRAFFIC_CLASS);
2538 	return ICE_SUCCESS;
2539 }
2540 
2541 /**
2542  * ice_sched_add_agg_cfg - create an aggregator node
2543  * @pi: port information structure
2544  * @agg_id: aggregator ID
2545  * @tc: TC number
2546  *
2547  * This function creates an aggregator node and intermediate nodes if required
2548  * for the given TC
2549  */
2550 static enum ice_status
2551 ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2552 {
2553 	struct ice_sched_node *parent, *agg_node, *tc_node;
2554 	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
2555 	enum ice_status status = ICE_SUCCESS;
2556 	struct ice_hw *hw = pi->hw;
2557 	u32 first_node_teid;
2558 	u16 num_nodes_added;
2559 	u8 i, aggl;
2560 
2561 	tc_node = ice_sched_get_tc_node(pi, tc);
2562 	if (!tc_node)
2563 		return ICE_ERR_CFG;
2564 
2565 	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2566 	/* Does Agg node already exist ? */
2567 	if (agg_node)
2568 		return status;
2569 
2570 	aggl = ice_sched_get_agg_layer(hw);
2571 
2572 	/* need one node in Agg layer */
2573 	num_nodes[aggl] = 1;
2574 
2575 	/* Check whether the intermediate nodes have space to add the
2576 	 * new aggregator. If they are full, then SW needs to allocate a new
2577 	 * intermediate node on those layers
2578 	 */
2579 	for (i = hw->sw_entry_point_layer; i < aggl; i++) {
2580 		parent = ice_sched_get_first_node(pi, tc_node, i);
2581 
2582 		/* scan all the siblings */
2583 		while (parent) {
2584 			if (parent->num_children < hw->max_children[i])
2585 				break;
2586 			parent = parent->sibling;
2587 		}
2588 
2589 		/* all the nodes are full, reserve one for this layer */
2590 		if (!parent)
2591 			num_nodes[i]++;
2592 	}
2593 
2594 	/* add the aggregator node */
2595 	parent = tc_node;
2596 	for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
2597 		if (!parent)
2598 			return ICE_ERR_CFG;
2599 
2600 		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
2601 						      num_nodes[i],
2602 						      &first_node_teid,
2603 						      &num_nodes_added);
2604 		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
2605 			return ICE_ERR_CFG;
2606 
2607 		/* The newly added node can be a new parent for the next
2608 		 * layer nodes
2609 		 */
2610 		if (num_nodes_added) {
2611 			parent = ice_sched_find_node_by_teid(tc_node,
2612 							     first_node_teid);
2613 			/* register aggregator ID with the aggregator node */
2614 			if (parent && i == aggl)
2615 				parent->agg_id = agg_id;
2616 		} else {
2617 			parent = parent->children[0];
2618 		}
2619 	}
2620 
2621 	return ICE_SUCCESS;
2622 }
2623 
2624 /**
2625  * ice_sched_cfg_agg - configure aggregator node
2626  * @pi: port information structure
2627  * @agg_id: aggregator ID
2628  * @agg_type: aggregator type queue, VSI, or aggregator group
2629  * @tc_bitmap: bits TC bitmap
2630  *
2631  * It registers a unique aggregator node into scheduler services. It
2632  * allows a user to register with a unique ID to track it's resources.
2633  * The aggregator type determines if this is a queue group, VSI group
2634  * or aggregator group. It then creates the aggregator node(s) for requested
2635  * TC(s) or removes an existing aggregator node including its configuration
2636  * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
2637  * resources and remove aggregator ID.
2638  * This function needs to be called with scheduler lock held.
2639  */
2640 static enum ice_status
2641 ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
2642 		  enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
2643 {
2644 	struct ice_sched_agg_info *agg_info;
2645 	enum ice_status status = ICE_SUCCESS;
2646 	struct ice_hw *hw = pi->hw;
2647 	u8 tc;
2648 
2649 	agg_info = ice_get_agg_info(hw, agg_id);
2650 	if (!agg_info) {
2651 		/* Create new entry for new aggregator ID */
2652 		agg_info = (struct ice_sched_agg_info *)
2653 			ice_malloc(hw, sizeof(*agg_info));
2654 		if (!agg_info) {
2655 			status = ICE_ERR_NO_MEMORY;
2656 			goto exit_reg_agg;
2657 		}
2658 		agg_info->agg_id = agg_id;
2659 		agg_info->agg_type = agg_type;
2660 		agg_info->tc_bitmap[0] = 0;
2661 
2662 		/* Initialize the aggregator VSI list head */
2663 		INIT_LIST_HEAD(&agg_info->agg_vsi_list);
2664 
2665 		/* Add new entry in aggregator list */
2666 		LIST_ADD(&agg_info->list_entry, &hw->agg_list);
2667 	}
2668 	/* Create aggregator node(s) for requested TC(s) */
2669 	ice_for_each_traffic_class(tc) {
2670 		if (!ice_is_tc_ena(*tc_bitmap, tc)) {
2671 			/* Delete aggregator cfg TC if it exists previously */
2672 			status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
2673 			if (status)
2674 				break;
2675 			continue;
2676 		}
2677 
2678 		/* Check if aggregator node for TC already exists */
2679 		if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2680 			continue;
2681 
2682 		/* Create new aggregator node for TC */
2683 		status = ice_sched_add_agg_cfg(pi, agg_id, tc);
2684 		if (status)
2685 			break;
2686 
2687 		/* Save aggregator node's TC information */
2688 		ice_set_bit(tc, agg_info->tc_bitmap);
2689 	}
2690 exit_reg_agg:
2691 	return status;
2692 }
2693 
2694 /**
2695  * ice_cfg_agg - config aggregator node
2696  * @pi: port information structure
2697  * @agg_id: aggregator ID
2698  * @agg_type: aggregator type queue, VSI, or aggregator group
2699  * @tc_bitmap: bits TC bitmap
2700  *
2701  * This function configures aggregator node(s).
2702  */
2703 enum ice_status
2704 ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
2705 	    u8 tc_bitmap)
2706 {
2707 	ice_bitmap_t bitmap = tc_bitmap;
2708 	enum ice_status status;
2709 
2710 	ice_acquire_lock(&pi->sched_lock);
2711 	status = ice_sched_cfg_agg(pi, agg_id, agg_type,
2712 				   (ice_bitmap_t *)&bitmap);
2713 	if (!status)
2714 		status = ice_save_agg_tc_bitmap(pi, agg_id,
2715 						(ice_bitmap_t *)&bitmap);
2716 	ice_release_lock(&pi->sched_lock);
2717 	return status;
2718 }
2719 
2720 /**
2721  * ice_get_agg_vsi_info - get the aggregator ID
2722  * @agg_info: aggregator info
2723  * @vsi_handle: software VSI handle
2724  *
2725  * The function returns aggregator VSI info based on VSI handle. This function
2726  * needs to be called with scheduler lock held.
2727  */
2728 static struct ice_sched_agg_vsi_info*
2729 ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
2730 {
2731 	struct ice_sched_agg_vsi_info *agg_vsi_info;
2732 
2733 	LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
2734 			    ice_sched_agg_vsi_info, list_entry)
2735 		if (agg_vsi_info->vsi_handle == vsi_handle)
2736 			return agg_vsi_info;
2737 
2738 	return NULL;
2739 }
2740 
2741 /**
2742  * ice_get_vsi_agg_info - get the aggregator info of VSI
2743  * @hw: pointer to the hardware structure
2744  * @vsi_handle: Sw VSI handle
2745  *
2746  * The function returns aggregator info of VSI represented via vsi_handle. The
2747  * VSI has in this case a different aggregator than the default one. This
2748  * function needs to be called with scheduler lock held.
2749  */
2750 static struct ice_sched_agg_info*
2751 ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
2752 {
2753 	struct ice_sched_agg_info *agg_info;
2754 
2755 	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
2756 			    list_entry) {
2757 		struct ice_sched_agg_vsi_info *agg_vsi_info;
2758 
2759 		agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2760 		if (agg_vsi_info)
2761 			return agg_info;
2762 	}
2763 	return NULL;
2764 }
2765 
2766 /**
2767  * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
2768  * @pi: port information structure
2769  * @agg_id: aggregator ID
2770  * @vsi_handle: software VSI handle
2771  * @tc_bitmap: TC bitmap of enabled TC(s)
2772  *
2773  * Save VSI to aggregator TC bitmap. This function needs to call with scheduler
2774  * lock held.
2775  */
2776 static enum ice_status
2777 ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2778 			   ice_bitmap_t *tc_bitmap)
2779 {
2780 	struct ice_sched_agg_vsi_info *agg_vsi_info;
2781 	struct ice_sched_agg_info *agg_info;
2782 
2783 	agg_info = ice_get_agg_info(pi->hw, agg_id);
2784 	if (!agg_info)
2785 		return ICE_ERR_PARAM;
2786 	/* check if entry already exist */
2787 	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2788 	if (!agg_vsi_info)
2789 		return ICE_ERR_PARAM;
2790 	ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
2791 		      ICE_MAX_TRAFFIC_CLASS);
2792 	return ICE_SUCCESS;
2793 }
2794 
2795 /**
2796  * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
2797  * @pi: port information structure
2798  * @agg_id: aggregator ID
2799  * @vsi_handle: software VSI handle
2800  * @tc_bitmap: TC bitmap of enabled TC(s)
2801  *
2802  * This function moves VSI to a new or default aggregator node. If VSI is
2803  * already associated to the aggregator node then no operation is performed on
2804  * the tree. This function needs to be called with scheduler lock held.
2805  */
2806 static enum ice_status
2807 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
2808 			   u16 vsi_handle, ice_bitmap_t *tc_bitmap)
2809 {
2810 	struct ice_sched_agg_vsi_info *agg_vsi_info;
2811 	struct ice_sched_agg_info *agg_info;
2812 	enum ice_status status = ICE_SUCCESS;
2813 	struct ice_hw *hw = pi->hw;
2814 	u8 tc;
2815 
2816 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2817 		return ICE_ERR_PARAM;
2818 	agg_info = ice_get_agg_info(hw, agg_id);
2819 	if (!agg_info)
2820 		return ICE_ERR_PARAM;
2821 	/* check if entry already exist */
2822 	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2823 	if (!agg_vsi_info) {
2824 		/* Create new entry for VSI under aggregator list */
2825 		agg_vsi_info = (struct ice_sched_agg_vsi_info *)
2826 			ice_malloc(hw, sizeof(*agg_vsi_info));
2827 		if (!agg_vsi_info)
2828 			return ICE_ERR_PARAM;
2829 
2830 		/* add VSI ID into the aggregator list */
2831 		agg_vsi_info->vsi_handle = vsi_handle;
2832 		LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
2833 	}
2834 	/* Move VSI node to new aggregator node for requested TC(s) */
2835 	ice_for_each_traffic_class(tc) {
2836 		if (!ice_is_tc_ena(*tc_bitmap, tc))
2837 			continue;
2838 
2839 		/* Move VSI to new aggregator */
2840 		status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
2841 		if (status)
2842 			break;
2843 
2844 		if (agg_id != ICE_DFLT_AGG_ID)
2845 			ice_set_bit(tc, agg_vsi_info->tc_bitmap);
2846 		else
2847 			ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
2848 	}
2849 	/* If VSI moved back to default aggregator, delete agg_vsi_info. */
2850 	if (!ice_is_any_bit_set(agg_vsi_info->tc_bitmap,
2851 				ICE_MAX_TRAFFIC_CLASS)) {
2852 		LIST_DEL(&agg_vsi_info->list_entry);
2853 		ice_free(hw, agg_vsi_info);
2854 	}
2855 	return status;
2856 }
2857 
2858 /**
2859  * ice_sched_rm_unused_rl_prof - remove unused RL profile
2860  * @pi: port information structure
2861  *
2862  * This function removes unused rate limit profiles from the HW and
2863  * SW DB. The caller needs to hold scheduler lock.
2864  */
2865 static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
2866 {
2867 	u16 ln;
2868 
2869 	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
2870 		struct ice_aqc_rl_profile_info *rl_prof_elem;
2871 		struct ice_aqc_rl_profile_info *rl_prof_tmp;
2872 
2873 		LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
2874 					 &pi->rl_prof_list[ln],
2875 					 ice_aqc_rl_profile_info, list_entry) {
2876 			if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
2877 				ice_debug(pi->hw, ICE_DBG_SCHED,
2878 					  "Removed rl profile\n");
2879 		}
2880 	}
2881 }
2882 
2883 /**
2884  * ice_sched_update_elem - update element
2885  * @hw: pointer to the HW struct
2886  * @node: pointer to node
2887  * @info: node info to update
2888  *
2889  * It updates the HW DB, and local SW DB of node. It updates the scheduling
2890  * parameters of node from argument info data buffer (Info->data buf) and
2891  * returns success or error on config sched element failure. The caller
2892  * needs to hold scheduler lock.
2893  */
2894 static enum ice_status
2895 ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
2896 		      struct ice_aqc_txsched_elem_data *info)
2897 {
2898 	struct ice_aqc_conf_elem buf;
2899 	enum ice_status status;
2900 	u16 elem_cfgd = 0;
2901 	u16 num_elems = 1;
2902 
2903 	buf.generic[0] = *info;
2904 	/* Parent TEID is reserved field in this aq call */
2905 	buf.generic[0].parent_teid = 0;
2906 	/* Element type is reserved field in this aq call */
2907 	buf.generic[0].data.elem_type = 0;
2908 	/* Flags is reserved field in this aq call */
2909 	buf.generic[0].data.flags = 0;
2910 
2911 	/* Update HW DB */
2912 	/* Configure element node */
2913 	status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
2914 					&elem_cfgd, NULL);
2915 	if (status || elem_cfgd != num_elems) {
2916 		ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
2917 		return ICE_ERR_CFG;
2918 	}
2919 
2920 	/* Config success case */
2921 	/* Now update local SW DB */
2922 	/* Only copy the data portion of info buffer */
2923 	node->info.data = info->data;
2924 	return status;
2925 }
2926 
2927 /**
2928  * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
2929  * @hw: pointer to the HW struct
2930  * @node: sched node to configure
2931  * @rl_type: rate limit type CIR, EIR, or shared
2932  * @bw_alloc: BW weight/allocation
2933  *
2934  * This function configures node element's BW allocation.
2935  */
2936 static enum ice_status
2937 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
2938 			    enum ice_rl_type rl_type, u16 bw_alloc)
2939 {
2940 	struct ice_aqc_txsched_elem_data buf;
2941 	struct ice_aqc_txsched_elem *data;
2942 	enum ice_status status;
2943 
2944 	buf = node->info;
2945 	data = &buf.data;
2946 	if (rl_type == ICE_MIN_BW) {
2947 		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
2948 		data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
2949 	} else if (rl_type == ICE_MAX_BW) {
2950 		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2951 		data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
2952 	} else {
2953 		return ICE_ERR_PARAM;
2954 	}
2955 
2956 	/* Configure element */
2957 	status = ice_sched_update_elem(hw, node, &buf);
2958 	return status;
2959 }
2960 
2961 /**
2962  * ice_move_vsi_to_agg - moves VSI to new or default aggregator
2963  * @pi: port information structure
2964  * @agg_id: aggregator ID
2965  * @vsi_handle: software VSI handle
2966  * @tc_bitmap: TC bitmap of enabled TC(s)
2967  *
2968  * Move or associate VSI to a new or default aggregator node.
2969  */
2970 enum ice_status
2971 ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2972 		    u8 tc_bitmap)
2973 {
2974 	ice_bitmap_t bitmap = tc_bitmap;
2975 	enum ice_status status;
2976 
2977 	ice_acquire_lock(&pi->sched_lock);
2978 	status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
2979 					    (ice_bitmap_t *)&bitmap);
2980 	if (!status)
2981 		status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
2982 						    (ice_bitmap_t *)&bitmap);
2983 	ice_release_lock(&pi->sched_lock);
2984 	return status;
2985 }
2986 
/**
 * ice_rm_agg_cfg - remove aggregator configuration
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function removes aggregator reference to VSI and delete aggregator ID
 * info. It removes the aggregator configuration completely.
 */
enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	/* Look up the SW bookkeeping entry for this aggregator ID */
	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto exit_ice_rm_agg_cfg;
	}

	/* Tear down the aggregator's configuration for every TC */
	ice_for_each_traffic_class(tc) {
		status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true);
		if (status)
			goto exit_ice_rm_agg_cfg;
	}

	/* Any TC bit still set means part of the aggregator config could
	 * not be removed; treat the entry as still in use.
	 */
	if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) {
		status = ICE_ERR_IN_USE;
		goto exit_ice_rm_agg_cfg;
	}

	/* Safe to delete entry now */
	LIST_DEL(&agg_info->list_entry);
	ice_free(pi->hw, agg_info);

	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(pi);

exit_ice_rm_agg_cfg:
	ice_release_lock(&pi->sched_lock);
	return status;
}
3030 
3031 /**
3032  * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information
3033  * @bw_t_info: bandwidth type information structure
3034  * @bw_alloc: Bandwidth allocation information
3035  *
3036  * Save or clear CIR BW alloc information (bw_alloc) in the passed param
3037  * bw_t_info.
3038  */
3039 static void
3040 ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
3041 {
3042 	bw_t_info->cir_bw.bw_alloc = bw_alloc;
3043 	if (bw_t_info->cir_bw.bw_alloc)
3044 		ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
3045 	else
3046 		ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
3047 }
3048 
3049 /**
3050  * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information
3051  * @bw_t_info: bandwidth type information structure
3052  * @bw_alloc: Bandwidth allocation information
3053  *
3054  * Save or clear EIR BW alloc information (bw_alloc) in the passed param
3055  * bw_t_info.
3056  */
3057 static void
3058 ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
3059 {
3060 	bw_t_info->eir_bw.bw_alloc = bw_alloc;
3061 	if (bw_t_info->eir_bw.bw_alloc)
3062 		ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
3063 	else
3064 		ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
3065 }
3066 
3067 /**
3068  * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information
3069  * @pi: port information structure
3070  * @vsi_handle: sw VSI handle
3071  * @tc: traffic class
3072  * @rl_type: rate limit type min or max
3073  * @bw_alloc: Bandwidth allocation information
3074  *
3075  * Save BW alloc information of VSI type node for post replay use.
3076  */
3077 static enum ice_status
3078 ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3079 			    enum ice_rl_type rl_type, u16 bw_alloc)
3080 {
3081 	struct ice_vsi_ctx *vsi_ctx;
3082 
3083 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3084 		return ICE_ERR_PARAM;
3085 	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3086 	if (!vsi_ctx)
3087 		return ICE_ERR_PARAM;
3088 	switch (rl_type) {
3089 	case ICE_MIN_BW:
3090 		ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
3091 					   bw_alloc);
3092 		break;
3093 	case ICE_MAX_BW:
3094 		ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
3095 					   bw_alloc);
3096 		break;
3097 	default:
3098 		return ICE_ERR_PARAM;
3099 	}
3100 	return ICE_SUCCESS;
3101 }
3102 
3103 /**
3104  * ice_set_clear_cir_bw - set or clear CIR BW
3105  * @bw_t_info: bandwidth type information structure
3106  * @bw: bandwidth in Kbps - Kilo bits per sec
3107  *
3108  * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
3109  */
3110 static void
3111 ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3112 {
3113 	if (bw == ICE_SCHED_DFLT_BW) {
3114 		ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
3115 		bw_t_info->cir_bw.bw = 0;
3116 	} else {
3117 		/* Save type of BW information */
3118 		ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
3119 		bw_t_info->cir_bw.bw = bw;
3120 	}
3121 }
3122 
3123 /**
3124  * ice_set_clear_eir_bw - set or clear EIR BW
3125  * @bw_t_info: bandwidth type information structure
3126  * @bw: bandwidth in Kbps - Kilo bits per sec
3127  *
3128  * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
3129  */
3130 static void
3131 ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3132 {
3133 	if (bw == ICE_SCHED_DFLT_BW) {
3134 		ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3135 		bw_t_info->eir_bw.bw = 0;
3136 	} else {
3137 		/* EIR BW and Shared BW profiles are mutually exclusive and
3138 		 * hence only one of them may be set for any given element.
3139 		 * First clear earlier saved shared BW information.
3140 		 */
3141 		ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3142 		bw_t_info->shared_bw = 0;
3143 		/* save EIR BW information */
3144 		ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3145 		bw_t_info->eir_bw.bw = bw;
3146 	}
3147 }
3148 
3149 /**
3150  * ice_set_clear_shared_bw - set or clear shared BW
3151  * @bw_t_info: bandwidth type information structure
3152  * @bw: bandwidth in Kbps - Kilo bits per sec
3153  *
3154  * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
3155  */
3156 static void
3157 ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3158 {
3159 	if (bw == ICE_SCHED_DFLT_BW) {
3160 		ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3161 		bw_t_info->shared_bw = 0;
3162 	} else {
3163 		/* EIR BW and Shared BW profiles are mutually exclusive and
3164 		 * hence only one of them may be set for any given element.
3165 		 * First clear earlier saved EIR BW information.
3166 		 */
3167 		ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3168 		bw_t_info->eir_bw.bw = 0;
3169 		/* save shared BW information */
3170 		ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3171 		bw_t_info->shared_bw = bw;
3172 	}
3173 }
3174 
3175 /**
3176  * ice_sched_save_vsi_bw - save VSI node's BW information
3177  * @pi: port information structure
3178  * @vsi_handle: sw VSI handle
3179  * @tc: traffic class
3180  * @rl_type: rate limit type min, max, or shared
3181  * @bw: bandwidth in Kbps - Kilo bits per sec
3182  *
3183  * Save BW information of VSI type node for post replay use.
3184  */
3185 static enum ice_status
3186 ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3187 		      enum ice_rl_type rl_type, u32 bw)
3188 {
3189 	struct ice_vsi_ctx *vsi_ctx;
3190 
3191 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3192 		return ICE_ERR_PARAM;
3193 	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3194 	if (!vsi_ctx)
3195 		return ICE_ERR_PARAM;
3196 	switch (rl_type) {
3197 	case ICE_MIN_BW:
3198 		ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3199 		break;
3200 	case ICE_MAX_BW:
3201 		ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3202 		break;
3203 	case ICE_SHARED_BW:
3204 		ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3205 		break;
3206 	default:
3207 		return ICE_ERR_PARAM;
3208 	}
3209 	return ICE_SUCCESS;
3210 }
3211 
3212 /**
3213  * ice_set_clear_prio - set or clear priority information
3214  * @bw_t_info: bandwidth type information structure
3215  * @prio: priority to save
3216  *
3217  * Save or clear priority (prio) in the passed param bw_t_info.
3218  */
3219 static void
3220 ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
3221 {
3222 	bw_t_info->generic = prio;
3223 	if (bw_t_info->generic)
3224 		ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
3225 	else
3226 		ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
3227 }
3228 
3229 /**
3230  * ice_sched_save_vsi_prio - save VSI node's priority information
3231  * @pi: port information structure
3232  * @vsi_handle: Software VSI handle
3233  * @tc: traffic class
3234  * @prio: priority to save
3235  *
3236  * Save priority information of VSI type node for post replay use.
3237  */
3238 static enum ice_status
3239 ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3240 			u8 prio)
3241 {
3242 	struct ice_vsi_ctx *vsi_ctx;
3243 
3244 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3245 		return ICE_ERR_PARAM;
3246 	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3247 	if (!vsi_ctx)
3248 		return ICE_ERR_PARAM;
3249 	if (tc >= ICE_MAX_TRAFFIC_CLASS)
3250 		return ICE_ERR_PARAM;
3251 	ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
3252 	return ICE_SUCCESS;
3253 }
3254 
3255 /**
3256  * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information
3257  * @pi: port information structure
3258  * @agg_id: node aggregator ID
3259  * @tc: traffic class
3260  * @rl_type: rate limit type min or max
3261  * @bw_alloc: bandwidth alloc information
3262  *
3263  * Save BW alloc information of AGG type node for post replay use.
3264  */
3265 static enum ice_status
3266 ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3267 			    enum ice_rl_type rl_type, u16 bw_alloc)
3268 {
3269 	struct ice_sched_agg_info *agg_info;
3270 
3271 	agg_info = ice_get_agg_info(pi->hw, agg_id);
3272 	if (!agg_info)
3273 		return ICE_ERR_PARAM;
3274 	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
3275 		return ICE_ERR_PARAM;
3276 	switch (rl_type) {
3277 	case ICE_MIN_BW:
3278 		ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
3279 		break;
3280 	case ICE_MAX_BW:
3281 		ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
3282 		break;
3283 	default:
3284 		return ICE_ERR_PARAM;
3285 	}
3286 	return ICE_SUCCESS;
3287 }
3288 
3289 /**
3290  * ice_sched_save_agg_bw - save aggregator node's BW information
3291  * @pi: port information structure
3292  * @agg_id: node aggregator ID
3293  * @tc: traffic class
3294  * @rl_type: rate limit type min, max, or shared
3295  * @bw: bandwidth in Kbps - Kilo bits per sec
3296  *
3297  * Save BW information of AGG type node for post replay use.
3298  */
3299 static enum ice_status
3300 ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
3301 		      enum ice_rl_type rl_type, u32 bw)
3302 {
3303 	struct ice_sched_agg_info *agg_info;
3304 
3305 	agg_info = ice_get_agg_info(pi->hw, agg_id);
3306 	if (!agg_info)
3307 		return ICE_ERR_PARAM;
3308 	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
3309 		return ICE_ERR_PARAM;
3310 	switch (rl_type) {
3311 	case ICE_MIN_BW:
3312 		ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw);
3313 		break;
3314 	case ICE_MAX_BW:
3315 		ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw);
3316 		break;
3317 	case ICE_SHARED_BW:
3318 		ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw);
3319 		break;
3320 	default:
3321 		return ICE_ERR_PARAM;
3322 	}
3323 	return ICE_SUCCESS;
3324 }
3325 
3326 /**
3327  * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
3328  * @pi: port information structure
3329  * @vsi_handle: software VSI handle
3330  * @tc: traffic class
3331  * @rl_type: min or max
3332  * @bw: bandwidth in Kbps
3333  *
3334  * This function configures BW limit of VSI scheduling node based on TC
3335  * information.
3336  */
3337 enum ice_status
3338 ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3339 			  enum ice_rl_type rl_type, u32 bw)
3340 {
3341 	enum ice_status status;
3342 
3343 	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
3344 						  ICE_AGG_TYPE_VSI,
3345 						  tc, rl_type, bw);
3346 	if (!status) {
3347 		ice_acquire_lock(&pi->sched_lock);
3348 		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
3349 		ice_release_lock(&pi->sched_lock);
3350 	}
3351 	return status;
3352 }
3353 
/**
 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 *
 * This function configures default BW limit of VSI scheduling node based on TC
 * information.
 */
enum ice_status
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			       enum ice_rl_type rl_type)
{
	enum ice_status status;

	/* Reset the node's limit in the scheduler tree to the default */
	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type,
						  ICE_SCHED_DFLT_BW);
	if (!status) {
		ice_acquire_lock(&pi->sched_lock);
		/* Record the default so replay does not restore an old limit */
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
					       ICE_SCHED_DFLT_BW);
		ice_release_lock(&pi->sched_lock);
	}
	return status;
}
3382 
3383 /**
3384  * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC
3385  * @pi: port information structure
3386  * @agg_id: aggregator ID
3387  * @tc: traffic class
3388  * @rl_type: min or max
3389  * @bw: bandwidth in Kbps
3390  *
3391  * This function applies BW limit to aggregator scheduling node based on TC
3392  * information.
3393  */
3394 enum ice_status
3395 ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3396 			  enum ice_rl_type rl_type, u32 bw)
3397 {
3398 	enum ice_status status;
3399 
3400 	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
3401 						  tc, rl_type, bw);
3402 	if (!status) {
3403 		ice_acquire_lock(&pi->sched_lock);
3404 		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
3405 		ice_release_lock(&pi->sched_lock);
3406 	}
3407 	return status;
3408 }
3409 
3410 /**
3411  * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC
3412  * @pi: port information structure
3413  * @agg_id: aggregator ID
3414  * @tc: traffic class
3415  * @rl_type: min or max
3416  *
3417  * This function applies default BW limit to aggregator scheduling node based
3418  * on TC information.
3419  */
3420 enum ice_status
3421 ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3422 			       enum ice_rl_type rl_type)
3423 {
3424 	enum ice_status status;
3425 
3426 	status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
3427 						  tc, rl_type,
3428 						  ICE_SCHED_DFLT_BW);
3429 	if (!status) {
3430 		ice_acquire_lock(&pi->sched_lock);
3431 		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
3432 					       ICE_SCHED_DFLT_BW);
3433 		ice_release_lock(&pi->sched_lock);
3434 	}
3435 	return status;
3436 }
3437 
/**
 * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @bw: bandwidth in Kbps
 *
 * This function configures shared rate limiter(SRL) of all VSI type nodes
 * across all traffic classes for VSI matching handle.
 */
enum ice_status
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 bw)
{
	/* Thin public wrapper around the internal SRL helper */
	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, bw);
}
3452 
/**
 * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes the shared rate limiter(SRL) of all VSI type nodes
 * across all traffic classes for VSI matching handle.
 */
enum ice_status
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
{
	/* Passing the default BW removes any previously applied SRL */
	return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
					       ICE_SCHED_DFLT_BW);
}
3467 
/**
 * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @bw: bandwidth in Kbps
 *
 * This function configures the shared rate limiter(SRL) of all aggregator type
 * nodes across all traffic classes for aggregator matching agg_id.
 */
enum ice_status
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
{
	/* Thin public wrapper around the internal SRL helper */
	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, bw);
}
3482 
/**
 * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function removes the shared rate limiter(SRL) of all aggregator type
 * nodes across all traffic classes for aggregator matching agg_id.
 */
enum ice_status
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
{
	/* Passing the default BW removes any previously applied SRL */
	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW);
}
3496 
/**
 * ice_cfg_vsi_q_priority - config VSI queue priority of nodes
 * @pi: port information structure
 * @num_qs: number of VSI queues
 * @q_ids: queue IDs array
 * @q_prio: queue priority array
 *
 * This function configures the queue node priority (Sibling Priority) of the
 * passed in VSI's queue(s).
 */
enum ice_status
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
		       u8 *q_prio)
{
	enum ice_status status = ICE_ERR_PARAM;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_qs; i++) {
		struct ice_sched_node *node;

		/* q_ids[] entries are looked up as node TEIDs; the node
		 * found must be a queue (leaf) element.
		 */
		node = ice_sched_find_node_by_teid(pi->root, q_ids[i]);
		if (!node || node->info.data.elem_type !=
		    ICE_AQC_ELEM_TYPE_LEAF) {
			status = ICE_ERR_PARAM;
			break;
		}
		/* Configure Priority */
		status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]);
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}
3535 
/**
 * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC
 * @pi: port information structure
 * @agg_id: Aggregator ID
 * @num_vsis: number of VSI(s)
 * @vsi_handle_arr: array of software VSI handles
 * @node_prio: pointer to node priority
 * @tc: traffic class
 *
 * This function configures the node priority (Sibling Priority) of the
 * passed in VSI's for a given traffic class (TC) of an Aggregator ID.
 */
enum ice_status
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
				u16 num_vsis, u16 *vsi_handle_arr,
				u8 *node_prio, u8 tc)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_node *tc_node, *agg_node;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	struct ice_hw *hw = pi->hw;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);
	/* The aggregator ID must be known to SW bookkeeping */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		goto exit_agg_priority_per_tc;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_agg_priority_per_tc;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		goto exit_agg_priority_per_tc;

	/* Cannot configure more siblings than the layer supports */
	if (num_vsis > hw->max_children[agg_node->tx_sched_layer])
		goto exit_agg_priority_per_tc;

	for (i = 0; i < num_vsis; i++) {
		struct ice_sched_node *vsi_node;
		bool vsi_handle_valid = false;
		u16 vsi_handle;

		status = ICE_ERR_PARAM;
		vsi_handle = vsi_handle_arr[i];
		if (!ice_is_vsi_valid(hw, vsi_handle))
			goto exit_agg_priority_per_tc;
		/* Verify child nodes before applying settings */
		LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
				    ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				/* cppcheck-suppress unreadVariable */
				vsi_handle_valid = true;
				break;
			}

		if (!vsi_handle_valid)
			goto exit_agg_priority_per_tc;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			goto exit_agg_priority_per_tc;

		/* Only configure VSIs that actually sit under this
		 * aggregator in the scheduler tree.
		 */
		if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
			/* Configure Priority */
			status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
							      node_prio[i]);
			if (status)
				break;
			/* Save for post-reset replay */
			status = ice_sched_save_vsi_prio(pi, vsi_handle, tc,
							 node_prio[i]);
			if (status)
				break;
		}
	}

exit_agg_priority_per_tc:
	ice_release_lock(&pi->sched_lock);
	return status;
}
3624 
/**
 * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @ena_tcmap: enabled TC map
 * @rl_type: Rate limit type CIR/EIR
 * @bw_alloc: Array of BW alloc
 *
 * This function configures the BW allocation of the passed in VSI's
 * node(s) for enabled traffic class.
 */
enum ice_status
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;

		/* Skip TCs not enabled in the caller's bitmap */
		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* Program the weight into the HW element ... */
		status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		/* ... and mirror it in SW for post-reset replay */
		status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc,
						     rl_type, bw_alloc[tc]);
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}
3676 
/**
 * ice_cfg_agg_bw_alloc - config aggregator BW alloc
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @ena_tcmap: enabled TC map
 * @rl_type: rate limit type CIR/EIR
 * @bw_alloc: array of BW alloc
 *
 * This function configures the BW allocation of passed in aggregator for
 * enabled traffic class(s).
 */
enum ice_status
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	/* The aggregator ID must be known to SW bookkeeping */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present) {
		status = ICE_ERR_PARAM;
		goto exit_cfg_agg_bw_alloc;
	}

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;

		/* Skip TCs not enabled in the caller's bitmap */
		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;

		/* Program the weight into the HW element ... */
		status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		/* ... and mirror it in SW for post-reset replay */
		status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
	}

exit_cfg_agg_bw_alloc:
	ice_release_lock(&pi->sched_lock);
	return status;
}
3739 
/**
 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 *
 * This function calculates the wakeup parameter of RL profile.
 */
static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
{
	s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
	s32 wakeup_f_int;
	u16 wakeup = 0;

	/* Get the wakeup integer value */
	bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);
	wakeup_int = DIV_64BIT(hw->psm_clk_freq, bytes_per_sec);
	if (wakeup_int > 63) {
		/* Large integer part: bit 15 flags the integer-only
		 * encoding, the low bits carry wakeup_int.
		 */
		wakeup = (u16)((1 << 15) | wakeup_int);
	} else {
		/* Calculate fraction value up to 4 decimals
		 * Convert Integer value to a constant multiplier
		 */
		wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
		wakeup_a = DIV_64BIT((s64)ICE_RL_PROF_MULTIPLIER *
				     hw->psm_clk_freq, bytes_per_sec);

		/* Get Fraction value */
		wakeup_f = wakeup_a - wakeup_b;

		/* Round up the Fractional value via Ceil(Fractional value) */
		if (wakeup_f > DIV_64BIT(ICE_RL_PROF_MULTIPLIER, 2))
			wakeup_f += 1;

		/* Scale the fraction to the 9-bit field and pack:
		 * integer part in bits 9..14, fraction in bits 0..8.
		 */
		wakeup_f_int = (s32)DIV_64BIT(wakeup_f * ICE_RL_PROF_FRACTION,
					      ICE_RL_PROF_MULTIPLIER);
		wakeup |= (u16)(wakeup_int << 9);
		wakeup |= (u16)(0x1ff & wakeup_f_int);
	}

	return wakeup;
}
3781 
/**
 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 * @profile: profile parameters to return
 *
 * This function converts the BW to profile structure format.
 */
static enum ice_status
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
			   struct ice_aqc_rl_profile_elem *profile)
{
	enum ice_status status = ICE_ERR_PARAM;
	s64 bytes_per_sec, ts_rate, mv_tmp;
	bool found = false;
	s32 encode = 0;
	s64 mv = 0;
	s32 i;

	/* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
	if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
		return status;

	/* Bytes per second from Kbps */
	bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);

	/* encode is 6 bits but really useful are 5 bits */
	for (i = 0; i < 64; i++) {
		u64 pow_result = BIT_ULL(i);

		/* Candidate timeslot rate for this power-of-two encoding */
		ts_rate = DIV_64BIT((s64)hw->psm_clk_freq,
				    pow_result * ICE_RL_PROF_TS_MULTIPLIER);
		if (ts_rate <= 0)
			continue;

		/* Multiplier value */
		mv_tmp = DIV_64BIT(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
				   ts_rate);

		/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
		mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);

		/* First multiplier value greater than the given
		 * accuracy bytes
		 */
		if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
			encode = i;
			found = true;
			break;
		}
	}
	if (found) {
		u16 wm;

		/* Fill the profile with encoding, multiplier and wakeup
		 * parameter, all in firmware (little-endian) byte order.
		 */
		wm = ice_sched_calc_wakeup(hw, bw);
		profile->rl_multiply = CPU_TO_LE16(mv);
		profile->wake_up_calc = CPU_TO_LE16(wm);
		profile->rl_encode = CPU_TO_LE16(encode);
		status = ICE_SUCCESS;
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	return status;
}
3847 
3848 /**
3849  * ice_sched_add_rl_profile - add RL profile
3850  * @pi: port information structure
3851  * @rl_type: type of rate limit BW - min, max, or shared
3852  * @bw: bandwidth in Kbps - Kilo bits per sec
3853  * @layer_num: specifies in which layer to create profile
3854  *
3855  * This function first checks the existing list for corresponding BW
3856  * parameter. If it exists, it returns the associated profile otherwise
3857  * it creates a new rate limit profile for requested BW, and adds it to
3858  * the HW DB and local list. It returns the new profile or null on error.
3859  * The caller needs to hold the scheduler lock.
3860  */
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_port_info *pi,
			 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_generic_elem *buf;
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	u16 profiles_added = 0, num_profiles = 1;
	enum ice_status status;
	struct ice_hw *hw;
	u8 profile_type;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return NULL;
	/* Map the generic RL type onto the AQ profile type flag */
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		break;
	default:
		return NULL;
	}

	if (!pi)
		return NULL;
	hw = pi->hw;
	/* Reuse an already-programmed profile with matching type and BW */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if (rl_prof_elem->profile.flags == profile_type &&
		    rl_prof_elem->bw == bw)
			/* Return existing profile ID info */
			return rl_prof_elem;

	/* Create new profile ID */
	rl_prof_elem = (struct ice_aqc_rl_profile_info *)
		ice_malloc(hw, sizeof(*rl_prof_elem));

	if (!rl_prof_elem)
		return NULL;

	/* Translate the Kbps BW into the profile's HW encoding */
	status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
	if (status != ICE_SUCCESS)
		goto exit_add_rl_prof;

	rl_prof_elem->bw = bw;
	/* layer_num is zero relative, and fw expects level from 1 to 9 */
	rl_prof_elem->profile.level = layer_num + 1;
	rl_prof_elem->profile.flags = profile_type;
	rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);

	/* Create new entry in HW DB */
	buf = (struct ice_aqc_rl_profile_generic_elem *)
		&rl_prof_elem->profile;
	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
				       &profiles_added, NULL);
	if (status || profiles_added != num_profiles)
		goto exit_add_rl_prof;

	/* Good entry - add in the list */
	rl_prof_elem->prof_id_ref = 0;
	LIST_ADD(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
	return rl_prof_elem;

exit_add_rl_prof:
	ice_free(hw, rl_prof_elem);
	return NULL;
}
3932 
3933 /**
3934  * ice_sched_cfg_node_bw_lmt - configure node sched params
3935  * @hw: pointer to the HW struct
3936  * @node: sched node to configure
3937  * @rl_type: rate limit type CIR, EIR, or shared
3938  * @rl_prof_id: rate limit profile ID
3939  *
3940  * This function configures node element's BW limit.
3941  */
static enum ice_status
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u16 rl_prof_id)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	/* Work on a local copy; the node's element data is only committed
	 * by ice_sched_update_elem() at the end.
	 */
	buf = node->info;
	data = &buf.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_MAX_BW:
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element
		 */
		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
			return ICE_ERR_CFG;
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
		break;
	case ICE_SHARED_BW:
		/* Check for removing shared BW */
		if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
			/* remove shared profile */
			data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
			data->srl_id = 0; /* clear SRL field */

			/* enable back EIR to default profile */
			data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
			data->eir_bw.bw_profile_idx =
				CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
			break;
		}
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element
		 */
		if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
		    (LE16_TO_CPU(data->eir_bw.bw_profile_idx) !=
			    ICE_SCHED_DFLT_RL_PROF_ID))
			return ICE_ERR_CFG;
		/* EIR BW is set to default, disable it */
		data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
		/* Okay to enable shared BW now */
		data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
		data->srl_id = CPU_TO_LE16(rl_prof_id);
		break;
	default:
		/* Unknown rate limit type */
		return ICE_ERR_PARAM;
	}

	/* Configure element */
	return ice_sched_update_elem(hw, node, &buf);
}
3999 
4000 /**
4001  * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
4002  * @node: sched node
4003  * @rl_type: rate limit type
4004  *
4005  * If existing profile matches, it returns the corresponding rate
4006  * limit profile ID, otherwise it returns an invalid ID as error.
4007  */
4008 static u16
4009 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
4010 			      enum ice_rl_type rl_type)
4011 {
4012 	u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
4013 	struct ice_aqc_txsched_elem *data;
4014 
4015 	data = &node->info.data;
4016 	switch (rl_type) {
4017 	case ICE_MIN_BW:
4018 		if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
4019 			rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx);
4020 		break;
4021 	case ICE_MAX_BW:
4022 		if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
4023 			rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx);
4024 		break;
4025 	case ICE_SHARED_BW:
4026 		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
4027 			rl_prof_id = LE16_TO_CPU(data->srl_id);
4028 		break;
4029 	default:
4030 		break;
4031 	}
4032 
4033 	return rl_prof_id;
4034 }
4035 
4036 /**
4037  * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
4038  * @pi: port information structure
4039  * @rl_type: type of rate limit BW - min, max, or shared
4040  * @layer_index: layer index
4041  *
4042  * This function returns requested profile creation layer.
4043  */
4044 static u8
4045 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
4046 			    u8 layer_index)
4047 {
4048 	struct ice_hw *hw = pi->hw;
4049 
4050 	if (layer_index >= hw->num_tx_sched_layers)
4051 		return ICE_SCHED_INVAL_LAYER_NUM;
4052 	switch (rl_type) {
4053 	case ICE_MIN_BW:
4054 		if (hw->layer_info[layer_index].max_cir_rl_profiles)
4055 			return layer_index;
4056 		break;
4057 	case ICE_MAX_BW:
4058 		if (hw->layer_info[layer_index].max_eir_rl_profiles)
4059 			return layer_index;
4060 		break;
4061 	case ICE_SHARED_BW:
4062 		/* if current layer doesn't support SRL profile creation
4063 		 * then try a layer up or down.
4064 		 */
4065 		if (hw->layer_info[layer_index].max_srl_profiles)
4066 			return layer_index;
4067 		else if (layer_index < hw->num_tx_sched_layers - 1 &&
4068 			 hw->layer_info[layer_index + 1].max_srl_profiles)
4069 			return layer_index + 1;
4070 		else if (layer_index > 0 &&
4071 			 hw->layer_info[layer_index - 1].max_srl_profiles)
4072 			return layer_index - 1;
4073 		break;
4074 	default:
4075 		break;
4076 	}
4077 	return ICE_SCHED_INVAL_LAYER_NUM;
4078 }
4079 
4080 /**
4081  * ice_sched_get_srl_node - get shared rate limit node
4082  * @node: tree node
4083  * @srl_layer: shared rate limit layer
4084  *
4085  * This function returns SRL node to be used for shared rate limit purpose.
4086  * The caller needs to hold scheduler lock.
4087  */
4088 static struct ice_sched_node *
4089 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
4090 {
4091 	if (srl_layer > node->tx_sched_layer)
4092 		return node->children[0];
4093 	else if (srl_layer < node->tx_sched_layer)
4094 		/* Node can't be created without a parent. It will always
4095 		 * have a valid parent except root node.
4096 		 */
4097 		return node->parent;
4098 	else
4099 		return node;
4100 }
4101 
4102 /**
4103  * ice_sched_rm_rl_profile - remove RL profile ID
4104  * @pi: port information structure
4105  * @layer_num: layer number where profiles are saved
4106  * @profile_type: profile type like EIR, CIR, or SRL
4107  * @profile_id: profile ID to remove
4108  *
4109  * This function removes rate limit profile from layer 'layer_num' of type
4110  * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
4111  * scheduler lock.
4112  */
static enum ice_status
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
			u16 profile_id)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	enum ice_status status = ICE_SUCCESS;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return ICE_ERR_PARAM;
	/* Check the existing list for RL profile */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if (rl_prof_elem->profile.flags == profile_type &&
		    LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
		    profile_id) {
			/* Drop one reference before attempting removal */
			if (rl_prof_elem->prof_id_ref)
				rl_prof_elem->prof_id_ref--;

			/* Remove old profile ID from database */
			status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
			if (status && status != ICE_ERR_IN_USE)
				ice_debug(pi->hw, ICE_DBG_SCHED,
					  "Remove rl profile failed\n");
			break;
		}
	/* A profile that is still referenced elsewhere is not an error
	 * for this caller; it stays in the DB until the last reference
	 * is dropped.
	 */
	if (status == ICE_ERR_IN_USE)
		status = ICE_SUCCESS;
	return status;
}
4142 
4143 /**
4144  * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
4145  * @pi: port information structure
4146  * @node: pointer to node structure
4147  * @rl_type: rate limit type min, max, or shared
4148  * @layer_num: layer number where RL profiles are saved
4149  *
4150  * This function configures node element's BW rate limit profile ID of
4151  * type CIR, EIR, or SRL to default. This function needs to be called
4152  * with the scheduler lock held.
4153  */
4154 static enum ice_status
4155 ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
4156 			   struct ice_sched_node *node,
4157 			   enum ice_rl_type rl_type, u8 layer_num)
4158 {
4159 	enum ice_status status;
4160 	struct ice_hw *hw;
4161 	u8 profile_type;
4162 	u16 rl_prof_id;
4163 	u16 old_id;
4164 
4165 	hw = pi->hw;
4166 	switch (rl_type) {
4167 	case ICE_MIN_BW:
4168 		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
4169 		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
4170 		break;
4171 	case ICE_MAX_BW:
4172 		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
4173 		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
4174 		break;
4175 	case ICE_SHARED_BW:
4176 		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
4177 		/* No SRL is configured for default case */
4178 		rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
4179 		break;
4180 	default:
4181 		return ICE_ERR_PARAM;
4182 	}
4183 	/* Save existing RL prof ID for later clean up */
4184 	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
4185 	/* Configure BW scheduling parameters */
4186 	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
4187 	if (status)
4188 		return status;
4189 
4190 	/* Remove stale RL profile ID */
4191 	if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
4192 	    old_id == ICE_SCHED_INVAL_PROF_ID)
4193 		return ICE_SUCCESS;
4194 
4195 	return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
4196 }
4197 
4198 /**
4199  * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
4200  * @pi: port information structure
4201  * @node: pointer to node structure
4202  * @layer_num: layer number where rate limit profiles are saved
4203  * @rl_type: rate limit type min, max, or shared
4204  * @bw: bandwidth value
4205  *
4206  * This function prepares node element's bandwidth to SRL or EIR exclusively.
4207  * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
4208  * them may be set for any given element. This function needs to be called
4209  * with the scheduler lock held.
4210  */
4211 static enum ice_status
4212 ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
4213 			   struct ice_sched_node *node,
4214 			   u8 layer_num, enum ice_rl_type rl_type, u32 bw)
4215 {
4216 	if (rl_type == ICE_SHARED_BW) {
4217 		/* SRL node passed in this case, it may be different node */
4218 		if (bw == ICE_SCHED_DFLT_BW)
4219 			/* SRL being removed, ice_sched_cfg_node_bw_lmt()
4220 			 * enables EIR to default. EIR is not set in this
4221 			 * case, so no additional action is required.
4222 			 */
4223 			return ICE_SUCCESS;
4224 
4225 		/* SRL being configured, set EIR to default here.
4226 		 * ice_sched_cfg_node_bw_lmt() disables EIR when it
4227 		 * configures SRL
4228 		 */
4229 		return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
4230 						  layer_num);
4231 	} else if (rl_type == ICE_MAX_BW &&
4232 		   node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
4233 		/* Remove Shared profile. Set default shared BW call
4234 		 * removes shared profile for a node.
4235 		 */
4236 		return ice_sched_set_node_bw_dflt(pi, node,
4237 						  ICE_SHARED_BW,
4238 						  layer_num);
4239 	}
4240 	return ICE_SUCCESS;
4241 }
4242 
4243 /**
4244  * ice_sched_set_node_bw - set node's bandwidth
4245  * @pi: port information structure
4246  * @node: tree node
4247  * @rl_type: rate limit type min, max, or shared
4248  * @bw: bandwidth in Kbps - Kilo bits per sec
4249  * @layer_num: layer number
4250  *
4251  * This function adds new profile corresponding to requested BW, configures
4252  * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
4253  * ID from local database. The caller needs to hold scheduler lock.
4254  */
static enum ice_status
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_info;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_hw *hw = pi->hw;
	u16 old_id, rl_prof_id;

	/* Find an existing profile for this BW or create a new one */
	rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
	if (!rl_prof_info)
		return status;

	rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id);

	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* New changes have been applied */
	/* Increment the profile ID reference count */
	rl_prof_info->prof_id_ref++;

	/* Check for old ID removal */
	if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
	    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
		return ICE_SUCCESS;

	return ice_sched_rm_rl_profile(pi, layer_num,
				       rl_prof_info->profile.flags,
				       old_id);
}
4290 
4291 /**
4292  * ice_sched_set_node_bw_lmt - set node's BW limit
4293  * @pi: port information structure
4294  * @node: tree node
4295  * @rl_type: rate limit type min, max, or shared
4296  * @bw: bandwidth in Kbps - Kilo bits per sec
4297  *
4298  * It updates node's BW limit parameters like BW RL profile ID of type CIR,
4299  * EIR, or SRL. The caller needs to hold scheduler lock.
4300  */
static enum ice_status
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_node *cfg_node = node;
	enum ice_status status;

	struct ice_hw *hw;
	u8 layer_num;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(pi);
	/* RL profiles of this type may only live on certain layers */
	layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
						node->tx_sched_layer);
	if (layer_num >= hw->num_tx_sched_layers)
		return ICE_ERR_PARAM;

	if (rl_type == ICE_SHARED_BW) {
		/* SRL node may be different */
		cfg_node = ice_sched_get_srl_node(node, layer_num);
		if (!cfg_node)
			return ICE_ERR_CFG;
	}
	/* EIR BW and Shared BW profiles are mutually exclusive and
	 * hence only one of them may be set for any given element
	 */
	status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
					    bw);
	if (status)
		return status;
	if (bw == ICE_SCHED_DFLT_BW)
		return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
						  layer_num);
	return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
}
4339 
4340 /**
4341  * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
4342  * @pi: port information structure
4343  * @node: pointer to node structure
4344  * @rl_type: rate limit type min, max, or shared
4345  *
4346  * This function configures node element's BW rate limit profile ID of
4347  * type CIR, EIR, or SRL to default. This function needs to be called
4348  * with the scheduler lock held.
4349  */
4350 static enum ice_status
4351 ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
4352 			       struct ice_sched_node *node,
4353 			       enum ice_rl_type rl_type)
4354 {
4355 	return ice_sched_set_node_bw_lmt(pi, node, rl_type,
4356 					 ICE_SCHED_DFLT_BW);
4357 }
4358 
4359 /**
4360  * ice_sched_validate_srl_node - Check node for SRL applicability
4361  * @node: sched node to configure
4362  * @sel_layer: selected SRL layer
4363  *
 * This function checks if the SRL can be applied to a selected layer node on
4365  * behalf of the requested node (first argument). This function needs to be
4366  * called with scheduler lock held.
4367  */
4368 static enum ice_status
4369 ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
4370 {
4371 	/* SRL profiles are not available on all layers. Check if the
4372 	 * SRL profile can be applied to a node above or below the
4373 	 * requested node. SRL configuration is possible only if the
4374 	 * selected layer's node has single child.
4375 	 */
4376 	if (sel_layer == node->tx_sched_layer ||
4377 	    ((sel_layer == node->tx_sched_layer + 1) &&
4378 	    node->num_children == 1) ||
4379 	    ((sel_layer == node->tx_sched_layer - 1) &&
4380 	    (node->parent && node->parent->num_children == 1)))
4381 		return ICE_SUCCESS;
4382 
4383 	return ICE_ERR_CFG;
4384 }
4385 
4386 /**
4387  * ice_sched_save_q_bw - save queue node's BW information
4388  * @q_ctx: queue context structure
4389  * @rl_type: rate limit type min, max, or shared
4390  * @bw: bandwidth in Kbps - Kilo bits per sec
4391  *
4392  * Save BW information of queue type node for post replay use.
4393  */
4394 static enum ice_status
4395 ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
4396 {
4397 	switch (rl_type) {
4398 	case ICE_MIN_BW:
4399 		ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
4400 		break;
4401 	case ICE_MAX_BW:
4402 		ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
4403 		break;
4404 	case ICE_SHARED_BW:
4405 		ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
4406 		break;
4407 	default:
4408 		return ICE_ERR_PARAM;
4409 	}
4410 	return ICE_SUCCESS;
4411 }
4412 
4413 /**
4414  * ice_sched_set_q_bw_lmt - sets queue BW limit
4415  * @pi: port information structure
4416  * @vsi_handle: sw VSI handle
4417  * @tc: traffic class
4418  * @q_handle: software queue handle
4419  * @rl_type: min, max, or shared
4420  * @bw: bandwidth in Kbps
4421  *
4422  * This function sets BW limit of queue scheduling node.
4423  */
static enum ice_status
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *node;
	struct ice_q_ctx *q_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	ice_acquire_lock(&pi->sched_lock);
	/* Resolve the software queue handle to its context and TEID */
	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		goto exit_q_bw_lmt;
	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
		goto exit_q_bw_lmt;
	}

	/* Return error if it is not a leaf node */
	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
		goto exit_q_bw_lmt;

	/* SRL bandwidth layer selection */
	if (rl_type == ICE_SHARED_BW) {
		u8 sel_layer; /* selected layer */

		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
							node->tx_sched_layer);
		if (sel_layer >= pi->hw->num_tx_sched_layers) {
			status = ICE_ERR_PARAM;
			goto exit_q_bw_lmt;
		}
		/* SRL must be applicable at the selected layer for this
		 * queue node
		 */
		status = ice_sched_validate_srl_node(node, sel_layer);
		if (status)
			goto exit_q_bw_lmt;
	}

	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

	/* Save the setting for replay only if HW accepted it */
	if (!status)
		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);

exit_q_bw_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}
4475 
4476 /**
4477  * ice_cfg_q_bw_lmt - configure queue BW limit
4478  * @pi: port information structure
4479  * @vsi_handle: sw VSI handle
4480  * @tc: traffic class
4481  * @q_handle: software queue handle
4482  * @rl_type: min, max, or shared
4483  * @bw: bandwidth in Kbps
4484  *
4485  * This function configures BW limit of queue scheduling node.
4486  */
4487 enum ice_status
4488 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4489 		 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
4490 {
4491 	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
4492 				      bw);
4493 }
4494 
4495 /**
4496  * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
4497  * @pi: port information structure
4498  * @vsi_handle: sw VSI handle
4499  * @tc: traffic class
4500  * @q_handle: software queue handle
4501  * @rl_type: min, max, or shared
4502  *
4503  * This function configures BW default limit of queue scheduling node.
4504  */
4505 enum ice_status
4506 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4507 		      u16 q_handle, enum ice_rl_type rl_type)
4508 {
4509 	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
4510 				      ICE_SCHED_DFLT_BW);
4511 }
4512 
4513 /**
4514  * ice_sched_save_tc_node_bw - save TC node BW limit
4515  * @pi: port information structure
4516  * @tc: TC number
4517  * @rl_type: min or max
4518  * @bw: bandwidth in Kbps
4519  *
4520  * This function saves the modified values of bandwidth settings for later
4521  * replay purpose (restore) after reset.
4522  */
4523 static enum ice_status
4524 ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
4525 			  enum ice_rl_type rl_type, u32 bw)
4526 {
4527 	if (tc >= ICE_MAX_TRAFFIC_CLASS)
4528 		return ICE_ERR_PARAM;
4529 	switch (rl_type) {
4530 	case ICE_MIN_BW:
4531 		ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw);
4532 		break;
4533 	case ICE_MAX_BW:
4534 		ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw);
4535 		break;
4536 	case ICE_SHARED_BW:
4537 		ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw);
4538 		break;
4539 	default:
4540 		return ICE_ERR_PARAM;
4541 	}
4542 	return ICE_SUCCESS;
4543 }
4544 
4545 /**
4546  * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit
4547  * @pi: port information structure
4548  * @tc: TC number
4549  * @rl_type: min or max
4550  * @bw: bandwidth in Kbps
4551  *
4552  * This function configures bandwidth limit of TC node.
4553  */
4554 static enum ice_status
4555 ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
4556 			     enum ice_rl_type rl_type, u32 bw)
4557 {
4558 	enum ice_status status = ICE_ERR_PARAM;
4559 	struct ice_sched_node *tc_node;
4560 
4561 	if (tc >= ICE_MAX_TRAFFIC_CLASS)
4562 		return status;
4563 	ice_acquire_lock(&pi->sched_lock);
4564 	tc_node = ice_sched_get_tc_node(pi, tc);
4565 	if (!tc_node)
4566 		goto exit_set_tc_node_bw;
4567 	if (bw == ICE_SCHED_DFLT_BW)
4568 		status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type);
4569 	else
4570 		status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw);
4571 	if (!status)
4572 		status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw);
4573 
4574 exit_set_tc_node_bw:
4575 	ice_release_lock(&pi->sched_lock);
4576 	return status;
4577 }
4578 
4579 /**
4580  * ice_cfg_tc_node_bw_lmt - configure TC node BW limit
4581  * @pi: port information structure
4582  * @tc: TC number
4583  * @rl_type: min or max
4584  * @bw: bandwidth in Kbps
4585  *
4586  * This function configures BW limit of TC node.
4587  * Note: The minimum guaranteed reservation is done via DCBX.
4588  */
4589 enum ice_status
4590 ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
4591 		       enum ice_rl_type rl_type, u32 bw)
4592 {
4593 	return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw);
4594 }
4595 
4596 /**
4597  * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit
4598  * @pi: port information structure
4599  * @tc: TC number
4600  * @rl_type: min or max
4601  *
4602  * This function configures BW default limit of TC node.
4603  */
4604 enum ice_status
4605 ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
4606 			    enum ice_rl_type rl_type)
4607 {
4608 	return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW);
4609 }
4610 
4611 /**
4612  * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information
4613  * @pi: port information structure
4614  * @tc: traffic class
4615  * @rl_type: rate limit type min or max
4616  * @bw_alloc: Bandwidth allocation information
4617  *
 * Save BW alloc information of TC type node for post replay use.
4619  */
4620 static enum ice_status
4621 ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
4622 				enum ice_rl_type rl_type, u16 bw_alloc)
4623 {
4624 	if (tc >= ICE_MAX_TRAFFIC_CLASS)
4625 		return ICE_ERR_PARAM;
4626 	switch (rl_type) {
4627 	case ICE_MIN_BW:
4628 		ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc],
4629 					   bw_alloc);
4630 		break;
4631 	case ICE_MAX_BW:
4632 		ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc],
4633 					   bw_alloc);
4634 		break;
4635 	default:
4636 		return ICE_ERR_PARAM;
4637 	}
4638 	return ICE_SUCCESS;
4639 }
4640 
4641 /**
4642  * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc
4643  * @pi: port information structure
4644  * @tc: TC number
4645  * @rl_type: min or max
4646  * @bw_alloc: bandwidth alloc
4647  *
4648  * This function configures bandwidth alloc of TC node, also saves the
4649  * changed settings for replay purpose, and return success if it succeeds
4650  * in modifying bandwidth alloc setting.
4651  */
4652 static enum ice_status
4653 ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
4654 			       enum ice_rl_type rl_type, u8 bw_alloc)
4655 {
4656 	enum ice_status status = ICE_ERR_PARAM;
4657 	struct ice_sched_node *tc_node;
4658 
4659 	if (tc >= ICE_MAX_TRAFFIC_CLASS)
4660 		return status;
4661 	ice_acquire_lock(&pi->sched_lock);
4662 	tc_node = ice_sched_get_tc_node(pi, tc);
4663 	if (!tc_node)
4664 		goto exit_set_tc_node_bw_alloc;
4665 	status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type,
4666 					     bw_alloc);
4667 	if (status)
4668 		goto exit_set_tc_node_bw_alloc;
4669 	status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
4670 
4671 exit_set_tc_node_bw_alloc:
4672 	ice_release_lock(&pi->sched_lock);
4673 	return status;
4674 }
4675 
4676 /**
4677  * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc
4678  * @pi: port information structure
4679  * @tc: TC number
4680  * @rl_type: min or max
4681  * @bw_alloc: bandwidth alloc
4682  *
4683  * This function configures BW limit of TC node.
4684  * Note: The minimum guaranteed reservation is done via DCBX.
4685  */
4686 enum ice_status
4687 ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
4688 			 enum ice_rl_type rl_type, u8 bw_alloc)
4689 {
4690 	return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
4691 }
4692 
4693 /**
4694  * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default
4695  * @pi: port information structure
4696  * @vsi_handle: software VSI handle
4697  *
4698  * This function retrieves the aggregator ID based on VSI ID and TC,
4699  * and sets node's BW limit to default. This function needs to be
4700  * called with the scheduler lock held.
4701  */
4702 enum ice_status
4703 ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle)
4704 {
4705 	struct ice_vsi_ctx *vsi_ctx;
4706 	enum ice_status status = ICE_SUCCESS;
4707 	u8 tc;
4708 
4709 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4710 		return ICE_ERR_PARAM;
4711 	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
4712 	if (!vsi_ctx)
4713 		return ICE_ERR_PARAM;
4714 
4715 	ice_for_each_traffic_class(tc) {
4716 		struct ice_sched_node *node;
4717 
4718 		node = vsi_ctx->sched.ag_node[tc];
4719 		if (!node)
4720 			continue;
4721 
4722 		/* Set min profile to default */
4723 		status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW);
4724 		if (status)
4725 			break;
4726 
4727 		/* Set max profile to default */
4728 		status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW);
4729 		if (status)
4730 			break;
4731 
4732 		/* Remove shared profile, if there is one */
4733 		status = ice_sched_set_node_bw_dflt_lmt(pi, node,
4734 							ICE_SHARED_BW);
4735 		if (status)
4736 			break;
4737 	}
4738 
4739 	return status;
4740 }
4741 
4742 /**
4743  * ice_sched_get_node_by_id_type - get node from ID type
4744  * @pi: port information structure
4745  * @id: identifier
4746  * @agg_type: type of aggregator
4747  * @tc: traffic class
4748  *
4749  * This function returns node identified by ID of type aggregator, and
4750  * based on traffic class (TC). This function needs to be called with
4751  * the scheduler lock held.
4752  */
static struct ice_sched_node *
ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
			      enum ice_agg_type agg_type, u8 tc)
{
	struct ice_sched_node *node = NULL;
	struct ice_sched_node *child_node;

	switch (agg_type) {
	case ICE_AGG_TYPE_VSI: {
		/* 'id' is a software VSI handle here */
		struct ice_vsi_ctx *vsi_ctx;
		u16 vsi_handle = (u16)id;

		if (!ice_is_vsi_valid(pi->hw, vsi_handle))
			break;
		/* Get sched_vsi_info */
		vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
		if (!vsi_ctx)
			break;
		node = vsi_ctx->sched.vsi_node[tc];
		break;
	}

	case ICE_AGG_TYPE_AGG: {
		/* 'id' is an aggregator ID; look it up under the TC node */
		struct ice_sched_node *tc_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (tc_node)
			node = ice_sched_get_agg_node(pi, tc_node, id);
		break;
	}

	case ICE_AGG_TYPE_Q:
		/* The current implementation allows single queue to modify */
		node = ice_sched_get_node(pi, id);
		break;

	case ICE_AGG_TYPE_QG:
		/* The current implementation allows single qg to modify */
		child_node = ice_sched_get_node(pi, id);
		if (!child_node)
			break;
		/* The queue group is the queue node's parent */
		node = child_node->parent;
		break;

	default:
		break;
	}

	return node;
}
4803 
4804 /**
4805  * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
4806  * @pi: port information structure
4807  * @id: ID (software VSI handle or AGG ID)
4808  * @agg_type: aggregator type (VSI or AGG type node)
4809  * @tc: traffic class
4810  * @rl_type: min or max
4811  * @bw: bandwidth in Kbps
4812  *
4813  * This function sets BW limit of VSI or Aggregator scheduling node
4814  * based on TC information from passed in argument BW.
4815  */
4816 enum ice_status
4817 ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
4818 				 enum ice_agg_type agg_type, u8 tc,
4819 				 enum ice_rl_type rl_type, u32 bw)
4820 {
4821 	enum ice_status status = ICE_ERR_PARAM;
4822 	struct ice_sched_node *node;
4823 
4824 	if (!pi)
4825 		return status;
4826 
4827 	if (rl_type == ICE_UNKNOWN_BW)
4828 		return status;
4829 
4830 	ice_acquire_lock(&pi->sched_lock);
4831 	node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
4832 	if (!node) {
4833 		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
4834 		goto exit_set_node_bw_lmt_per_tc;
4835 	}
4836 	if (bw == ICE_SCHED_DFLT_BW)
4837 		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
4838 	else
4839 		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
4840 
4841 exit_set_node_bw_lmt_per_tc:
4842 	ice_release_lock(&pi->sched_lock);
4843 	return status;
4844 }
4845 
4846 /**
4847  * ice_sched_validate_vsi_srl_node - validate VSI SRL node
4848  * @pi: port information structure
4849  * @vsi_handle: software VSI handle
4850  *
4851  * This function validates SRL node of the VSI node if available SRL layer is
 * different than the VSI node layer on all TC(s). This function needs to be
4853  * called with scheduler lock held.
4854  */
4855 static enum ice_status
4856 ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
4857 {
4858 	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
4859 	u8 tc;
4860 
4861 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4862 		return ICE_ERR_PARAM;
4863 
4864 	/* Return success if no nodes are present across TC */
4865 	ice_for_each_traffic_class(tc) {
4866 		struct ice_sched_node *tc_node, *vsi_node;
4867 		enum ice_rl_type rl_type = ICE_SHARED_BW;
4868 		enum ice_status status;
4869 
4870 		tc_node = ice_sched_get_tc_node(pi, tc);
4871 		if (!tc_node)
4872 			continue;
4873 
4874 		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
4875 		if (!vsi_node)
4876 			continue;
4877 
4878 		/* SRL bandwidth layer selection */
4879 		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
4880 			u8 node_layer = vsi_node->tx_sched_layer;
4881 			u8 layer_num;
4882 
4883 			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
4884 								node_layer);
4885 			if (layer_num >= pi->hw->num_tx_sched_layers)
4886 				return ICE_ERR_PARAM;
4887 			sel_layer = layer_num;
4888 		}
4889 
4890 		status = ice_sched_validate_srl_node(vsi_node, sel_layer);
4891 		if (status)
4892 			return status;
4893 	}
4894 	return ICE_SUCCESS;
4895 }
4896 
4897 /**
4898  * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit
4899  * @pi: port information structure
4900  * @vsi_handle: software VSI handle
4901  * @bw: bandwidth in Kbps
4902  *
 * This function configures the shared rate limiter (SRL) of all VSI type nodes
4904  * across all traffic classes for VSI matching handle. When BW value of
4905  * ICE_SCHED_DFLT_BW is passed, it removes the SRL from the node.
4906  */
4907 enum ice_status
4908 ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
4909 				u32 bw)
4910 {
4911 	enum ice_status status = ICE_SUCCESS;
4912 	u8 tc;
4913 
4914 	if (!pi)
4915 		return ICE_ERR_PARAM;
4916 
4917 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4918 		return ICE_ERR_PARAM;
4919 
4920 	ice_acquire_lock(&pi->sched_lock);
4921 	status = ice_sched_validate_vsi_srl_node(pi, vsi_handle);
4922 	if (status)
4923 		goto exit_set_vsi_bw_shared_lmt;
4924 	/* Return success if no nodes are present across TC */
4925 	ice_for_each_traffic_class(tc) {
4926 		struct ice_sched_node *tc_node, *vsi_node;
4927 		enum ice_rl_type rl_type = ICE_SHARED_BW;
4928 
4929 		tc_node = ice_sched_get_tc_node(pi, tc);
4930 		if (!tc_node)
4931 			continue;
4932 
4933 		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
4934 		if (!vsi_node)
4935 			continue;
4936 
4937 		if (bw == ICE_SCHED_DFLT_BW)
4938 			/* It removes existing SRL from the node */
4939 			status = ice_sched_set_node_bw_dflt_lmt(pi, vsi_node,
4940 								rl_type);
4941 		else
4942 			status = ice_sched_set_node_bw_lmt(pi, vsi_node,
4943 							   rl_type, bw);
4944 		if (status)
4945 			break;
4946 		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
4947 		if (status)
4948 			break;
4949 	}
4950 
4951 exit_set_vsi_bw_shared_lmt:
4952 	ice_release_lock(&pi->sched_lock);
4953 	return status;
4954 }
4955 
4956 /**
4957  * ice_sched_validate_agg_srl_node - validate AGG SRL node
4958  * @pi: port information structure
4959  * @agg_id: aggregator ID
4960  *
4961  * This function validates SRL node of the AGG node if available SRL layer is
 * different than the AGG node layer on all TC(s). This function needs to be
4963  * called with scheduler lock held.
4964  */
4965 static enum ice_status
4966 ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
4967 {
4968 	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
4969 	struct ice_sched_agg_info *agg_info;
4970 	bool agg_id_present = false;
4971 	enum ice_status status = ICE_SUCCESS;
4972 	u8 tc;
4973 
4974 	LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info,
4975 			    list_entry)
4976 		if (agg_info->agg_id == agg_id) {
4977 			agg_id_present = true;
4978 			break;
4979 		}
4980 	if (!agg_id_present)
4981 		return ICE_ERR_PARAM;
4982 	/* Return success if no nodes are present across TC */
4983 	ice_for_each_traffic_class(tc) {
4984 		struct ice_sched_node *tc_node, *agg_node;
4985 		enum ice_rl_type rl_type = ICE_SHARED_BW;
4986 
4987 		tc_node = ice_sched_get_tc_node(pi, tc);
4988 		if (!tc_node)
4989 			continue;
4990 
4991 		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
4992 		if (!agg_node)
4993 			continue;
4994 		/* SRL bandwidth layer selection */
4995 		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
4996 			u8 node_layer = agg_node->tx_sched_layer;
4997 			u8 layer_num;
4998 
4999 			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
5000 								node_layer);
5001 			if (layer_num >= pi->hw->num_tx_sched_layers)
5002 				return ICE_ERR_PARAM;
5003 			sel_layer = layer_num;
5004 		}
5005 
5006 		status = ice_sched_validate_srl_node(agg_node, sel_layer);
5007 		if (status)
5008 			break;
5009 	}
5010 	return status;
5011 }
5012 
5013 /**
5014  * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit
5015  * @pi: port information structure
5016  * @agg_id: aggregator ID
5017  * @bw: bandwidth in Kbps
5018  *
5019  * This function configures the shared rate limiter(SRL) of all aggregator type
5020  * nodes across all traffic classes for aggregator matching agg_id. When
5021  * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the
5022  * node(s).
5023  */
5024 enum ice_status
5025 ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
5026 {
5027 	struct ice_sched_agg_info *agg_info;
5028 	struct ice_sched_agg_info *tmp;
5029 	bool agg_id_present = false;
5030 	enum ice_status status = ICE_SUCCESS;
5031 	u8 tc;
5032 
5033 	if (!pi)
5034 		return ICE_ERR_PARAM;
5035 
5036 	ice_acquire_lock(&pi->sched_lock);
5037 	status = ice_sched_validate_agg_srl_node(pi, agg_id);
5038 	if (status)
5039 		goto exit_agg_bw_shared_lmt;
5040 
5041 	LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list,
5042 				 ice_sched_agg_info, list_entry)
5043 		if (agg_info->agg_id == agg_id) {
5044 			agg_id_present = true;
5045 			break;
5046 		}
5047 
5048 	if (!agg_id_present) {
5049 		status = ICE_ERR_PARAM;
5050 		goto exit_agg_bw_shared_lmt;
5051 	}
5052 
5053 	/* Return success if no nodes are present across TC */
5054 	ice_for_each_traffic_class(tc) {
5055 		enum ice_rl_type rl_type = ICE_SHARED_BW;
5056 		struct ice_sched_node *tc_node, *agg_node;
5057 
5058 		tc_node = ice_sched_get_tc_node(pi, tc);
5059 		if (!tc_node)
5060 			continue;
5061 
5062 		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
5063 		if (!agg_node)
5064 			continue;
5065 
5066 		if (bw == ICE_SCHED_DFLT_BW)
5067 			/* It removes existing SRL from the node */
5068 			status = ice_sched_set_node_bw_dflt_lmt(pi, agg_node,
5069 								rl_type);
5070 		else
5071 			status = ice_sched_set_node_bw_lmt(pi, agg_node,
5072 							   rl_type, bw);
5073 		if (status)
5074 			break;
5075 		status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
5076 		if (status)
5077 			break;
5078 	}
5079 
5080 exit_agg_bw_shared_lmt:
5081 	ice_release_lock(&pi->sched_lock);
5082 	return status;
5083 }
5084 
5085 /**
5086  * ice_sched_cfg_sibl_node_prio - configure node sibling priority
5087  * @pi: port information structure
5088  * @node: sched node to configure
5089  * @priority: sibling priority
5090  *
5091  * This function configures node element's sibling priority only. This
5092  * function needs to be called with scheduler lock held.
5093  */
5094 enum ice_status
5095 ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
5096 			     struct ice_sched_node *node, u8 priority)
5097 {
5098 	struct ice_aqc_txsched_elem_data buf;
5099 	struct ice_aqc_txsched_elem *data;
5100 	struct ice_hw *hw = pi->hw;
5101 	enum ice_status status;
5102 
5103 	if (!hw)
5104 		return ICE_ERR_PARAM;
5105 	buf = node->info;
5106 	data = &buf.data;
5107 	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
5108 	priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) &
5109 		   ICE_AQC_ELEM_GENERIC_PRIO_M;
5110 	data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M;
5111 	data->generic |= priority;
5112 
5113 	/* Configure element */
5114 	status = ice_sched_update_elem(hw, node, &buf);
5115 	return status;
5116 }
5117 
5118 /**
5119  * ice_cfg_rl_burst_size - Set burst size value
5120  * @hw: pointer to the HW struct
5121  * @bytes: burst size in bytes
5122  *
5123  * This function configures/set the burst size to requested new value. The new
5124  * burst size value is used for future rate limit calls. It doesn't change the
5125  * existing or previously created RL profiles.
5126  */
5127 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
5128 {
5129 	u16 burst_size_to_prog;
5130 
5131 	if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
5132 	    bytes > ICE_MAX_BURST_SIZE_ALLOWED)
5133 		return ICE_ERR_PARAM;
5134 	if (ice_round_to_num(bytes, 64) <=
5135 	    ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
5136 		/* 64 byte granularity case */
5137 		/* Disable MSB granularity bit */
5138 		burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
5139 		/* round number to nearest 64 byte granularity */
5140 		bytes = ice_round_to_num(bytes, 64);
5141 		/* The value is in 64 byte chunks */
5142 		burst_size_to_prog |= (u16)(bytes / 64);
5143 	} else {
5144 		/* k bytes granularity case */
5145 		/* Enable MSB granularity bit */
5146 		burst_size_to_prog = ICE_KBYTE_GRANULARITY;
5147 		/* round number to nearest 1024 granularity */
5148 		bytes = ice_round_to_num(bytes, 1024);
5149 		/* check rounding doesn't go beyond allowed */
5150 		if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
5151 			bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
5152 		/* The value is in k bytes */
5153 		burst_size_to_prog |= (u16)(bytes / 1024);
5154 	}
5155 	hw->max_burst_size = burst_size_to_prog;
5156 	return ICE_SUCCESS;
5157 }
5158 
/**
5160  * ice_sched_replay_node_prio - re-configure node priority
5161  * @hw: pointer to the HW struct
5162  * @node: sched node to configure
5163  * @priority: priority value
5164  *
5165  * This function configures node element's priority value. It
5166  * needs to be called with scheduler lock held.
5167  */
5168 static enum ice_status
5169 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
5170 			   u8 priority)
5171 {
5172 	struct ice_aqc_txsched_elem_data buf;
5173 	struct ice_aqc_txsched_elem *data;
5174 	enum ice_status status;
5175 
5176 	buf = node->info;
5177 	data = &buf.data;
5178 	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
5179 	data->generic = priority;
5180 
5181 	/* Configure element */
5182 	status = ice_sched_update_elem(hw, node, &buf);
5183 	return status;
5184 }
5185 
5186 /**
5187  * ice_sched_replay_node_bw - replay node(s) BW
5188  * @hw: pointer to the HW struct
5189  * @node: sched node to configure
5190  * @bw_t_info: BW type information
5191  *
5192  * This function restores node's BW from bw_t_info. The caller needs
5193  * to hold the scheduler lock.
5194  */
5195 static enum ice_status
5196 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
5197 			 struct ice_bw_type_info *bw_t_info)
5198 {
5199 	struct ice_port_info *pi = hw->port_info;
5200 	enum ice_status status = ICE_ERR_PARAM;
5201 	u16 bw_alloc;
5202 
5203 	if (!node)
5204 		return status;
5205 	if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
5206 		return ICE_SUCCESS;
5207 	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) {
5208 		status = ice_sched_replay_node_prio(hw, node,
5209 						    bw_t_info->generic);
5210 		if (status)
5211 			return status;
5212 	}
5213 	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) {
5214 		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
5215 						   bw_t_info->cir_bw.bw);
5216 		if (status)
5217 			return status;
5218 	}
5219 	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) {
5220 		bw_alloc = bw_t_info->cir_bw.bw_alloc;
5221 		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
5222 						     bw_alloc);
5223 		if (status)
5224 			return status;
5225 	}
5226 	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) {
5227 		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
5228 						   bw_t_info->eir_bw.bw);
5229 		if (status)
5230 			return status;
5231 	}
5232 	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) {
5233 		bw_alloc = bw_t_info->eir_bw.bw_alloc;
5234 		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
5235 						     bw_alloc);
5236 		if (status)
5237 			return status;
5238 	}
5239 	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED))
5240 		status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
5241 						   bw_t_info->shared_bw);
5242 	return status;
5243 }
5244 
5245 /**
5246  * ice_sched_replay_agg_bw - replay aggregator node(s) BW
5247  * @hw: pointer to the HW struct
5248  * @agg_info: aggregator data structure
5249  *
5250  * This function re-creates aggregator type nodes. The caller needs to hold
5251  * the scheduler lock.
5252  */
5253 static enum ice_status
5254 ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info)
5255 {
5256 	struct ice_sched_node *tc_node, *agg_node;
5257 	enum ice_status status = ICE_SUCCESS;
5258 	u8 tc;
5259 
5260 	if (!agg_info)
5261 		return ICE_ERR_PARAM;
5262 	ice_for_each_traffic_class(tc) {
5263 		if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap,
5264 					ICE_BW_TYPE_CNT))
5265 			continue;
5266 		tc_node = ice_sched_get_tc_node(hw->port_info, tc);
5267 		if (!tc_node) {
5268 			status = ICE_ERR_PARAM;
5269 			break;
5270 		}
5271 		agg_node = ice_sched_get_agg_node(hw->port_info, tc_node,
5272 						  agg_info->agg_id);
5273 		if (!agg_node) {
5274 			status = ICE_ERR_PARAM;
5275 			break;
5276 		}
5277 		status = ice_sched_replay_node_bw(hw, agg_node,
5278 						  &agg_info->bw_t_info[tc]);
5279 		if (status)
5280 			break;
5281 	}
5282 	return status;
5283 }
5284 
5285 /**
5286  * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
5287  * @pi: port info struct
5288  * @tc_bitmap: 8 bits TC bitmap to check
5289  * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
5290  *
 * This function returns the enabled TC bitmap in ena_tc_bitmap. TCs whose
 * scheduler nodes are missing are excluded. It needs to be called with
5293  * scheduler lock held.
5294  */
5295 static void
5296 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap,
5297 			    ice_bitmap_t *ena_tc_bitmap)
5298 {
5299 	u8 tc;
5300 
5301 	/* Some TC(s) may be missing after reset, adjust for replay */
5302 	ice_for_each_traffic_class(tc)
5303 		if (ice_is_tc_ena(*tc_bitmap, tc) &&
5304 		    (ice_sched_get_tc_node(pi, tc)))
5305 			ice_set_bit(tc, ena_tc_bitmap);
5306 }
5307 
5308 /**
5309  * ice_sched_replay_agg - recreate aggregator node(s)
5310  * @hw: pointer to the HW struct
5311  *
 * This function re-creates aggregator type nodes which were not replayed
 * earlier. It also replays aggregator BW information. These aggregator nodes are not
5314  * associated with VSI type node yet.
5315  */
5316 void ice_sched_replay_agg(struct ice_hw *hw)
5317 {
5318 	struct ice_port_info *pi = hw->port_info;
5319 	struct ice_sched_agg_info *agg_info;
5320 
5321 	ice_acquire_lock(&pi->sched_lock);
5322 	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
5323 			    list_entry) {
5324 		/* replay aggregator (re-create aggregator node) */
5325 		if (!ice_cmp_bitmap(agg_info->tc_bitmap,
5326 				    agg_info->replay_tc_bitmap,
5327 				    ICE_MAX_TRAFFIC_CLASS)) {
5328 			ice_declare_bitmap(replay_bitmap,
5329 					   ICE_MAX_TRAFFIC_CLASS);
5330 			enum ice_status status;
5331 
5332 			ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5333 			ice_sched_get_ena_tc_bitmap(pi,
5334 						    agg_info->replay_tc_bitmap,
5335 						    replay_bitmap);
5336 			status = ice_sched_cfg_agg(hw->port_info,
5337 						   agg_info->agg_id,
5338 						   ICE_AGG_TYPE_AGG,
5339 						   replay_bitmap);
5340 			if (status) {
5341 				ice_info(hw, "Replay agg id[%d] failed\n",
5342 					 agg_info->agg_id);
5343 				/* Move on to next one */
5344 				continue;
5345 			}
5346 			/* Replay aggregator node BW (restore aggregator BW) */
5347 			status = ice_sched_replay_agg_bw(hw, agg_info);
5348 			if (status)
5349 				ice_info(hw, "Replay agg bw [id=%d] failed\n",
5350 					 agg_info->agg_id);
5351 		}
5352 	}
5353 	ice_release_lock(&pi->sched_lock);
5354 }
5355 
5356 /**
5357  * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
5358  * @hw: pointer to the HW struct
5359  *
 * This function initializes the aggregator(s) TC bitmap to zero. A required
5361  * preinit step for replaying aggregators.
5362  */
5363 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
5364 {
5365 	struct ice_port_info *pi = hw->port_info;
5366 	struct ice_sched_agg_info *agg_info;
5367 
5368 	ice_acquire_lock(&pi->sched_lock);
5369 	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
5370 			    list_entry) {
5371 		struct ice_sched_agg_vsi_info *agg_vsi_info;
5372 
5373 		agg_info->tc_bitmap[0] = 0;
5374 		LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
5375 				    ice_sched_agg_vsi_info, list_entry)
5376 			agg_vsi_info->tc_bitmap[0] = 0;
5377 	}
5378 	ice_release_lock(&pi->sched_lock);
5379 }
5380 
5381 /**
5382  * ice_sched_replay_tc_node_bw - replay TC node(s) BW
5383  * @pi: port information structure
5384  *
 * This function replays the bandwidth configuration of the TC nodes.
5386  */
5387 enum ice_status
5388 ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
5389 {
5390 	enum ice_status status = ICE_SUCCESS;
5391 	u8 tc;
5392 
5393 	if (!pi->hw)
5394 		return ICE_ERR_PARAM;
5395 	ice_acquire_lock(&pi->sched_lock);
5396 	ice_for_each_traffic_class(tc) {
5397 		struct ice_sched_node *tc_node;
5398 
5399 		tc_node = ice_sched_get_tc_node(pi, tc);
5400 		if (!tc_node)
5401 			continue; /* TC not present */
5402 		status = ice_sched_replay_node_bw(pi->hw, tc_node,
5403 						  &pi->tc_node_bw_t_info[tc]);
5404 		if (status)
5405 			break;
5406 	}
5407 	ice_release_lock(&pi->sched_lock);
5408 	return status;
5409 }
5410 
5411 /**
5412  * ice_sched_replay_vsi_bw - replay VSI type node(s) BW
5413  * @hw: pointer to the HW struct
5414  * @vsi_handle: software VSI handle
5415  * @tc_bitmap: 8 bits TC bitmap
5416  *
5417  * This function replays VSI type nodes bandwidth. This function needs to be
5418  * called with scheduler lock held.
5419  */
5420 static enum ice_status
5421 ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
5422 			ice_bitmap_t *tc_bitmap)
5423 {
5424 	struct ice_sched_node *vsi_node, *tc_node;
5425 	struct ice_port_info *pi = hw->port_info;
5426 	struct ice_bw_type_info *bw_t_info;
5427 	struct ice_vsi_ctx *vsi_ctx;
5428 	enum ice_status status = ICE_SUCCESS;
5429 	u8 tc;
5430 
5431 	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
5432 	if (!vsi_ctx)
5433 		return ICE_ERR_PARAM;
5434 	ice_for_each_traffic_class(tc) {
5435 		if (!ice_is_tc_ena(*tc_bitmap, tc))
5436 			continue;
5437 		tc_node = ice_sched_get_tc_node(pi, tc);
5438 		if (!tc_node)
5439 			continue;
5440 		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
5441 		if (!vsi_node)
5442 			continue;
5443 		bw_t_info = &vsi_ctx->sched.bw_t_info[tc];
5444 		status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info);
5445 		if (status)
5446 			break;
5447 	}
5448 	return status;
5449 }
5450 
5451 /**
5452  * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
5453  * @hw: pointer to the HW struct
5454  * @vsi_handle: software VSI handle
5455  *
5456  * This function replays aggregator node, VSI to aggregator type nodes, and
5457  * their node bandwidth information. This function needs to be called with
5458  * scheduler lock held.
5459  */
5460 static enum ice_status
5461 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
5462 {
5463 	ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5464 	struct ice_sched_agg_vsi_info *agg_vsi_info;
5465 	struct ice_port_info *pi = hw->port_info;
5466 	struct ice_sched_agg_info *agg_info;
5467 	enum ice_status status;
5468 
5469 	ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5470 	if (!ice_is_vsi_valid(hw, vsi_handle))
5471 		return ICE_ERR_PARAM;
5472 	agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
5473 	if (!agg_info)
5474 		return ICE_SUCCESS; /* Not present in list - default Agg case */
5475 	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
5476 	if (!agg_vsi_info)
5477 		return ICE_SUCCESS; /* Not present in list - default Agg case */
5478 	ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
5479 				    replay_bitmap);
5480 	/* Replay aggregator node associated to vsi_handle */
5481 	status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
5482 				   ICE_AGG_TYPE_AGG, replay_bitmap);
5483 	if (status)
5484 		return status;
5485 	/* Replay aggregator node BW (restore aggregator BW) */
5486 	status = ice_sched_replay_agg_bw(hw, agg_info);
5487 	if (status)
5488 		return status;
5489 
5490 	ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5491 	ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
5492 				    replay_bitmap);
5493 	/* Move this VSI (vsi_handle) to above aggregator */
5494 	status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
5495 					    replay_bitmap);
5496 	if (status)
5497 		return status;
5498 	/* Replay VSI BW (restore VSI BW) */
5499 	return ice_sched_replay_vsi_bw(hw, vsi_handle,
5500 				       agg_vsi_info->tc_bitmap);
5501 }
5502 
5503 /**
5504  * ice_replay_vsi_agg - replay VSI to aggregator node
5505  * @hw: pointer to the HW struct
5506  * @vsi_handle: software VSI handle
5507  *
5508  * This function replays association of VSI to aggregator type nodes, and
5509  * node bandwidth information.
5510  */
5511 enum ice_status
5512 ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
5513 {
5514 	struct ice_port_info *pi = hw->port_info;
5515 	enum ice_status status;
5516 
5517 	ice_acquire_lock(&pi->sched_lock);
5518 	status = ice_sched_replay_vsi_agg(hw, vsi_handle);
5519 	ice_release_lock(&pi->sched_lock);
5520 	return status;
5521 }
5522 
5523 /**
5524  * ice_sched_replay_q_bw - replay queue type node BW
5525  * @pi: port information structure
5526  * @q_ctx: queue context structure
5527  *
5528  * This function replays queue type node bandwidth. This function needs to be
5529  * called with scheduler lock held.
5530  */
5531 enum ice_status
5532 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
5533 {
5534 	struct ice_sched_node *q_node;
5535 
5536 	/* Following also checks the presence of node in tree */
5537 	q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
5538 	if (!q_node)
5539 		return ICE_ERR_PARAM;
5540 	return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
5541 }
5542