1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) 2024, Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include "ice_sched.h"
33
34 /**
35 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
36 * @pi: port information structure
37 * @info: Scheduler element information from firmware
38 *
39 * This function inserts the root node of the scheduling tree topology
40 * to the SW DB.
41 */
42 static enum ice_status
ice_sched_add_root_node(struct ice_port_info * pi,struct ice_aqc_txsched_elem_data * info)43 ice_sched_add_root_node(struct ice_port_info *pi,
44 struct ice_aqc_txsched_elem_data *info)
45 {
46 struct ice_sched_node *root;
47 struct ice_hw *hw;
48
49 if (!pi)
50 return ICE_ERR_PARAM;
51
52 hw = pi->hw;
53
54 root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root));
55 if (!root)
56 return ICE_ERR_NO_MEMORY;
57
58 root->children = (struct ice_sched_node **)
59 ice_calloc(hw, hw->max_children[0], sizeof(*root->children));
60 if (!root->children) {
61 ice_free(hw, root);
62 return ICE_ERR_NO_MEMORY;
63 }
64
65 ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA);
66 pi->root = root;
67 return ICE_SUCCESS;
68 }
69
70 /**
71 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
72 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
73 * @teid: node TEID to search
74 *
75 * This function searches for a node matching the TEID in the scheduling tree
76 * from the SW DB. The search is recursive and is restricted by the number of
77 * layers it has searched through; stopping at the max supported layer.
78 *
79 * This function needs to be called when holding the port_info->sched_lock
80 */
81 struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node * start_node,u32 teid)82 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
83 {
84 u16 i;
85
86 /* The TEID is same as that of the start_node */
87 if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
88 return start_node;
89
90 /* The node has no children or is at the max layer */
91 if (!start_node->num_children ||
92 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
93 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
94 return NULL;
95
96 /* Check if TEID matches to any of the children nodes */
97 for (i = 0; i < start_node->num_children; i++)
98 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
99 return start_node->children[i];
100
101 /* Search within each child's sub-tree */
102 for (i = 0; i < start_node->num_children; i++) {
103 struct ice_sched_node *tmp;
104
105 tmp = ice_sched_find_node_by_teid(start_node->children[i],
106 teid);
107 if (tmp)
108 return tmp;
109 }
110
111 return NULL;
112 }
113
114 /**
115 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
116 * @hw: pointer to the HW struct
117 * @cmd_opc: cmd opcode
118 * @elems_req: number of elements to request
119 * @buf: pointer to buffer
120 * @buf_size: buffer size in bytes
121 * @elems_resp: returns total number of elements response
122 * @cd: pointer to command details structure or NULL
123 *
124 * This function sends a scheduling elements cmd (cmd_opc)
125 */
126 static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw * hw,enum ice_adminq_opc cmd_opc,u16 elems_req,void * buf,u16 buf_size,u16 * elems_resp,struct ice_sq_cd * cd)127 ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
128 u16 elems_req, void *buf, u16 buf_size,
129 u16 *elems_resp, struct ice_sq_cd *cd)
130 {
131 struct ice_aqc_sched_elem_cmd *cmd;
132 struct ice_aq_desc desc;
133 enum ice_status status;
134
135 cmd = &desc.params.sched_elem_cmd;
136 ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
137 cmd->num_elem_req = CPU_TO_LE16(elems_req);
138 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
139 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
140 if (!status && elems_resp)
141 *elems_resp = LE16_TO_CPU(cmd->num_elem_resp);
142
143 return status;
144 }
145
146 /**
147 * ice_aq_query_sched_elems - query scheduler elements
148 * @hw: pointer to the HW struct
149 * @elems_req: number of elements to query
150 * @buf: pointer to buffer
151 * @buf_size: buffer size in bytes
152 * @elems_ret: returns total number of elements returned
153 * @cd: pointer to command details structure or NULL
154 *
155 * Query scheduling elements (0x0404)
156 */
157 enum ice_status
ice_aq_query_sched_elems(struct ice_hw * hw,u16 elems_req,struct ice_aqc_txsched_elem_data * buf,u16 buf_size,u16 * elems_ret,struct ice_sq_cd * cd)158 ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
159 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
160 u16 *elems_ret, struct ice_sq_cd *cd)
161 {
162 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
163 elems_req, (void *)buf, buf_size,
164 elems_ret, cd);
165 }
166
167 /**
168 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
169 * @pi: port information structure
170 * @layer: Scheduler layer of the node
171 * @info: Scheduler element information from firmware
172 * @prealloc_node: preallocated ice_sched_node struct for SW DB
173 *
174 * This function inserts a scheduler node to the SW DB.
175 */
176 enum ice_status
ice_sched_add_node(struct ice_port_info * pi,u8 layer,struct ice_aqc_txsched_elem_data * info,struct ice_sched_node * prealloc_node)177 ice_sched_add_node(struct ice_port_info *pi, u8 layer,
178 struct ice_aqc_txsched_elem_data *info,
179 struct ice_sched_node *prealloc_node)
180 {
181 struct ice_aqc_txsched_elem_data elem;
182 struct ice_sched_node *parent;
183 struct ice_sched_node *node;
184 enum ice_status status;
185 struct ice_hw *hw;
186
187 if (!pi)
188 return ICE_ERR_PARAM;
189
190 hw = pi->hw;
191
192 /* A valid parent node should be there */
193 parent = ice_sched_find_node_by_teid(pi->root,
194 LE32_TO_CPU(info->parent_teid));
195 if (!parent) {
196 ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
197 LE32_TO_CPU(info->parent_teid));
198 return ICE_ERR_PARAM;
199 }
200
201 /* query the current node information from FW before adding it
202 * to the SW DB
203 */
204 status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
205 if (status)
206 return status;
207
208 if (prealloc_node)
209 node = prealloc_node;
210 else
211 node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
212 if (!node)
213 return ICE_ERR_NO_MEMORY;
214 if (hw->max_children[layer]) {
215 node->children = (struct ice_sched_node **)
216 ice_calloc(hw, hw->max_children[layer],
217 sizeof(*node->children));
218 if (!node->children) {
219 ice_free(hw, node);
220 return ICE_ERR_NO_MEMORY;
221 }
222 }
223
224 node->in_use = true;
225 node->parent = parent;
226 node->tx_sched_layer = layer;
227 parent->children[parent->num_children++] = node;
228 node->info = elem;
229 return ICE_SUCCESS;
230 }
231
232 /**
233 * ice_aq_delete_sched_elems - delete scheduler elements
234 * @hw: pointer to the HW struct
235 * @grps_req: number of groups to delete
236 * @buf: pointer to buffer
237 * @buf_size: buffer size in bytes
238 * @grps_del: returns total number of elements deleted
239 * @cd: pointer to command details structure or NULL
240 *
241 * Delete scheduling elements (0x040F)
242 */
243 static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw * hw,u16 grps_req,struct ice_aqc_delete_elem * buf,u16 buf_size,u16 * grps_del,struct ice_sq_cd * cd)244 ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
245 struct ice_aqc_delete_elem *buf, u16 buf_size,
246 u16 *grps_del, struct ice_sq_cd *cd)
247 {
248 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
249 grps_req, (void *)buf, buf_size,
250 grps_del, cd);
251 }
252
253 /**
254 * ice_sched_remove_elems - remove nodes from HW
255 * @hw: pointer to the HW struct
256 * @parent: pointer to the parent node
257 * @num_nodes: number of nodes
258 * @node_teids: array of node teids to be deleted
259 *
260 * This function remove nodes from HW
261 */
262 static enum ice_status
ice_sched_remove_elems(struct ice_hw * hw,struct ice_sched_node * parent,u16 num_nodes,u32 * node_teids)263 ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
264 u16 num_nodes, u32 *node_teids)
265 {
266 struct ice_aqc_delete_elem *buf;
267 u16 i, num_groups_removed = 0;
268 enum ice_status status;
269 u16 buf_size;
270
271 buf_size = ice_struct_size(buf, teid, num_nodes);
272 buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
273 if (!buf)
274 return ICE_ERR_NO_MEMORY;
275
276 buf->hdr.parent_teid = parent->info.node_teid;
277 buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
278 for (i = 0; i < num_nodes; i++)
279 buf->teid[i] = CPU_TO_LE32(node_teids[i]);
280
281 status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
282 &num_groups_removed, NULL);
283 if (status != ICE_SUCCESS || num_groups_removed != 1)
284 ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
285 hw->adminq.sq_last_status);
286
287 ice_free(hw, buf);
288 return status;
289 }
290
291 /**
292 * ice_sched_get_first_node - get the first node of the given layer
293 * @pi: port information structure
294 * @parent: pointer the base node of the subtree
295 * @layer: layer number
296 *
297 * This function retrieves the first node of the given layer from the subtree
298 */
299 static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info * pi,struct ice_sched_node * parent,u8 layer)300 ice_sched_get_first_node(struct ice_port_info *pi,
301 struct ice_sched_node *parent, u8 layer)
302 {
303 return pi->sib_head[parent->tc_num][layer];
304 }
305
306 /**
307 * ice_sched_get_tc_node - get pointer to TC node
308 * @pi: port information structure
309 * @tc: TC number
310 *
311 * This function returns the TC node pointer
312 */
ice_sched_get_tc_node(struct ice_port_info * pi,u8 tc)313 struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
314 {
315 u8 i;
316
317 if (!pi || !pi->root)
318 return NULL;
319 for (i = 0; i < pi->root->num_children; i++)
320 if (pi->root->children[i]->tc_num == tc)
321 return pi->root->children[i];
322 return NULL;
323 }
324
/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = LE32_TO_CPU(node->info.node_teid);

		/* HW removal must happen before the SW unlink below so the
		 * delete command can still reference node->parent
		 */
		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				/* compact the array: shift every later
				 * child one slot left to keep it dense
				 */
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* unlink node from the singly linked sibling list of its
		 * layer; walk from the layer's head to find its predecessor
		 */
		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		ice_free(hw, node->children);
	ice_free(hw, node);
}
391
392 /**
393 * ice_aq_get_dflt_topo - gets default scheduler topology
394 * @hw: pointer to the HW struct
395 * @lport: logical port number
396 * @buf: pointer to buffer
397 * @buf_size: buffer size in bytes
398 * @num_branches: returns total number of queue to port branches
399 * @cd: pointer to command details structure or NULL
400 *
401 * Get default scheduler topology (0x400)
402 */
403 static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw * hw,u8 lport,struct ice_aqc_get_topo_elem * buf,u16 buf_size,u8 * num_branches,struct ice_sq_cd * cd)404 ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
405 struct ice_aqc_get_topo_elem *buf, u16 buf_size,
406 u8 *num_branches, struct ice_sq_cd *cd)
407 {
408 struct ice_aqc_get_topo *cmd;
409 struct ice_aq_desc desc;
410 enum ice_status status;
411
412 cmd = &desc.params.get_topo;
413 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
414 cmd->port_num = lport;
415 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
416 if (!status && num_branches)
417 *num_branches = cmd->num_branches;
418
419 return status;
420 }
421
422 /**
423 * ice_aq_add_sched_elems - adds scheduling element
424 * @hw: pointer to the HW struct
425 * @grps_req: the number of groups that are requested to be added
426 * @buf: pointer to buffer
427 * @buf_size: buffer size in bytes
428 * @grps_added: returns total number of groups added
429 * @cd: pointer to command details structure or NULL
430 *
431 * Add scheduling elements (0x0401)
432 */
433 static enum ice_status
ice_aq_add_sched_elems(struct ice_hw * hw,u16 grps_req,struct ice_aqc_add_elem * buf,u16 buf_size,u16 * grps_added,struct ice_sq_cd * cd)434 ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
435 struct ice_aqc_add_elem *buf, u16 buf_size,
436 u16 *grps_added, struct ice_sq_cd *cd)
437 {
438 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
439 grps_req, (void *)buf, buf_size,
440 grps_added, cd);
441 }
442
443 /**
444 * ice_aq_cfg_sched_elems - configures scheduler elements
445 * @hw: pointer to the HW struct
446 * @elems_req: number of elements to configure
447 * @buf: pointer to buffer
448 * @buf_size: buffer size in bytes
449 * @elems_cfgd: returns total number of elements configured
450 * @cd: pointer to command details structure or NULL
451 *
452 * Configure scheduling elements (0x0403)
453 */
454 static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw * hw,u16 elems_req,struct ice_aqc_txsched_elem_data * buf,u16 buf_size,u16 * elems_cfgd,struct ice_sq_cd * cd)455 ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
456 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
457 u16 *elems_cfgd, struct ice_sq_cd *cd)
458 {
459 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
460 elems_req, (void *)buf, buf_size,
461 elems_cfgd, cd);
462 }
463
464 /**
465 * ice_aq_move_sched_elems - move scheduler elements
466 * @hw: pointer to the HW struct
467 * @grps_req: number of groups to move
468 * @buf: pointer to buffer
469 * @buf_size: buffer size in bytes
470 * @grps_movd: returns total number of groups moved
471 * @cd: pointer to command details structure or NULL
472 *
473 * Move scheduling elements (0x0408)
474 */
475 enum ice_status
ice_aq_move_sched_elems(struct ice_hw * hw,u16 grps_req,struct ice_aqc_move_elem * buf,u16 buf_size,u16 * grps_movd,struct ice_sq_cd * cd)476 ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
477 struct ice_aqc_move_elem *buf, u16 buf_size,
478 u16 *grps_movd, struct ice_sq_cd *cd)
479 {
480 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
481 grps_req, (void *)buf, buf_size,
482 grps_movd, cd);
483 }
484
485 /**
486 * ice_aq_suspend_sched_elems - suspend scheduler elements
487 * @hw: pointer to the HW struct
488 * @elems_req: number of elements to suspend
489 * @buf: pointer to buffer
490 * @buf_size: buffer size in bytes
491 * @elems_ret: returns total number of elements suspended
492 * @cd: pointer to command details structure or NULL
493 *
494 * Suspend scheduling elements (0x0409)
495 */
496 static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw * hw,u16 elems_req,__le32 * buf,u16 buf_size,u16 * elems_ret,struct ice_sq_cd * cd)497 ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
498 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
499 {
500 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
501 elems_req, (void *)buf, buf_size,
502 elems_ret, cd);
503 }
504
505 /**
506 * ice_aq_resume_sched_elems - resume scheduler elements
507 * @hw: pointer to the HW struct
508 * @elems_req: number of elements to resume
509 * @buf: pointer to buffer
510 * @buf_size: buffer size in bytes
511 * @elems_ret: returns total number of elements resumed
512 * @cd: pointer to command details structure or NULL
513 *
514 * resume scheduling elements (0x040A)
515 */
516 static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw * hw,u16 elems_req,__le32 * buf,u16 buf_size,u16 * elems_ret,struct ice_sq_cd * cd)517 ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
518 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
519 {
520 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
521 elems_req, (void *)buf, buf_size,
522 elems_ret, cd);
523 }
524
525 /**
526 * ice_aq_query_sched_res - query scheduler resource
527 * @hw: pointer to the HW struct
528 * @buf_size: buffer size in bytes
529 * @buf: pointer to buffer
530 * @cd: pointer to command details structure or NULL
531 *
532 * Query scheduler resource allocation (0x0412)
533 */
534 static enum ice_status
ice_aq_query_sched_res(struct ice_hw * hw,u16 buf_size,struct ice_aqc_query_txsched_res_resp * buf,struct ice_sq_cd * cd)535 ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
536 struct ice_aqc_query_txsched_res_resp *buf,
537 struct ice_sq_cd *cd)
538 {
539 struct ice_aq_desc desc;
540
541 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
542 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
543 }
544
545 /**
546 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
547 * @hw: pointer to the HW struct
548 * @num_nodes: number of nodes
549 * @node_teids: array of node teids to be suspended or resumed
550 * @suspend: true means suspend / false means resume
551 *
552 * This function suspends or resumes HW nodes
553 */
554 static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw * hw,u8 num_nodes,u32 * node_teids,bool suspend)555 ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
556 bool suspend)
557 {
558 u16 i, buf_size, num_elem_ret = 0;
559 enum ice_status status;
560 __le32 *buf;
561
562 buf_size = sizeof(*buf) * num_nodes;
563 buf = (__le32 *)ice_malloc(hw, buf_size);
564 if (!buf)
565 return ICE_ERR_NO_MEMORY;
566
567 for (i = 0; i < num_nodes; i++)
568 buf[i] = CPU_TO_LE32(node_teids[i]);
569
570 if (suspend)
571 status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
572 buf_size, &num_elem_ret,
573 NULL);
574 else
575 status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
576 buf_size, &num_elem_ret,
577 NULL);
578 if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
579 ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
580
581 ice_free(hw, buf);
582 return status;
583 }
584
585 /**
586 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
587 * @hw: pointer to the HW struct
588 * @vsi_handle: VSI handle
589 * @tc: TC number
590 * @new_numqs: number of queues
591 */
592 static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw * hw,u16 vsi_handle,u8 tc,u16 new_numqs)593 ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
594 {
595 struct ice_vsi_ctx *vsi_ctx;
596 struct ice_q_ctx *q_ctx;
597
598 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
599 if (!vsi_ctx)
600 return ICE_ERR_PARAM;
601 /* allocate LAN queue contexts */
602 if (!vsi_ctx->lan_q_ctx[tc]) {
603 vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
604 ice_calloc(hw, new_numqs, sizeof(*q_ctx));
605 if (!vsi_ctx->lan_q_ctx[tc])
606 return ICE_ERR_NO_MEMORY;
607 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
608 return ICE_SUCCESS;
609 }
610 /* num queues are increased, update the queue contexts */
611 if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
612 u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
613
614 q_ctx = (struct ice_q_ctx *)
615 ice_calloc(hw, new_numqs, sizeof(*q_ctx));
616 if (!q_ctx)
617 return ICE_ERR_NO_MEMORY;
618 ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
619 prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
620 ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
621 vsi_ctx->lan_q_ctx[tc] = q_ctx;
622 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
623 }
624 return ICE_SUCCESS;
625 }
626
627 /**
628 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
629 * @hw: pointer to the HW struct
630 * @vsi_handle: VSI handle
631 * @tc: TC number
632 * @new_numqs: number of queues
633 */
634 static enum ice_status
ice_alloc_rdma_q_ctx(struct ice_hw * hw,u16 vsi_handle,u8 tc,u16 new_numqs)635 ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
636 {
637 struct ice_vsi_ctx *vsi_ctx;
638 struct ice_q_ctx *q_ctx;
639
640 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
641 if (!vsi_ctx)
642 return ICE_ERR_PARAM;
643 /* allocate RDMA queue contexts */
644 if (!vsi_ctx->rdma_q_ctx[tc]) {
645 vsi_ctx->rdma_q_ctx[tc] = (struct ice_q_ctx *)
646 ice_calloc(hw, new_numqs, sizeof(*q_ctx));
647 if (!vsi_ctx->rdma_q_ctx[tc])
648 return ICE_ERR_NO_MEMORY;
649 vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
650 return ICE_SUCCESS;
651 }
652 /* num queues are increased, update the queue contexts */
653 if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
654 u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];
655
656 q_ctx = (struct ice_q_ctx *)
657 ice_calloc(hw, new_numqs, sizeof(*q_ctx));
658 if (!q_ctx)
659 return ICE_ERR_NO_MEMORY;
660 ice_memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
661 prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
662 ice_free(hw, vsi_ctx->rdma_q_ctx[tc]);
663 vsi_ctx->rdma_q_ctx[tc] = q_ctx;
664 vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
665 }
666 return ICE_SUCCESS;
667 }
668
669 /**
670 * ice_aq_rl_profile - performs a rate limiting task
671 * @hw: pointer to the HW struct
672 * @opcode: opcode for add, query, or remove profile(s)
673 * @num_profiles: the number of profiles
674 * @buf: pointer to buffer
675 * @buf_size: buffer size in bytes
676 * @num_processed: number of processed add or remove profile(s) to return
677 * @cd: pointer to command details structure
678 *
679 * RL profile function to add, query, or remove profile(s)
680 */
681 static enum ice_status
ice_aq_rl_profile(struct ice_hw * hw,enum ice_adminq_opc opcode,u16 num_profiles,struct ice_aqc_rl_profile_elem * buf,u16 buf_size,u16 * num_processed,struct ice_sq_cd * cd)682 ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
683 u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
684 u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
685 {
686 struct ice_aqc_rl_profile *cmd;
687 struct ice_aq_desc desc;
688 enum ice_status status;
689
690 cmd = &desc.params.rl_profile;
691
692 ice_fill_dflt_direct_cmd_desc(&desc, opcode);
693 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
694 cmd->num_profiles = CPU_TO_LE16(num_profiles);
695 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
696 if (!status && num_processed)
697 *num_processed = LE16_TO_CPU(cmd->num_processed);
698 return status;
699 }
700
701 /**
702 * ice_aq_add_rl_profile - adds rate limiting profile(s)
703 * @hw: pointer to the HW struct
704 * @num_profiles: the number of profile(s) to be add
705 * @buf: pointer to buffer
706 * @buf_size: buffer size in bytes
707 * @num_profiles_added: total number of profiles added to return
708 * @cd: pointer to command details structure
709 *
710 * Add RL profile (0x0410)
711 */
712 static enum ice_status
ice_aq_add_rl_profile(struct ice_hw * hw,u16 num_profiles,struct ice_aqc_rl_profile_elem * buf,u16 buf_size,u16 * num_profiles_added,struct ice_sq_cd * cd)713 ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
714 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
715 u16 *num_profiles_added, struct ice_sq_cd *cd)
716 {
717 return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
718 buf, buf_size, num_profiles_added, cd);
719 }
720
721 /**
722 * ice_aq_query_rl_profile - query rate limiting profile(s)
723 * @hw: pointer to the HW struct
724 * @num_profiles: the number of profile(s) to query
725 * @buf: pointer to buffer
726 * @buf_size: buffer size in bytes
727 * @cd: pointer to command details structure
728 *
729 * Query RL profile (0x0411)
730 */
731 enum ice_status
ice_aq_query_rl_profile(struct ice_hw * hw,u16 num_profiles,struct ice_aqc_rl_profile_elem * buf,u16 buf_size,struct ice_sq_cd * cd)732 ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
733 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
734 struct ice_sq_cd *cd)
735 {
736 return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
737 num_profiles, buf, buf_size, NULL, cd);
738 }
739
740 /**
741 * ice_aq_remove_rl_profile - removes RL profile(s)
742 * @hw: pointer to the HW struct
743 * @num_profiles: the number of profile(s) to remove
744 * @buf: pointer to buffer
745 * @buf_size: buffer size in bytes
746 * @num_profiles_removed: total number of profiles removed to return
747 * @cd: pointer to command details structure or NULL
748 *
749 * Remove RL profile (0x0415)
750 */
751 static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw * hw,u16 num_profiles,struct ice_aqc_rl_profile_elem * buf,u16 buf_size,u16 * num_profiles_removed,struct ice_sq_cd * cd)752 ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
753 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
754 u16 *num_profiles_removed, struct ice_sq_cd *cd)
755 {
756 return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
757 num_profiles, buf, buf_size,
758 num_profiles_removed, cd);
759 }
760
761 /**
762 * ice_sched_del_rl_profile - remove RL profile
763 * @hw: pointer to the HW struct
764 * @rl_info: rate limit profile information
765 *
766 * If the profile ID is not referenced anymore, it removes profile ID with
767 * its associated parameters from HW DB,and locally. The caller needs to
768 * hold scheduler lock.
769 */
770 static enum ice_status
ice_sched_del_rl_profile(struct ice_hw * hw,struct ice_aqc_rl_profile_info * rl_info)771 ice_sched_del_rl_profile(struct ice_hw *hw,
772 struct ice_aqc_rl_profile_info *rl_info)
773 {
774 struct ice_aqc_rl_profile_elem *buf;
775 u16 num_profiles_removed;
776 enum ice_status status;
777 u16 num_profiles = 1;
778
779 if (rl_info->prof_id_ref != 0)
780 return ICE_ERR_IN_USE;
781
782 /* Safe to remove profile ID */
783 buf = &rl_info->profile;
784 status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
785 &num_profiles_removed, NULL);
786 if (status || num_profiles_removed != num_profiles)
787 return ICE_ERR_CFG;
788
789 /* Delete stale entry now */
790 LIST_DEL(&rl_info->list_entry);
791 ice_free(hw, rl_info);
792 return status;
793 }
794
795 /**
796 * ice_sched_clear_rl_prof - clears RL prof entries
797 * @pi: port information structure
798 *
799 * This function removes all RL profile from HW as well as from SW DB.
800 */
ice_sched_clear_rl_prof(struct ice_port_info * pi)801 static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
802 {
803 u16 ln;
804 struct ice_hw *hw = pi->hw;
805
806 for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
807 struct ice_aqc_rl_profile_info *rl_prof_elem;
808 struct ice_aqc_rl_profile_info *rl_prof_tmp;
809
810 LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
811 &hw->rl_prof_list[ln],
812 ice_aqc_rl_profile_info, list_entry) {
813 enum ice_status status;
814
815 rl_prof_elem->prof_id_ref = 0;
816 status = ice_sched_del_rl_profile(hw, rl_prof_elem);
817 if (status) {
818 ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
819 /* On error, free mem required */
820 LIST_DEL(&rl_prof_elem->list_entry);
821 ice_free(hw, rl_prof_elem);
822 }
823 }
824 }
825 }
826
827 /**
828 * ice_sched_clear_agg - clears the aggregator related information
829 * @hw: pointer to the hardware structure
830 *
831 * This function removes aggregator list and free up aggregator related memory
832 * previously allocated.
833 */
ice_sched_clear_agg(struct ice_hw * hw)834 void ice_sched_clear_agg(struct ice_hw *hw)
835 {
836 struct ice_sched_agg_info *agg_info;
837 struct ice_sched_agg_info *atmp;
838
839 LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list,
840 ice_sched_agg_info,
841 list_entry) {
842 struct ice_sched_agg_vsi_info *agg_vsi_info;
843 struct ice_sched_agg_vsi_info *vtmp;
844
845 LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
846 &agg_info->agg_vsi_list,
847 ice_sched_agg_vsi_info, list_entry) {
848 LIST_DEL(&agg_vsi_info->list_entry);
849 ice_free(hw, agg_vsi_info);
850 }
851 LIST_DEL(&agg_info->list_entry);
852 ice_free(hw, agg_info);
853 }
854 }
855
856 /**
857 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
858 * @pi: port information structure
859 *
860 * This function removes all the nodes from HW as well as from SW DB.
861 */
ice_sched_clear_tx_topo(struct ice_port_info * pi)862 static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
863 {
864 if (!pi)
865 return;
866 /* remove RL profiles related lists */
867 ice_sched_clear_rl_prof(pi);
868 if (pi->root) {
869 ice_free_sched_node(pi, pi->root);
870 pi->root = NULL;
871 }
872 }
873
874 /**
875 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
876 * @pi: port information structure
877 *
878 * Cleanup scheduling elements from SW DB
879 */
ice_sched_clear_port(struct ice_port_info * pi)880 void ice_sched_clear_port(struct ice_port_info *pi)
881 {
882 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
883 return;
884
885 pi->port_state = ICE_SCHED_PORT_STATE_INIT;
886 ice_acquire_lock(&pi->sched_lock);
887 ice_sched_clear_tx_topo(pi);
888 ice_release_lock(&pi->sched_lock);
889 ice_destroy_lock(&pi->sched_lock);
890 }
891
/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports: free the cached
 * layer properties, clear the port topology, and reset the cached scheduler
 * capability fields.
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	/* layer_info was allocated by ice_sched_query_res_alloc() */
	if (hw->layer_info) {
		ice_free(hw, hw->layer_info);
		hw->layer_info = NULL;
	}

	/* no-op if the port is not in READY state */
	ice_sched_clear_port(hw->port_info);

	/* reset cached scheduler topology capabilities */
	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}
915
916 /**
917 * ice_aq_cfg_node_attr - configure nodes' per-cone flattening attributes
918 * @hw: pointer to the HW struct
919 * @num_nodes: the number of nodes whose attributes to configure
920 * @buf: pointer to buffer
921 * @buf_size: buffer size in bytes
922 * @cd: pointer to command details structure or NULL
923 *
924 * Configure Node Attributes (0x0417)
925 */
926 enum ice_status
ice_aq_cfg_node_attr(struct ice_hw * hw,u16 num_nodes,struct ice_aqc_node_attr_elem * buf,u16 buf_size,struct ice_sq_cd * cd)927 ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
928 struct ice_aqc_node_attr_elem *buf, u16 buf_size,
929 struct ice_sq_cd *cd)
930 {
931 struct ice_aqc_node_attr *cmd;
932 struct ice_aq_desc desc;
933
934 cmd = &desc.params.node_attr;
935 ice_fill_dflt_direct_cmd_desc(&desc,
936 ice_aqc_opc_cfg_node_attr);
937 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
938
939 cmd->num_entries = CPU_TO_LE16(num_nodes);
940 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
941 }
942
943 /**
944 * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
945 * @hw: pointer to the HW struct
946 * @num_l2_nodes: the number of L2 nodes whose CGDs to configure
947 * @buf: pointer to buffer
948 * @buf_size: buffer size in bytes
949 * @cd: pointer to command details structure or NULL
950 *
951 * Configure L2 Node CGD (0x0414)
952 */
953 enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw * hw,u16 num_l2_nodes,struct ice_aqc_cfg_l2_node_cgd_elem * buf,u16 buf_size,struct ice_sq_cd * cd)954 ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
955 struct ice_aqc_cfg_l2_node_cgd_elem *buf,
956 u16 buf_size, struct ice_sq_cd *cd)
957 {
958 struct ice_aqc_cfg_l2_node_cgd *cmd;
959 struct ice_aq_desc desc;
960
961 cmd = &desc.params.cfg_l2_node_cgd;
962 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd);
963 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
964
965 cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes);
966 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
967 }
968
/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 * @prealloc_nodes: preallocated nodes struct for software DB, or NULL to
 *                  have ice_sched_add_node() allocate them
 *
 * This function adds nodes to HW (via the Add Scheduling Elements AQ
 * command) as well as to SW DB for a given layer, and links the new nodes
 * into the per-TC sibling chain.
 */
enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid,
		    struct ice_sched_node **prealloc_nodes)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 buf_size;
	u32 teid;

	/* one request header plus num_nodes generic elements */
	buf_size = ice_struct_size(buf, generic, num_nodes);
	buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		/* new nodes start with the default CIR/EIR rate-limit
		 * profile and default bandwidth weight
		 */
		buf->generic[i].data.cir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
	}

	/* all nodes go out as a single group request */
	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status != ICE_SUCCESS || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		ice_free(hw, buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		if (prealloc_nodes)
			status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]);
		else
			status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);

		if (status != ICE_SUCCESS) {
			ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		/* FW assigned the TEID; locate the SW node just added */
		teid = LE32_TO_CPU(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	ice_free(hw, buf);
	return status;
}
1073
1074 /**
1075 * ice_sched_add_nodes_to_hw_layer - Add nodes to hw layer
1076 * @pi: port information structure
1077 * @tc_node: pointer to TC node
1078 * @parent: pointer to parent node
1079 * @layer: layer number to add nodes
1080 * @num_nodes: number of nodes to be added
1081 * @first_node_teid: pointer to the first node TEID
1082 * @num_nodes_added: pointer to number of nodes added
1083 *
1084 * Add nodes into specific hw layer.
1085 */
1086 static enum ice_status
ice_sched_add_nodes_to_hw_layer(struct ice_port_info * pi,struct ice_sched_node * tc_node,struct ice_sched_node * parent,u8 layer,u16 num_nodes,u32 * first_node_teid,u16 * num_nodes_added)1087 ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
1088 struct ice_sched_node *tc_node,
1089 struct ice_sched_node *parent, u8 layer,
1090 u16 num_nodes, u32 *first_node_teid,
1091 u16 *num_nodes_added)
1092 {
1093 u16 max_child_nodes;
1094
1095 *num_nodes_added = 0;
1096
1097 if (!num_nodes)
1098 return ICE_SUCCESS;
1099
1100 if (!parent || layer < pi->hw->sw_entry_point_layer)
1101 return ICE_ERR_PARAM;
1102
1103 /* max children per node per layer */
1104 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
1105
1106 /* current number of children + required nodes exceed max children */
1107 if ((parent->num_children + num_nodes) > max_child_nodes) {
1108 /* Fail if the parent is a TC node */
1109 if (parent == tc_node)
1110 return ICE_ERR_CFG;
1111 return ICE_ERR_MAX_LIMIT;
1112 }
1113
1114 return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
1115 num_nodes_added, first_node_teid, NULL);
1116 }
1117
/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer, retrying across sibling
 * parents when one parent's child capacity is exhausted
 * (ICE_ERR_MAX_LIMIT), until all requested nodes are placed or a
 * non-recoverable error occurs.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes = num_nodes;
	enum ice_status status = ICE_SUCCESS;
	u32 temp;

	*num_nodes_added = 0;
	while (*num_nodes_added < num_nodes) {
		u16 max_child_nodes, num_added = 0;

		status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
							 layer, new_num_nodes,
							 first_teid_ptr,
							 &num_added);
		if (status == ICE_SUCCESS)
			*num_nodes_added += num_added;
		/* added more nodes than requested ? */
		if (*num_nodes_added > num_nodes) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
				  *num_nodes_added);
			status = ICE_ERR_CFG;
			break;
		}
		/* break if all the nodes are added successfully */
		if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes))
			break;
		/* break if the error is not max limit */
		if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT)
			break;
		/* Exceeded the max children */
		max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
		} else {
			/* This parent is full, try the next sibling */
			parent = parent->sibling;
			/* Don't modify the first node TEID memory if the
			 * first node was added already in the above call.
			 * Instead send some temp memory for all other
			 * calls on later iterations.
			 */
			if (num_added)
				first_teid_ptr = &temp;

			new_num_nodes = num_nodes - *num_nodes_added;
		}
	}
	return status;
}
1186
1187 /**
1188 * ice_sched_get_qgrp_layer - get the current queue group layer number
1189 * @hw: pointer to the HW struct
1190 *
1191 * This function returns the current queue group layer number
1192 */
ice_sched_get_qgrp_layer(struct ice_hw * hw)1193 static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
1194 {
1195 /* It's always total layers - 1, the array is 0 relative so -2 */
1196 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1197 }
1198
1199 /**
1200 * ice_sched_get_vsi_layer - get the current VSI layer number
1201 * @hw: pointer to the HW struct
1202 *
1203 * This function returns the current VSI layer number
1204 */
ice_sched_get_vsi_layer(struct ice_hw * hw)1205 static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
1206 {
1207 /* Num Layers VSI layer
1208 * 9 6
1209 * 7 4
1210 * 5 or less sw_entry_point_layer
1211 */
1212 /* calculate the VSI layer based on number of layers. */
1213 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
1214 return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
1215 else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
1216 /* qgroup and VSI layers are same */
1217 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1218 return hw->sw_entry_point_layer;
1219 }
1220
1221 /**
1222 * ice_sched_get_agg_layer - get the current aggregator layer number
1223 * @hw: pointer to the HW struct
1224 *
1225 * This function returns the current aggregator layer number
1226 */
ice_sched_get_agg_layer(struct ice_hw * hw)1227 static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
1228 {
1229 /* Num Layers aggregator layer
1230 * 9 4
1231 * 7 or less sw_entry_point_layer
1232 */
1233 /* calculate the aggregator layer based on number of layers. */
1234 if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
1235 return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
1236 return hw->sw_entry_point_layer;
1237 }
1238
1239 /**
1240 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1241 * @pi: port information structure
1242 *
1243 * This function removes the leaf node that was created by the FW
1244 * during initialization
1245 */
ice_rm_dflt_leaf_node(struct ice_port_info * pi)1246 static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
1247 {
1248 struct ice_sched_node *node;
1249
1250 node = pi->root;
1251 while (node) {
1252 if (!node->num_children)
1253 break;
1254 node = node->children[0];
1255 }
1256 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
1257 u32 teid = LE32_TO_CPU(node->info.node_teid);
1258 enum ice_status status;
1259
1260 /* remove the default leaf node */
1261 status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
1262 if (!status)
1263 ice_free_sched_node(pi, node);
1264 }
1265 }
1266
/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization. The default leaf is removed first, then the
 * leftmost path is walked and the first node below the SW entry point that
 * is neither a TC nor the root port is freed (freeing a node releases its
 * subtree, so one removal suffices).
 */
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			ice_free_sched_node(pi, node);
			break;
		}

		/* stop at the bottom of the leftmost path */
		if (!node->num_children)
			break;
		node = node->children[0];
	}
}
1295
/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB. On any failure the partially built tree is freed before
 * returning.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw,
							 ICE_AQ_MAX_BUF_LEN);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = LE16_TO_CPU(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = LE16_TO_CPU(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	ice_init_lock(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(&hw->rl_prof_list[i]);

err_init_port:
	/* on failure, tear down whatever portion of the tree was built */
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	ice_free(hw, buf);
	return status;
}
1400
1401 /**
1402 * ice_sched_get_node - Get the struct ice_sched_node for given TEID
1403 * @pi: port information structure
1404 * @teid: Scheduler node TEID
1405 *
1406 * This function retrieves the ice_sched_node struct for given TEID from
1407 * the SW DB and returns it to the caller.
1408 */
ice_sched_get_node(struct ice_port_info * pi,u32 teid)1409 struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
1410 {
1411 struct ice_sched_node *node;
1412
1413 if (!pi)
1414 return NULL;
1415
1416 /* Find the node starting from root */
1417 ice_acquire_lock(&pi->sched_lock);
1418 node = ice_sched_find_node_by_teid(pi->root, teid);
1419 ice_release_lock(&pi->sched_lock);
1420
1421 if (!node)
1422 ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid);
1423
1424 return node;
1425 }
1426
/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * Query FW for allocated scheduler resources and store in HW struct.
 * A no-op if hw->layer_info is already populated from a previous call.
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = ICE_SUCCESS;
	__le16 max_sibl;
	u8 i;

	/* already queried - nothing to do */
	if (hw->layer_info)
		return status;

	buf = (struct ice_aqc_query_txsched_res_resp *)
		ice_malloc(hw, sizeof(*buf));
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	/* cache the topology dimensions reported by FW */
	hw->num_tx_sched_layers =
		(u8)LE16_TO_CPU(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		(u8)LE16_TO_CPU(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
		hw->max_children[i] = LE16_TO_CPU(max_sibl);
	}

	/* keep a private copy of the per-layer properties; freed in
	 * ice_sched_cleanup_all()
	 */
	hw->layer_info = (struct ice_aqc_layer_props *)
			 ice_memdup(hw, buf->layer_props,
				    (hw->num_tx_sched_layers *
				     sizeof(*hw->layer_info)),
				    ICE_NONDMA_TO_NONDMA);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	ice_free(hw, buf);
	return status;
}
1485
1486 /**
1487 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
1488 * @hw: pointer to the HW struct
1489 *
1490 * Determine the PSM clock frequency and store in HW struct
1491 */
ice_sched_get_psm_clk_freq(struct ice_hw * hw)1492 void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
1493 {
1494 u32 val, clk_src;
1495
1496 val = rd32(hw, GLGEN_CLKSTAT_SRC);
1497 clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
1498 GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
1499
1500 switch (clk_src) {
1501 case PSM_CLK_SRC_367_MHZ:
1502 hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
1503 break;
1504 case PSM_CLK_SRC_416_MHZ:
1505 hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
1506 break;
1507 case PSM_CLK_SRC_446_MHZ:
1508 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
1509 break;
1510 case PSM_CLK_SRC_390_MHZ:
1511 hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
1512 break;
1513
1514 /* default condition is not required as clk_src is restricted
1515 * to a 2-bit value from GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M mask.
1516 * The above switch statements cover the possible values of
1517 * this variable.
1518 */
1519 }
1520 }
1521
1522 /**
1523 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1524 * @hw: pointer to the HW struct
1525 * @base: pointer to the base node
1526 * @node: pointer to the node to search
1527 *
1528 * This function checks whether a given node is part of the base node
1529 * subtree or not
1530 */
1531 bool
ice_sched_find_node_in_subtree(struct ice_hw * hw,struct ice_sched_node * base,struct ice_sched_node * node)1532 ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1533 struct ice_sched_node *node)
1534 {
1535 u8 i;
1536
1537 for (i = 0; i < base->num_children; i++) {
1538 struct ice_sched_node *child = base->children[i];
1539
1540 if (node == child)
1541 return true;
1542
1543 if (child->tx_sched_layer > node->tx_sched_layer)
1544 return false;
1545
1546 /* this recursion is intentional, and wouldn't
1547 * go more than 8 calls
1548 */
1549 if (ice_sched_find_node_in_subtree(hw, child, node))
1550 return true;
1551 }
1552 return false;
1553 }
1554
/**
 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
 * @pi: port information structure
 * @vsi_node: software VSI handle
 * @qgrp_node: first queue group node identified for scanning
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node by scanning
 * qgrp_node and its siblings for the queue group with the fewest number
 * of queues currently assigned. Returns NULL only when @qgrp_node is NULL.
 */
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
			struct ice_sched_node *vsi_node,
			struct ice_sched_node *qgrp_node, u8 owner)
{
	struct ice_sched_node *min_qgrp;
	u8 min_children;

	if (!qgrp_node)
		return qgrp_node;
	/* the starting node is already empty - cannot do better */
	min_children = qgrp_node->num_children;
	if (!min_children)
		return qgrp_node;
	min_qgrp = qgrp_node;
	/* scan all queue groups until find a node which has less than the
	 * minimum number of children. This way all queue group nodes get
	 * equal number of shares and active. The bandwidth will be equally
	 * distributed across all queues.
	 */
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < min_children &&
			    qgrp_node->owner == owner) {
				/* replace the new min queue group node */
				min_qgrp = qgrp_node;
				min_children = min_qgrp->num_children;
				/* break if it has no children, */
				if (!min_children)
					break;
			}
		qgrp_node = qgrp_node->sibling;
	}
	return min_qgrp;
}
1601
/**
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node: the first
 * qgroup sibling under the VSI with spare child capacity and matching
 * owner is located, then the least-loaded candidate is chosen from it and
 * its siblings. Returns NULL when the VSI context or its node for @tc is
 * missing.
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node;
	struct ice_vsi_ctx *vsi_ctx;
	u8 qgrp_layer, vsi_layer;
	u16 max_children;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* validate invalid VSI ID */
	if (!vsi_node)
		return NULL;

	/* If the queue group and vsi layer are same then queues
	 * are all attached directly to VSI
	 */
	if (qgrp_layer == vsi_layer)
		return vsi_node;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

	/* Select the best queue group */
	return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
1652
1653 /**
1654 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
1655 * @pi: pointer to the port information structure
1656 * @tc_node: pointer to the TC node
1657 * @vsi_handle: software VSI handle
1658 *
1659 * This function retrieves a VSI node for a given VSI ID from a given
1660 * TC branch
1661 */
1662 struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info * pi,struct ice_sched_node * tc_node,u16 vsi_handle)1663 ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1664 u16 vsi_handle)
1665 {
1666 struct ice_sched_node *node;
1667 u8 vsi_layer;
1668
1669 vsi_layer = ice_sched_get_vsi_layer(pi->hw);
1670 node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
1671
1672 /* Check whether it already exists */
1673 while (node) {
1674 if (node->vsi_handle == vsi_handle)
1675 return node;
1676 node = node->sibling;
1677 }
1678
1679 return node;
1680 }
1681
1682 /**
1683 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
1684 * @pi: pointer to the port information structure
1685 * @tc_node: pointer to the TC node
1686 * @agg_id: aggregator ID
1687 *
1688 * This function retrieves an aggregator node for a given aggregator ID from
1689 * a given TC branch
1690 */
1691 static struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info * pi,struct ice_sched_node * tc_node,u32 agg_id)1692 ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1693 u32 agg_id)
1694 {
1695 struct ice_sched_node *node;
1696 struct ice_hw *hw = pi->hw;
1697 u8 agg_layer;
1698
1699 if (!hw)
1700 return NULL;
1701 agg_layer = ice_sched_get_agg_layer(hw);
1702 node = ice_sched_get_first_node(pi, tc_node, agg_layer);
1703
1704 /* Check whether it already exists */
1705 while (node) {
1706 if (node->agg_id == agg_id)
1707 return node;
1708 node = node->sibling;
1709 }
1710
1711 return node;
1712 }
1713
1714 /**
1715 * ice_sched_check_node - Compare node parameters between SW DB and HW DB
1716 * @hw: pointer to the HW struct
1717 * @node: pointer to the ice_sched_node struct
1718 *
1719 * This function queries and compares the HW element with SW DB node parameters
1720 */
ice_sched_check_node(struct ice_hw * hw,struct ice_sched_node * node)1721 static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
1722 {
1723 struct ice_aqc_txsched_elem_data buf;
1724 enum ice_status status;
1725 u32 node_teid;
1726
1727 node_teid = LE32_TO_CPU(node->info.node_teid);
1728 status = ice_sched_query_elem(hw, node_teid, &buf);
1729 if (status != ICE_SUCCESS)
1730 return false;
1731
1732 if (memcmp(&buf, &node->info, sizeof(buf))) {
1733 ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
1734 node_teid);
1735 return false;
1736 }
1737
1738 return true;
1739 }
1740
1741 /**
1742 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1743 * @hw: pointer to the HW struct
1744 * @num_qs: number of queues
1745 * @num_nodes: num nodes array
1746 *
1747 * This function calculates the number of VSI child nodes based on the
1748 * number of queues.
1749 */
1750 static void
ice_sched_calc_vsi_child_nodes(struct ice_hw * hw,u16 num_qs,u16 * num_nodes)1751 ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1752 {
1753 u16 num = num_qs;
1754 u8 i, qgl, vsil;
1755
1756 qgl = ice_sched_get_qgrp_layer(hw);
1757 vsil = ice_sched_get_vsi_layer(hw);
1758
1759 /* calculate num nodes from queue group to VSI layer */
1760 for (i = qgl; i > vsil; i--) {
1761 /* round to the next integer if there is a remainder */
1762 num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]);
1763
1764 /* need at least one node */
1765 num_nodes[i] = num ? num : 1;
1766 }
1767 }
1768
/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree, layer by layer from the
 * VSI layer down to the queue group layer. It gets called for LAN and RDMA
 * separately. Each batch of newly added nodes is tagged with @owner.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	for (i = vsil + 1; i <= qgl; i++) {
		enum ice_status status;

		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		/* a partial add is treated as failure */
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* mark the whole new sibling run with the owner */
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			/* nothing added at this layer: descend through the
			 * existing first child
			 */
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}
1825
/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @pi: pointer to the port info structure
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 * layers. It walks from the VSI layer up toward the SW entry point and stops
 * early once an existing intermediate node with spare capacity is found.
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(pi, tc_node, (u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children <
				    pi->hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* tree has one intermediate node to add this new VSI.
			 * So no need to calculate supported nodes for below
			 * layers.
			 */
			if (node)
				break;
			/* all the nodes are full, allocate a new one */
			num_nodes[i]++;
		}
}
1874
1875 /**
1876 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1877 * @pi: port information structure
1878 * @vsi_handle: software VSI handle
1879 * @tc_node: pointer to TC node
1880 * @num_nodes: pointer to num nodes array
1881 *
1882 * This function adds the VSI supported nodes into Tx tree including the
1883 * VSI, its parent and intermediate nodes in below layers
1884 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	/* add num_nodes[i] nodes at each layer from the SW entry point
	 * down to (and including) the VSI layer
	 */
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		enum ice_status status;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		/* a partial add counts as a failure too */
		if (status != ICE_SUCCESS || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		/* the node created at the VSI layer records the handle */
		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return ICE_SUCCESS;
}
1926
1927 /**
1928 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1929 * @pi: port information structure
1930 * @vsi_handle: software VSI handle
1931 * @tc: TC number
1932 *
1933 * This function adds a new VSI into scheduler tree
1934 */
1935 static enum ice_status
ice_sched_add_vsi_to_topo(struct ice_port_info * pi,u16 vsi_handle,u8 tc)1936 ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1937 {
1938 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1939 struct ice_sched_node *tc_node;
1940
1941 tc_node = ice_sched_get_tc_node(pi, tc);
1942 if (!tc_node)
1943 return ICE_ERR_PARAM;
1944
1945 /* calculate number of supported nodes needed for this VSI */
1946 ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
1947
1948 /* add VSI supported nodes to TC subtree */
1949 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1950 num_nodes);
1951 }
1952
1953 /**
1954 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1955 * @pi: port information structure
1956 * @vsi_handle: software VSI handle
1957 * @tc: TC number
1958 * @new_numqs: new number of max queues
1959 * @owner: owner of this subtree
1960 *
1961 * This function updates the VSI child nodes based on the number of queues
1962 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	/* the previous maximum is tracked per owner (LAN vs RDMA) */
	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		prev_numqs = vsi_ctx->sched.max_lanq[tc];
	else
		prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
	/* grow the per-queue context array before touching the tree */
	if (owner == ICE_SCHED_NODE_OWNER_LAN) {
		status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	} else {
		status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	}

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the max number of queue configuration all the time. Update the
	 * tree only if number of queues > previous number of queues. This may
	 * leave some extra nodes in the tree if number of queues < previous
	 * number but that wouldn't harm anything. Removing those extra nodes
	 * may complicate the code if those nodes are part of SRL or
	 * individually rate limited.
	 */
	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
					       new_num_nodes, owner);
	if (status)
		return status;
	/* record the new high-water mark only after the add succeeded */
	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		vsi_ctx->sched.max_lanq[tc] = new_numqs;
	else
		vsi_ctx->sched.max_rdmaq[tc] = new_numqs;

	return ICE_SUCCESS;
}
2024
2025 /**
2026 * ice_sched_cfg_vsi - configure the new/existing VSI
2027 * @pi: port information structure
2028 * @vsi_handle: software VSI handle
2029 * @tc: TC number
2030 * @maxqs: max number of queues
2031 * @owner: LAN or RDMA
2032 * @enable: TC enabled or disabled
2033 *
2034 * This function adds/updates VSI nodes based on the number of queues. If TC is
2035 * enabled and VSI is in suspended state then resume the VSI back. If TC is
2036 * disabled then suspend the VSI if it is not already.
2037 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;

	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);

	/* suspend the VSI if TC is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		/* re-lookup: the add path does not return the new node */
		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
		vsi_ctx->sched.max_rdmaq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}
2106
2107 /**
2108 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
2109 * @pi: port information structure
2110 * @vsi_handle: software VSI handle
2111 *
2112 * This function removes single aggregator VSI info entry from
2113 * aggregator list.
2114 */
static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg;
	struct ice_sched_agg_info *agg_tmp;

	/* search every aggregator for an entry matching this VSI handle
	 * and free the first (and only) one found
	 */
	LIST_FOR_EACH_ENTRY_SAFE(agg, agg_tmp, &pi->hw->agg_list,
				 ice_sched_agg_info,
				 list_entry) {
		struct ice_sched_agg_vsi_info *vsi_info;
		struct ice_sched_agg_vsi_info *vsi_tmp;

		LIST_FOR_EACH_ENTRY_SAFE(vsi_info, vsi_tmp,
					 &agg->agg_vsi_list,
					 ice_sched_agg_vsi_info, list_entry) {
			if (vsi_info->vsi_handle != vsi_handle)
				continue;

			LIST_DEL(&vsi_info->list_entry);
			ice_free(pi->hw, vsi_info);
			return;
		}
	}
}
2136
2137 /**
2138 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
2139 * @node: pointer to the sub-tree node
2140 *
2141 * This function checks for a leaf node presence in a given sub-tree node.
2142 */
ice_sched_is_leaf_node_present(struct ice_sched_node * node)2143 static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
2144 {
2145 u8 i;
2146
2147 for (i = 0; i < node->num_children; i++)
2148 if (ice_sched_is_leaf_node_present(node->children[i]))
2149 return true;
2150 /* check for a leaf node */
2151 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
2152 }
2153
2154 /**
2155 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
2156 * @pi: port information structure
2157 * @vsi_handle: software VSI handle
2158 * @owner: LAN or RDMA
2159 *
2160 * This function removes the VSI and its LAN or RDMA children nodes from the
2161 * scheduler tree.
2162 */
static enum ice_status
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_vsi_ctx *vsi_ctx;
	u8 i;

	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return status;
	ice_acquire_lock(&pi->sched_lock);
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		goto exit_sched_rm_vsi_cfg;

	ice_for_each_traffic_class(i) {
		struct ice_sched_node *vsi_node, *tc_node;
		u8 j = 0;

		tc_node = ice_sched_get_tc_node(pi, i);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* refuse removal while queue (leaf) nodes still exist */
		if (ice_sched_is_leaf_node_present(vsi_node)) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
			status = ICE_ERR_IN_USE;
			goto exit_sched_rm_vsi_cfg;
		}
		/* free only children belonging to the requested owner */
		while (j < vsi_node->num_children) {
			if (vsi_node->children[j]->owner == owner) {
				ice_free_sched_node(pi, vsi_node->children[j]);

				/* reset the counter again since the num
				 * children will be updated after node removal
				 */
				j = 0;
			} else {
				j++;
			}
		}
		/* remove the VSI if it has no children */
		if (!vsi_node->num_children) {
			ice_free_sched_node(pi, vsi_node);
			vsi_ctx->sched.vsi_node[i] = NULL;

			/* clean up aggregator related VSI info if any */
			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
		}
		/* reset the per-owner queue high-water mark for this TC */
		if (owner == ICE_SCHED_NODE_OWNER_LAN)
			vsi_ctx->sched.max_lanq[i] = 0;
		else
			vsi_ctx->sched.max_rdmaq[i] = 0;
	}
	status = ICE_SUCCESS;

exit_sched_rm_vsi_cfg:
	ice_release_lock(&pi->sched_lock);
	return status;
}
2226
2227 /**
2228 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
2229 * @pi: port information structure
2230 * @vsi_handle: software VSI handle
2231 *
2232 * This function clears the VSI and its LAN children nodes from scheduler tree
2233 * for all TCs.
2234 */
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	/* thin wrapper: removes only the LAN-owned subtree of this VSI */
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
2239
2240 /**
2241 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
2242 * @pi: port information structure
2243 * @vsi_handle: software VSI handle
2244 *
2245 * This function clears the VSI and its RDMA children nodes from scheduler tree
2246 * for all TCs.
2247 */
enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	/* thin wrapper: removes only the RDMA-owned subtree of this VSI */
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
2252
2253 /**
2254 * ice_sched_is_tree_balanced - Check tree nodes are identical or not
2255 * @hw: pointer to the HW struct
2256 * @node: pointer to the ice_sched_node struct
2257 *
2258 * This function compares all the nodes for a given tree against HW DB nodes
2259 * This function needs to be called with the port_info->sched_lock held
2260 */
ice_sched_is_tree_balanced(struct ice_hw * hw,struct ice_sched_node * node)2261 bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
2262 {
2263 u8 i;
2264
2265 /* start from the leaf node */
2266 for (i = 0; i < node->num_children; i++)
2267 /* Fail if node doesn't match with the SW DB
2268 * this recursion is intentional, and wouldn't
2269 * go more than 9 calls
2270 */
2271 if (!ice_sched_is_tree_balanced(hw, node->children[i]))
2272 return false;
2273
2274 return ice_sched_check_node(hw, node);
2275 }
2276
2277 /**
2278 * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID
2279 * @hw: pointer to the HW struct
2280 * @node_teid: node TEID
2281 * @buf: pointer to buffer
2282 * @buf_size: buffer size in bytes
2283 * @cd: pointer to command details structure or NULL
2284 *
2285 * This function retrieves the tree topology from the firmware for a given
2286 * node TEID to the root node.
2287 */
2288 enum ice_status
ice_aq_query_node_to_root(struct ice_hw * hw,u32 node_teid,struct ice_aqc_txsched_elem_data * buf,u16 buf_size,struct ice_sq_cd * cd)2289 ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
2290 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
2291 struct ice_sq_cd *cd)
2292 {
2293 struct ice_aqc_query_node_to_root *cmd;
2294 struct ice_aq_desc desc;
2295
2296 cmd = &desc.params.query_node_to_root;
2297 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
2298 cmd->teid = CPU_TO_LE32(node_teid);
2299 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2300 }
2301
2302 /**
2303 * ice_get_agg_info - get the aggregator ID
2304 * @hw: pointer to the hardware structure
2305 * @agg_id: aggregator ID
2306 *
2307 * This function validates aggregator ID. The function returns info if
2308 * aggregator ID is present in list otherwise it returns null.
2309 */
2310 static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw * hw,u32 agg_id)2311 ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
2312 {
2313 struct ice_sched_agg_info *agg_info;
2314
2315 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
2316 list_entry)
2317 if (agg_info->agg_id == agg_id)
2318 return agg_info;
2319
2320 return NULL;
2321 }
2322
2323 /**
2324 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
2325 * @hw: pointer to the HW struct
2326 * @node: pointer to a child node
2327 * @num_nodes: num nodes count array
2328 *
2329 * This function walks through the aggregator subtree to find a free parent
2330 * node
2331 */
2332 static struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw * hw,struct ice_sched_node * node,u16 * num_nodes)2333 ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
2334 u16 *num_nodes)
2335 {
2336 u8 l = node->tx_sched_layer;
2337 u8 vsil, i;
2338
2339 vsil = ice_sched_get_vsi_layer(hw);
2340
2341 /* Is it VSI parent layer ? */
2342 if (l == vsil - 1)
2343 return (node->num_children < hw->max_children[l]) ? node : NULL;
2344
2345 /* We have intermediate nodes. Let's walk through the subtree. If the
2346 * intermediate node has space to add a new node then clear the count
2347 */
2348 if (node->num_children < hw->max_children[l])
2349 num_nodes[l] = 0;
2350 /* The below recursive call is intentional and wouldn't go more than
2351 * 2 or 3 iterations.
2352 */
2353
2354 for (i = 0; i < node->num_children; i++) {
2355 struct ice_sched_node *parent;
2356
2357 parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
2358 num_nodes);
2359 if (parent)
2360 return parent;
2361 }
2362
2363 return NULL;
2364 }
2365
2366 /**
2367 * ice_sched_update_parent - update the new parent in SW DB
2368 * @new_parent: pointer to a new parent node
2369 * @node: pointer to a child node
2370 *
2371 * This function removes the child from the old parent and adds it to a new
2372 * parent
2373 */
2374 void
ice_sched_update_parent(struct ice_sched_node * new_parent,struct ice_sched_node * node)2375 ice_sched_update_parent(struct ice_sched_node *new_parent,
2376 struct ice_sched_node *node)
2377 {
2378 struct ice_sched_node *old_parent;
2379 u8 i, j;
2380
2381 old_parent = node->parent;
2382
2383 /* update the old parent children */
2384 for (i = 0; i < old_parent->num_children; i++)
2385 if (old_parent->children[i] == node) {
2386 for (j = i + 1; j < old_parent->num_children; j++)
2387 old_parent->children[j - 1] =
2388 old_parent->children[j];
2389 old_parent->num_children--;
2390 break;
2391 }
2392
2393 /* now move the node to a new parent */
2394 new_parent->children[new_parent->num_children++] = node;
2395 node->parent = new_parent;
2396 node->info.parent_teid = new_parent->info.node_teid;
2397 }
2398
2399 /**
2400 * ice_sched_move_nodes - move child nodes to a given parent
2401 * @pi: port information structure
2402 * @parent: pointer to parent node
2403 * @num_items: number of child nodes to be moved
2404 * @list: pointer to child node teids
2405 *
2406 * This function move the child nodes to a given parent.
2407 */
2408 enum ice_status
ice_sched_move_nodes(struct ice_port_info * pi,struct ice_sched_node * parent,u16 num_items,u32 * list)2409 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
2410 u16 num_items, u32 *list)
2411 {
2412 struct ice_aqc_move_elem *buf;
2413 struct ice_sched_node *node;
2414 enum ice_status status = ICE_SUCCESS;
2415 u16 i, grps_movd = 0;
2416 struct ice_hw *hw;
2417 u16 buf_len;
2418
2419 hw = pi->hw;
2420
2421 if (!parent || !num_items)
2422 return ICE_ERR_PARAM;
2423
2424 /* Does parent have enough space */
2425 if (parent->num_children + num_items >
2426 hw->max_children[parent->tx_sched_layer])
2427 return ICE_ERR_AQ_FULL;
2428
2429 buf_len = ice_struct_size(buf, teid, 1);
2430 buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len);
2431 if (!buf)
2432 return ICE_ERR_NO_MEMORY;
2433
2434 for (i = 0; i < num_items; i++) {
2435 node = ice_sched_find_node_by_teid(pi->root, list[i]);
2436 if (!node) {
2437 status = ICE_ERR_PARAM;
2438 goto move_err_exit;
2439 }
2440
2441 buf->hdr.src_parent_teid = node->info.parent_teid;
2442 buf->hdr.dest_parent_teid = parent->info.node_teid;
2443 buf->teid[0] = node->info.node_teid;
2444 buf->hdr.num_elems = CPU_TO_LE16(1);
2445 status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
2446 &grps_movd, NULL);
2447 if (status && grps_movd != 1) {
2448 status = ICE_ERR_CFG;
2449 goto move_err_exit;
2450 }
2451
2452 /* update the SW DB */
2453 ice_sched_update_parent(parent, node);
2454 }
2455
2456 move_err_exit:
2457 ice_free(hw, buf);
2458 return status;
2459 }
2460
2461 /**
2462 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
2463 * @pi: port information structure
2464 * @vsi_handle: software VSI handle
2465 * @agg_id: aggregator ID
2466 * @tc: TC number
2467 *
2468 * This function moves a VSI to an aggregator node or its subtree.
2469 * Intermediate nodes may be created if required.
2470 */
static enum ice_status
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
			  u8 tc)
{
	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u32 first_node_teid, vsi_teid;
	enum ice_status status;
	u16 num_nodes_added;
	u8 aggl, vsil, i;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return ICE_ERR_DOES_NOT_EXIST;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_DOES_NOT_EXIST;

	/* Is this VSI already part of given aggregator? */
	if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
		return ICE_SUCCESS;

	aggl = ice_sched_get_agg_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	/* set intermediate node count to 1 between aggregator and VSI layers */
	for (i = aggl + 1; i < vsil; i++)
		num_nodes[i] = 1;

	/* Check if the aggregator subtree has any free node to add the VSI */
	for (i = 0; i < agg_node->num_children; i++) {
		parent = ice_sched_get_free_vsi_parent(pi->hw,
						       agg_node->children[i],
						       num_nodes);
		if (parent)
			goto move_nodes;
	}

	/* no free slot found: add a chain of one new node per intermediate
	 * layer below the aggregator
	 */
	parent = agg_node;
	for (i = aggl + 1; i < vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		/* a partial add counts as a failure too */
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;
	}

move_nodes:
	/* finally move the VSI node under the selected/new parent */
	vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid);
	return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
}
2541
2542 /**
2543 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
2544 * @pi: port information structure
2545 * @agg_info: aggregator info
2546 * @tc: traffic class number
2547 * @rm_vsi_info: true or false
2548 *
2549 * This function move all the VSI(s) to the default aggregator and delete
2550 * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The
2551 * caller holds the scheduler lock.
2552 */
2553 static enum ice_status
ice_move_all_vsi_to_dflt_agg(struct ice_port_info * pi,struct ice_sched_agg_info * agg_info,u8 tc,bool rm_vsi_info)2554 ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
2555 struct ice_sched_agg_info *agg_info, u8 tc,
2556 bool rm_vsi_info)
2557 {
2558 struct ice_sched_agg_vsi_info *agg_vsi_info;
2559 struct ice_sched_agg_vsi_info *tmp;
2560 enum ice_status status = ICE_SUCCESS;
2561
2562 LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
2563 ice_sched_agg_vsi_info, list_entry) {
2564 u16 vsi_handle = agg_vsi_info->vsi_handle;
2565
2566 /* Move VSI to default aggregator */
2567 if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
2568 continue;
2569
2570 status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
2571 ICE_DFLT_AGG_ID, tc);
2572 if (status)
2573 break;
2574
2575 ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
2576 if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
2577 LIST_DEL(&agg_vsi_info->list_entry);
2578 ice_free(pi->hw, agg_vsi_info);
2579 }
2580 }
2581
2582 return status;
2583 }
2584
2585 /**
2586 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
2587 * @pi: port information structure
2588 * @node: node pointer
2589 *
2590 * This function checks whether the aggregator is attached with any VSI or not.
2591 */
2592 static bool
ice_sched_is_agg_inuse(struct ice_port_info * pi,struct ice_sched_node * node)2593 ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
2594 {
2595 u8 vsil, i;
2596
2597 vsil = ice_sched_get_vsi_layer(pi->hw);
2598 if (node->tx_sched_layer < vsil - 1) {
2599 for (i = 0; i < node->num_children; i++)
2600 if (ice_sched_is_agg_inuse(pi, node->children[i]))
2601 return true;
2602 return false;
2603 } else {
2604 return node->num_children ? true : false;
2605 }
2606 }
2607
2608 /**
2609 * ice_sched_rm_agg_cfg - remove the aggregator node
2610 * @pi: port information structure
2611 * @agg_id: aggregator ID
2612 * @tc: TC number
2613 *
2614 * This function removes the aggregator node and intermediate nodes if any
2615 * from the given TC
2616 */
2617 static enum ice_status
ice_sched_rm_agg_cfg(struct ice_port_info * pi,u32 agg_id,u8 tc)2618 ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2619 {
2620 struct ice_sched_node *tc_node, *agg_node;
2621 struct ice_hw *hw = pi->hw;
2622
2623 tc_node = ice_sched_get_tc_node(pi, tc);
2624 if (!tc_node)
2625 return ICE_ERR_CFG;
2626
2627 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2628 if (!agg_node)
2629 return ICE_ERR_DOES_NOT_EXIST;
2630
2631 /* Can't remove the aggregator node if it has children */
2632 if (ice_sched_is_agg_inuse(pi, agg_node))
2633 return ICE_ERR_IN_USE;
2634
2635 /* need to remove the whole subtree if aggregator node is the
2636 * only child.
2637 */
2638 while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
2639 struct ice_sched_node *parent = agg_node->parent;
2640
2641 if (!parent)
2642 return ICE_ERR_CFG;
2643
2644 if (parent->num_children > 1)
2645 break;
2646
2647 agg_node = parent;
2648 }
2649
2650 ice_free_sched_node(pi, agg_node);
2651 return ICE_SUCCESS;
2652 }
2653
2654 /**
2655 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
2656 * @pi: port information structure
2657 * @agg_info: aggregator ID
2658 * @tc: TC number
2659 * @rm_vsi_info: bool value true or false
2660 *
2661 * This function removes aggregator reference to VSI of given TC. It removes
2662 * the aggregator configuration completely for requested TC. The caller needs
2663 * to hold the scheduler lock.
2664 */
2665 static enum ice_status
ice_rm_agg_cfg_tc(struct ice_port_info * pi,struct ice_sched_agg_info * agg_info,u8 tc,bool rm_vsi_info)2666 ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
2667 u8 tc, bool rm_vsi_info)
2668 {
2669 enum ice_status status = ICE_SUCCESS;
2670
2671 /* If nothing to remove - return success */
2672 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2673 goto exit_rm_agg_cfg_tc;
2674
2675 status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
2676 if (status)
2677 goto exit_rm_agg_cfg_tc;
2678
2679 /* Delete aggregator node(s) */
2680 status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
2681 if (status)
2682 goto exit_rm_agg_cfg_tc;
2683
2684 ice_clear_bit(tc, agg_info->tc_bitmap);
2685 exit_rm_agg_cfg_tc:
2686 return status;
2687 }
2688
2689 /**
2690 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
2691 * @pi: port information structure
2692 * @agg_id: aggregator ID
2693 * @tc_bitmap: 8 bits TC bitmap
2694 *
2695 * Save aggregator TC bitmap. This function needs to be called with scheduler
2696 * lock held.
2697 */
2698 static enum ice_status
ice_save_agg_tc_bitmap(struct ice_port_info * pi,u32 agg_id,ice_bitmap_t * tc_bitmap)2699 ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
2700 ice_bitmap_t *tc_bitmap)
2701 {
2702 struct ice_sched_agg_info *agg_info;
2703
2704 agg_info = ice_get_agg_info(pi->hw, agg_id);
2705 if (!agg_info)
2706 return ICE_ERR_PARAM;
2707 ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
2708 ICE_MAX_TRAFFIC_CLASS);
2709 return ICE_SUCCESS;
2710 }
2711
2712 /**
2713 * ice_sched_add_agg_cfg - create an aggregator node
2714 * @pi: port information structure
2715 * @agg_id: aggregator ID
2716 * @tc: TC number
2717 *
2718 * This function creates an aggregator node and intermediate nodes if required
2719 * for the given TC
2720 */
static enum ice_status
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *parent, *agg_node, *tc_node;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_nodes_added;
	u8 i, aggl;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	/* Does Agg node already exist ? */
	if (agg_node)
		return status;

	aggl = ice_sched_get_agg_layer(hw);

	/* need one node in Agg layer */
	num_nodes[aggl] = 1;

	/* Check whether the intermediate nodes have space to add the
	 * new aggregator. If they are full, then SW needs to allocate a new
	 * intermediate node on those layers
	 */
	for (i = hw->sw_entry_point_layer; i < aggl; i++) {
		parent = ice_sched_get_first_node(pi, tc_node, i);

		/* scan all the siblings */
		while (parent) {
			if (parent->num_children < hw->max_children[i])
				break;
			parent = parent->sibling;
		}

		/* all the nodes are full, reserve one for this layer */
		if (!parent)
			num_nodes[i]++;
	}

	/* add the aggregator node */
	parent = tc_node;
	for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		/* a partial add counts as a failure too */
		if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* register aggregator ID with the aggregator node */
			if (parent && i == aggl)
				parent->agg_id = agg_id;
		} else {
			/* nothing added at this layer; descend through the
			 * existing first child
			 */
			parent = parent->children[0];
		}
	}

	return ICE_SUCCESS;
}
2794
2795 /**
2796 * ice_sched_cfg_agg - configure aggregator node
2797 * @pi: port information structure
2798 * @agg_id: aggregator ID
2799 * @agg_type: aggregator type queue, VSI, or aggregator group
2800 * @tc_bitmap: bits TC bitmap
2801 *
2802 * It registers a unique aggregator node into scheduler services. It
2803 * allows a user to register with a unique ID to track it's resources.
2804 * The aggregator type determines if this is a queue group, VSI group
2805 * or aggregator group. It then creates the aggregator node(s) for requested
2806 * TC(s) or removes an existing aggregator node including its configuration
2807 * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
2808 * resources and remove aggregator ID.
2809 * This function needs to be called with scheduler lock held.
2810 */
2811 static enum ice_status
ice_sched_cfg_agg(struct ice_port_info * pi,u32 agg_id,enum ice_agg_type agg_type,ice_bitmap_t * tc_bitmap)2812 ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
2813 enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
2814 {
2815 struct ice_sched_agg_info *agg_info;
2816 enum ice_status status = ICE_SUCCESS;
2817 struct ice_hw *hw = pi->hw;
2818 u8 tc;
2819
2820 agg_info = ice_get_agg_info(hw, agg_id);
2821 if (!agg_info) {
2822 /* Create new entry for new aggregator ID */
2823 agg_info = (struct ice_sched_agg_info *)
2824 ice_malloc(hw, sizeof(*agg_info));
2825 if (!agg_info)
2826 return ICE_ERR_NO_MEMORY;
2827
2828 agg_info->agg_id = agg_id;
2829 agg_info->agg_type = agg_type;
2830 agg_info->tc_bitmap[0] = 0;
2831
2832 /* Initialize the aggregator VSI list head */
2833 INIT_LIST_HEAD(&agg_info->agg_vsi_list);
2834
2835 /* Add new entry in aggregator list */
2836 LIST_ADD(&agg_info->list_entry, &hw->agg_list);
2837 }
2838 /* Create aggregator node(s) for requested TC(s) */
2839 ice_for_each_traffic_class(tc) {
2840 if (!ice_is_tc_ena(*tc_bitmap, tc)) {
2841 /* Delete aggregator cfg TC if it exists previously */
2842 status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
2843 if (status)
2844 break;
2845 continue;
2846 }
2847
2848 /* Check if aggregator node for TC already exists */
2849 if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2850 continue;
2851
2852 /* Create new aggregator node for TC */
2853 status = ice_sched_add_agg_cfg(pi, agg_id, tc);
2854 if (status)
2855 break;
2856
2857 /* Save aggregator node's TC information */
2858 ice_set_bit(tc, agg_info->tc_bitmap);
2859 }
2860
2861 return status;
2862 }
2863
2864 /**
2865 * ice_cfg_agg - config aggregator node
2866 * @pi: port information structure
2867 * @agg_id: aggregator ID
2868 * @agg_type: aggregator type queue, VSI, or aggregator group
2869 * @tc_bitmap: bits TC bitmap
2870 *
2871 * This function configures aggregator node(s).
2872 */
2873 enum ice_status
ice_cfg_agg(struct ice_port_info * pi,u32 agg_id,enum ice_agg_type agg_type,u8 tc_bitmap)2874 ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
2875 u8 tc_bitmap)
2876 {
2877 ice_bitmap_t bitmap = tc_bitmap;
2878 enum ice_status status;
2879
2880 ice_acquire_lock(&pi->sched_lock);
2881 status = ice_sched_cfg_agg(pi, agg_id, agg_type,
2882 (ice_bitmap_t *)&bitmap);
2883 if (!status)
2884 status = ice_save_agg_tc_bitmap(pi, agg_id,
2885 (ice_bitmap_t *)&bitmap);
2886 ice_release_lock(&pi->sched_lock);
2887 return status;
2888 }
2889
2890 /**
2891 * ice_get_agg_vsi_info - get the aggregator ID
2892 * @agg_info: aggregator info
2893 * @vsi_handle: software VSI handle
2894 *
2895 * The function returns aggregator VSI info based on VSI handle. This function
2896 * needs to be called with scheduler lock held.
2897 */
2898 static struct ice_sched_agg_vsi_info *
ice_get_agg_vsi_info(struct ice_sched_agg_info * agg_info,u16 vsi_handle)2899 ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
2900 {
2901 struct ice_sched_agg_vsi_info *agg_vsi_info;
2902
2903 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
2904 ice_sched_agg_vsi_info, list_entry)
2905 if (agg_vsi_info->vsi_handle == vsi_handle)
2906 return agg_vsi_info;
2907
2908 return NULL;
2909 }
2910
2911 /**
2912 * ice_get_vsi_agg_info - get the aggregator info of VSI
2913 * @hw: pointer to the hardware structure
2914 * @vsi_handle: Sw VSI handle
2915 *
2916 * The function returns aggregator info of VSI represented via vsi_handle. The
2917 * VSI has in this case a different aggregator than the default one. This
2918 * function needs to be called with scheduler lock held.
2919 */
2920 static struct ice_sched_agg_info *
ice_get_vsi_agg_info(struct ice_hw * hw,u16 vsi_handle)2921 ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
2922 {
2923 struct ice_sched_agg_info *agg_info;
2924
2925 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
2926 list_entry) {
2927 struct ice_sched_agg_vsi_info *agg_vsi_info;
2928
2929 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2930 if (agg_vsi_info)
2931 return agg_info;
2932 }
2933 return NULL;
2934 }
2935
2936 /**
2937 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
2938 * @pi: port information structure
2939 * @agg_id: aggregator ID
2940 * @vsi_handle: software VSI handle
2941 * @tc_bitmap: TC bitmap of enabled TC(s)
2942 *
2943 * Save VSI to aggregator TC bitmap. This function needs to call with scheduler
2944 * lock held.
2945 */
2946 static enum ice_status
ice_save_agg_vsi_tc_bitmap(struct ice_port_info * pi,u32 agg_id,u16 vsi_handle,ice_bitmap_t * tc_bitmap)2947 ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2948 ice_bitmap_t *tc_bitmap)
2949 {
2950 struct ice_sched_agg_vsi_info *agg_vsi_info;
2951 struct ice_sched_agg_info *agg_info;
2952
2953 agg_info = ice_get_agg_info(pi->hw, agg_id);
2954 if (!agg_info)
2955 return ICE_ERR_PARAM;
2956 /* check if entry already exist */
2957 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2958 if (!agg_vsi_info)
2959 return ICE_ERR_PARAM;
2960 ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
2961 ICE_MAX_TRAFFIC_CLASS);
2962 return ICE_SUCCESS;
2963 }
2964
2965 /**
2966 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
2967 * @pi: port information structure
2968 * @agg_id: aggregator ID
2969 * @vsi_handle: software VSI handle
2970 * @tc_bitmap: TC bitmap of enabled TC(s)
2971 *
2972 * This function moves VSI to a new or default aggregator node. If VSI is
2973 * already associated to the aggregator node then no operation is performed on
2974 * the tree. This function needs to be called with scheduler lock held.
2975 */
2976 static enum ice_status
ice_sched_assoc_vsi_to_agg(struct ice_port_info * pi,u32 agg_id,u16 vsi_handle,ice_bitmap_t * tc_bitmap)2977 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
2978 u16 vsi_handle, ice_bitmap_t *tc_bitmap)
2979 {
2980 struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
2981 struct ice_sched_agg_info *agg_info, *old_agg_info;
2982 enum ice_status status = ICE_SUCCESS;
2983 struct ice_hw *hw = pi->hw;
2984 u8 tc;
2985
2986 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2987 return ICE_ERR_PARAM;
2988 agg_info = ice_get_agg_info(hw, agg_id);
2989 if (!agg_info)
2990 return ICE_ERR_PARAM;
2991 /* If the vsi is already part of another aggregator then update
2992 * its vsi info list
2993 */
2994 old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
2995 if (old_agg_info && old_agg_info != agg_info) {
2996 struct ice_sched_agg_vsi_info *vtmp;
2997
2998 LIST_FOR_EACH_ENTRY_SAFE(old_agg_vsi_info, vtmp,
2999 &old_agg_info->agg_vsi_list,
3000 ice_sched_agg_vsi_info, list_entry)
3001 if (old_agg_vsi_info->vsi_handle == vsi_handle)
3002 break;
3003 }
3004
3005 /* check if entry already exist */
3006 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
3007 if (!agg_vsi_info) {
3008 /* Create new entry for VSI under aggregator list */
3009 agg_vsi_info = (struct ice_sched_agg_vsi_info *)
3010 ice_malloc(hw, sizeof(*agg_vsi_info));
3011 if (!agg_vsi_info)
3012 return ICE_ERR_PARAM;
3013
3014 /* add VSI ID into the aggregator list */
3015 agg_vsi_info->vsi_handle = vsi_handle;
3016 LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
3017 }
3018 /* Move VSI node to new aggregator node for requested TC(s) */
3019 ice_for_each_traffic_class(tc) {
3020 if (!ice_is_tc_ena(*tc_bitmap, tc))
3021 continue;
3022
3023 /* Move VSI to new aggregator */
3024 status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
3025 if (status)
3026 break;
3027
3028 ice_set_bit(tc, agg_vsi_info->tc_bitmap);
3029 if (old_agg_vsi_info)
3030 ice_clear_bit(tc, old_agg_vsi_info->tc_bitmap);
3031 }
3032 if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
3033 LIST_DEL(&old_agg_vsi_info->list_entry);
3034 ice_free(pi->hw, old_agg_vsi_info);
3035 }
3036 return status;
3037 }
3038
3039 /**
3040 * ice_sched_rm_unused_rl_prof - remove unused RL profile
3041 * @hw: pointer to the hardware structure
3042 *
3043 * This function removes unused rate limit profiles from the HW and
3044 * SW DB. The caller needs to hold scheduler lock.
3045 */
ice_sched_rm_unused_rl_prof(struct ice_hw * hw)3046 static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
3047 {
3048 u16 ln;
3049
3050 for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
3051 struct ice_aqc_rl_profile_info *rl_prof_elem;
3052 struct ice_aqc_rl_profile_info *rl_prof_tmp;
3053
3054 LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
3055 &hw->rl_prof_list[ln],
3056 ice_aqc_rl_profile_info, list_entry) {
3057 if (!ice_sched_del_rl_profile(hw, rl_prof_elem))
3058 ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n");
3059 }
3060 }
3061 }
3062
3063 /**
3064 * ice_sched_update_elem - update element
3065 * @hw: pointer to the HW struct
3066 * @node: pointer to node
3067 * @info: node info to update
3068 *
3069 * Update the HW DB, and local SW DB of node. Update the scheduling
3070 * parameters of node from argument info data buffer (Info->data buf) and
3071 * returns success or error on config sched element failure. The caller
3072 * needs to hold scheduler lock.
3073 */
3074 static enum ice_status
ice_sched_update_elem(struct ice_hw * hw,struct ice_sched_node * node,struct ice_aqc_txsched_elem_data * info)3075 ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
3076 struct ice_aqc_txsched_elem_data *info)
3077 {
3078 struct ice_aqc_txsched_elem_data buf;
3079 enum ice_status status;
3080 u16 elem_cfgd = 0;
3081 u16 num_elems = 1;
3082
3083 buf = *info;
3084 /* For TC nodes, CIR config is not supported */
3085 if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_TC)
3086 buf.data.valid_sections &= ~ICE_AQC_ELEM_VALID_CIR;
3087 /* Parent TEID is reserved field in this aq call */
3088 buf.parent_teid = 0;
3089 /* Element type is reserved field in this aq call */
3090 buf.data.elem_type = 0;
3091 /* Flags is reserved field in this aq call */
3092 buf.data.flags = 0;
3093
3094 /* Update HW DB */
3095 /* Configure element node */
3096 status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
3097 &elem_cfgd, NULL);
3098 if (status || elem_cfgd != num_elems) {
3099 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
3100 return ICE_ERR_CFG;
3101 }
3102
3103 /* Config success case */
3104 /* Now update local SW DB */
3105 /* Only copy the data portion of info buffer */
3106 node->info.data = info->data;
3107 return status;
3108 }
3109
3110 /**
3111 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
3112 * @hw: pointer to the HW struct
3113 * @node: sched node to configure
3114 * @rl_type: rate limit type CIR, EIR, or shared
3115 * @bw_alloc: BW weight/allocation
3116 *
3117 * This function configures node element's BW allocation.
3118 */
3119 static enum ice_status
ice_sched_cfg_node_bw_alloc(struct ice_hw * hw,struct ice_sched_node * node,enum ice_rl_type rl_type,u16 bw_alloc)3120 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
3121 enum ice_rl_type rl_type, u16 bw_alloc)
3122 {
3123 struct ice_aqc_txsched_elem_data buf;
3124 struct ice_aqc_txsched_elem *data;
3125 enum ice_status status;
3126
3127 buf = node->info;
3128 data = &buf.data;
3129 if (rl_type == ICE_MIN_BW) {
3130 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
3131 data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
3132 } else if (rl_type == ICE_MAX_BW) {
3133 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
3134 data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
3135 } else {
3136 return ICE_ERR_PARAM;
3137 }
3138
3139 /* Configure element */
3140 status = ice_sched_update_elem(hw, node, &buf);
3141 return status;
3142 }
3143
3144 /**
3145 * ice_move_vsi_to_agg - moves VSI to new or default aggregator
3146 * @pi: port information structure
3147 * @agg_id: aggregator ID
3148 * @vsi_handle: software VSI handle
3149 * @tc_bitmap: TC bitmap of enabled TC(s)
3150 *
3151 * Move or associate VSI to a new or default aggregator node.
3152 */
3153 enum ice_status
ice_move_vsi_to_agg(struct ice_port_info * pi,u32 agg_id,u16 vsi_handle,u8 tc_bitmap)3154 ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
3155 u8 tc_bitmap)
3156 {
3157 ice_bitmap_t bitmap = tc_bitmap;
3158 enum ice_status status;
3159
3160 ice_acquire_lock(&pi->sched_lock);
3161 status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
3162 (ice_bitmap_t *)&bitmap);
3163 if (!status)
3164 status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
3165 (ice_bitmap_t *)&bitmap);
3166 ice_release_lock(&pi->sched_lock);
3167 return status;
3168 }
3169
3170 /**
3171 * ice_rm_agg_cfg - remove aggregator configuration
3172 * @pi: port information structure
3173 * @agg_id: aggregator ID
3174 *
3175 * This function removes aggregator reference to VSI and delete aggregator ID
3176 * info. It removes the aggregator configuration completely.
3177 */
ice_rm_agg_cfg(struct ice_port_info * pi,u32 agg_id)3178 enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
3179 {
3180 struct ice_sched_agg_info *agg_info;
3181 enum ice_status status = ICE_SUCCESS;
3182 u8 tc;
3183
3184 ice_acquire_lock(&pi->sched_lock);
3185 agg_info = ice_get_agg_info(pi->hw, agg_id);
3186 if (!agg_info) {
3187 status = ICE_ERR_DOES_NOT_EXIST;
3188 goto exit_ice_rm_agg_cfg;
3189 }
3190
3191 ice_for_each_traffic_class(tc) {
3192 status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true);
3193 if (status)
3194 goto exit_ice_rm_agg_cfg;
3195 }
3196
3197 if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) {
3198 status = ICE_ERR_IN_USE;
3199 goto exit_ice_rm_agg_cfg;
3200 }
3201
3202 /* Safe to delete entry now */
3203 LIST_DEL(&agg_info->list_entry);
3204 ice_free(pi->hw, agg_info);
3205
3206 /* Remove unused RL profile IDs from HW and SW DB */
3207 ice_sched_rm_unused_rl_prof(pi->hw);
3208
3209 exit_ice_rm_agg_cfg:
3210 ice_release_lock(&pi->sched_lock);
3211 return status;
3212 }
3213
3214 /**
3215 * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information
3216 * @bw_t_info: bandwidth type information structure
3217 * @bw_alloc: Bandwidth allocation information
3218 *
3219 * Save or clear CIR BW alloc information (bw_alloc) in the passed param
3220 * bw_t_info.
3221 */
3222 static void
ice_set_clear_cir_bw_alloc(struct ice_bw_type_info * bw_t_info,u16 bw_alloc)3223 ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
3224 {
3225 bw_t_info->cir_bw.bw_alloc = bw_alloc;
3226 if (bw_t_info->cir_bw.bw_alloc)
3227 ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
3228 else
3229 ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
3230 }
3231
3232 /**
3233 * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information
3234 * @bw_t_info: bandwidth type information structure
3235 * @bw_alloc: Bandwidth allocation information
3236 *
3237 * Save or clear EIR BW alloc information (bw_alloc) in the passed param
3238 * bw_t_info.
3239 */
3240 static void
ice_set_clear_eir_bw_alloc(struct ice_bw_type_info * bw_t_info,u16 bw_alloc)3241 ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
3242 {
3243 bw_t_info->eir_bw.bw_alloc = bw_alloc;
3244 if (bw_t_info->eir_bw.bw_alloc)
3245 ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
3246 else
3247 ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
3248 }
3249
3250 /**
3251 * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information
3252 * @pi: port information structure
3253 * @vsi_handle: sw VSI handle
3254 * @tc: traffic class
3255 * @rl_type: rate limit type min or max
3256 * @bw_alloc: Bandwidth allocation information
3257 *
3258 * Save BW alloc information of VSI type node for post replay use.
3259 */
3260 static enum ice_status
ice_sched_save_vsi_bw_alloc(struct ice_port_info * pi,u16 vsi_handle,u8 tc,enum ice_rl_type rl_type,u16 bw_alloc)3261 ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3262 enum ice_rl_type rl_type, u16 bw_alloc)
3263 {
3264 struct ice_vsi_ctx *vsi_ctx;
3265
3266 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3267 return ICE_ERR_PARAM;
3268 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3269 if (!vsi_ctx)
3270 return ICE_ERR_PARAM;
3271 switch (rl_type) {
3272 case ICE_MIN_BW:
3273 ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
3274 bw_alloc);
3275 break;
3276 case ICE_MAX_BW:
3277 ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
3278 bw_alloc);
3279 break;
3280 default:
3281 return ICE_ERR_PARAM;
3282 }
3283 return ICE_SUCCESS;
3284 }
3285
3286 /**
3287 * ice_set_clear_cir_bw - set or clear CIR BW
3288 * @bw_t_info: bandwidth type information structure
3289 * @bw: bandwidth in Kbps - Kilo bits per sec
3290 *
3291 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
3292 */
ice_set_clear_cir_bw(struct ice_bw_type_info * bw_t_info,u32 bw)3293 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3294 {
3295 if (bw == ICE_SCHED_DFLT_BW) {
3296 ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
3297 bw_t_info->cir_bw.bw = 0;
3298 } else {
3299 /* Save type of BW information */
3300 ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
3301 bw_t_info->cir_bw.bw = bw;
3302 }
3303 }
3304
3305 /**
3306 * ice_set_clear_eir_bw - set or clear EIR BW
3307 * @bw_t_info: bandwidth type information structure
3308 * @bw: bandwidth in Kbps - Kilo bits per sec
3309 *
3310 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
3311 */
ice_set_clear_eir_bw(struct ice_bw_type_info * bw_t_info,u32 bw)3312 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3313 {
3314 if (bw == ICE_SCHED_DFLT_BW) {
3315 ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3316 bw_t_info->eir_bw.bw = 0;
3317 } else {
3318 /* save EIR BW information */
3319 ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
3320 bw_t_info->eir_bw.bw = bw;
3321 }
3322 }
3323
3324 /**
3325 * ice_set_clear_shared_bw - set or clear shared BW
3326 * @bw_t_info: bandwidth type information structure
3327 * @bw: bandwidth in Kbps - Kilo bits per sec
3328 *
3329 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
3330 */
ice_set_clear_shared_bw(struct ice_bw_type_info * bw_t_info,u32 bw)3331 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
3332 {
3333 if (bw == ICE_SCHED_DFLT_BW) {
3334 ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3335 bw_t_info->shared_bw = 0;
3336 } else {
3337 /* save shared BW information */
3338 ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
3339 bw_t_info->shared_bw = bw;
3340 }
3341 }
3342
3343 /**
3344 * ice_sched_save_vsi_bw - save VSI node's BW information
3345 * @pi: port information structure
3346 * @vsi_handle: sw VSI handle
3347 * @tc: traffic class
3348 * @rl_type: rate limit type min, max, or shared
3349 * @bw: bandwidth in Kbps - Kilo bits per sec
3350 *
3351 * Save BW information of VSI type node for post replay use.
3352 */
3353 static enum ice_status
ice_sched_save_vsi_bw(struct ice_port_info * pi,u16 vsi_handle,u8 tc,enum ice_rl_type rl_type,u32 bw)3354 ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3355 enum ice_rl_type rl_type, u32 bw)
3356 {
3357 struct ice_vsi_ctx *vsi_ctx;
3358
3359 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3360 return ICE_ERR_PARAM;
3361 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3362 if (!vsi_ctx)
3363 return ICE_ERR_PARAM;
3364 switch (rl_type) {
3365 case ICE_MIN_BW:
3366 ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3367 break;
3368 case ICE_MAX_BW:
3369 ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3370 break;
3371 case ICE_SHARED_BW:
3372 ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
3373 break;
3374 default:
3375 return ICE_ERR_PARAM;
3376 }
3377 return ICE_SUCCESS;
3378 }
3379
3380 /**
3381 * ice_set_clear_prio - set or clear priority information
3382 * @bw_t_info: bandwidth type information structure
3383 * @prio: priority to save
3384 *
3385 * Save or clear priority (prio) in the passed param bw_t_info.
3386 */
ice_set_clear_prio(struct ice_bw_type_info * bw_t_info,u8 prio)3387 static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
3388 {
3389 bw_t_info->generic = prio;
3390 if (bw_t_info->generic)
3391 ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
3392 else
3393 ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
3394 }
3395
3396 /**
3397 * ice_sched_save_vsi_prio - save VSI node's priority information
3398 * @pi: port information structure
3399 * @vsi_handle: Software VSI handle
3400 * @tc: traffic class
3401 * @prio: priority to save
3402 *
3403 * Save priority information of VSI type node for post replay use.
3404 */
3405 static enum ice_status
ice_sched_save_vsi_prio(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u8 prio)3406 ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3407 u8 prio)
3408 {
3409 struct ice_vsi_ctx *vsi_ctx;
3410
3411 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3412 return ICE_ERR_PARAM;
3413 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
3414 if (!vsi_ctx)
3415 return ICE_ERR_PARAM;
3416 if (tc >= ICE_MAX_TRAFFIC_CLASS)
3417 return ICE_ERR_PARAM;
3418 ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
3419 return ICE_SUCCESS;
3420 }
3421
3422 /**
3423 * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information
3424 * @pi: port information structure
3425 * @agg_id: node aggregator ID
3426 * @tc: traffic class
3427 * @rl_type: rate limit type min or max
3428 * @bw_alloc: bandwidth alloc information
3429 *
3430 * Save BW alloc information of AGG type node for post replay use.
3431 */
3432 static enum ice_status
ice_sched_save_agg_bw_alloc(struct ice_port_info * pi,u32 agg_id,u8 tc,enum ice_rl_type rl_type,u16 bw_alloc)3433 ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3434 enum ice_rl_type rl_type, u16 bw_alloc)
3435 {
3436 struct ice_sched_agg_info *agg_info;
3437
3438 agg_info = ice_get_agg_info(pi->hw, agg_id);
3439 if (!agg_info)
3440 return ICE_ERR_PARAM;
3441 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
3442 return ICE_ERR_PARAM;
3443 switch (rl_type) {
3444 case ICE_MIN_BW:
3445 ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
3446 break;
3447 case ICE_MAX_BW:
3448 ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
3449 break;
3450 default:
3451 return ICE_ERR_PARAM;
3452 }
3453 return ICE_SUCCESS;
3454 }
3455
3456 /**
3457 * ice_sched_save_agg_bw - save aggregator node's BW information
3458 * @pi: port information structure
3459 * @agg_id: node aggregator ID
3460 * @tc: traffic class
3461 * @rl_type: rate limit type min, max, or shared
3462 * @bw: bandwidth in Kbps - Kilo bits per sec
3463 *
3464 * Save BW information of AGG type node for post replay use.
3465 */
3466 static enum ice_status
ice_sched_save_agg_bw(struct ice_port_info * pi,u32 agg_id,u8 tc,enum ice_rl_type rl_type,u32 bw)3467 ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
3468 enum ice_rl_type rl_type, u32 bw)
3469 {
3470 struct ice_sched_agg_info *agg_info;
3471
3472 agg_info = ice_get_agg_info(pi->hw, agg_id);
3473 if (!agg_info)
3474 return ICE_ERR_PARAM;
3475 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
3476 return ICE_ERR_PARAM;
3477 switch (rl_type) {
3478 case ICE_MIN_BW:
3479 ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw);
3480 break;
3481 case ICE_MAX_BW:
3482 ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw);
3483 break;
3484 case ICE_SHARED_BW:
3485 ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw);
3486 break;
3487 default:
3488 return ICE_ERR_PARAM;
3489 }
3490 return ICE_SUCCESS;
3491 }
3492
3493 /**
3494 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
3495 * @pi: port information structure
3496 * @vsi_handle: software VSI handle
3497 * @tc: traffic class
3498 * @rl_type: min or max
3499 * @bw: bandwidth in Kbps
3500 *
3501 * This function configures BW limit of VSI scheduling node based on TC
3502 * information.
3503 */
3504 enum ice_status
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info * pi,u16 vsi_handle,u8 tc,enum ice_rl_type rl_type,u32 bw)3505 ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3506 enum ice_rl_type rl_type, u32 bw)
3507 {
3508 enum ice_status status;
3509
3510 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
3511 ICE_AGG_TYPE_VSI,
3512 tc, rl_type, bw);
3513 if (!status) {
3514 ice_acquire_lock(&pi->sched_lock);
3515 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
3516 ice_release_lock(&pi->sched_lock);
3517 }
3518 return status;
3519 }
3520
3521 /**
3522 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
3523 * @pi: port information structure
3524 * @vsi_handle: software VSI handle
3525 * @tc: traffic class
3526 * @rl_type: min or max
3527 *
3528 * This function configures default BW limit of VSI scheduling node based on TC
3529 * information.
3530 */
3531 enum ice_status
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info * pi,u16 vsi_handle,u8 tc,enum ice_rl_type rl_type)3532 ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3533 enum ice_rl_type rl_type)
3534 {
3535 enum ice_status status;
3536
3537 status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
3538 ICE_AGG_TYPE_VSI,
3539 tc, rl_type,
3540 ICE_SCHED_DFLT_BW);
3541 if (!status) {
3542 ice_acquire_lock(&pi->sched_lock);
3543 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
3544 ICE_SCHED_DFLT_BW);
3545 ice_release_lock(&pi->sched_lock);
3546 }
3547 return status;
3548 }
3549
3550 /**
3551 * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC
3552 * @pi: port information structure
3553 * @agg_id: aggregator ID
3554 * @tc: traffic class
3555 * @rl_type: min or max
3556 * @bw: bandwidth in Kbps
3557 *
3558 * This function applies BW limit to aggregator scheduling node based on TC
3559 * information.
3560 */
3561 enum ice_status
ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,enum ice_rl_type rl_type,u32 bw)3562 ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3563 enum ice_rl_type rl_type, u32 bw)
3564 {
3565 enum ice_status status;
3566
3567 status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
3568 tc, rl_type, bw);
3569 if (!status) {
3570 ice_acquire_lock(&pi->sched_lock);
3571 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
3572 ice_release_lock(&pi->sched_lock);
3573 }
3574 return status;
3575 }
3576
3577 /**
3578 * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC
3579 * @pi: port information structure
3580 * @agg_id: aggregator ID
3581 * @tc: traffic class
3582 * @rl_type: min or max
3583 *
3584 * This function applies default BW limit to aggregator scheduling node based
3585 * on TC information.
3586 */
3587 enum ice_status
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,enum ice_rl_type rl_type)3588 ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3589 enum ice_rl_type rl_type)
3590 {
3591 enum ice_status status;
3592
3593 status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
3594 tc, rl_type,
3595 ICE_SCHED_DFLT_BW);
3596 if (!status) {
3597 ice_acquire_lock(&pi->sched_lock);
3598 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
3599 ICE_SCHED_DFLT_BW);
3600 ice_release_lock(&pi->sched_lock);
3601 }
3602 return status;
3603 }
3604
3605 /**
3606 * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
3607 * @pi: port information structure
3608 * @vsi_handle: software VSI handle
3609 * @min_bw: minimum bandwidth in Kbps
3610 * @max_bw: maximum bandwidth in Kbps
3611 * @shared_bw: shared bandwidth in Kbps
3612 *
3613 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
3614 * classes for VSI matching handle.
3615 */
3616 enum ice_status
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info * pi,u16 vsi_handle,u32 min_bw,u32 max_bw,u32 shared_bw)3617 ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
3618 u32 max_bw, u32 shared_bw)
3619 {
3620 return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, min_bw, max_bw,
3621 shared_bw);
3622 }
3623
3624 /**
3625 * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
3626 * @pi: port information structure
3627 * @vsi_handle: software VSI handle
3628 *
3629 * This function removes the shared rate limiter(SRL) of all VSI type nodes
3630 * across all traffic classes for VSI matching handle.
3631 */
3632 enum ice_status
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info * pi,u16 vsi_handle)3633 ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
3634 {
3635 return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
3636 ICE_SCHED_DFLT_BW,
3637 ICE_SCHED_DFLT_BW,
3638 ICE_SCHED_DFLT_BW);
3639 }
3640
3641 /**
3642 * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
3643 * @pi: port information structure
3644 * @agg_id: aggregator ID
3645 * @min_bw: minimum bandwidth in Kbps
3646 * @max_bw: maximum bandwidth in Kbps
3647 * @shared_bw: shared bandwidth in Kbps
3648 *
3649 * This function configures the shared rate limiter(SRL) of all aggregator type
3650 * nodes across all traffic classes for aggregator matching agg_id.
3651 */
3652 enum ice_status
ice_cfg_agg_bw_shared_lmt(struct ice_port_info * pi,u32 agg_id,u32 min_bw,u32 max_bw,u32 shared_bw)3653 ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
3654 u32 max_bw, u32 shared_bw)
3655 {
3656 return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, min_bw, max_bw,
3657 shared_bw);
3658 }
3659
3660 /**
3661 * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter
3662 * @pi: port information structure
3663 * @agg_id: aggregator ID
3664 *
3665 * This function removes the shared rate limiter(SRL) of all aggregator type
3666 * nodes across all traffic classes for aggregator matching agg_id.
3667 */
3668 enum ice_status
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info * pi,u32 agg_id)3669 ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
3670 {
3671 return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW,
3672 ICE_SCHED_DFLT_BW,
3673 ICE_SCHED_DFLT_BW);
3674 }
3675
3676 /**
3677 * ice_cfg_agg_bw_shared_lmt_per_tc - config aggregator BW shared limit per tc
3678 * @pi: port information structure
3679 * @agg_id: aggregator ID
3680 * @tc: traffic class
3681 * @min_bw: minimum bandwidth in Kbps
3682 * @max_bw: maximum bandwidth in Kbps
3683 * @shared_bw: shared bandwidth in Kbps
3684 *
3685 * This function configures the shared rate limiter(SRL) of all aggregator type
3686 * nodes across all traffic classes for aggregator matching agg_id.
3687 */
3688 enum ice_status
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,u32 min_bw,u32 max_bw,u32 shared_bw)3689 ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
3690 u32 min_bw, u32 max_bw, u32 shared_bw)
3691 {
3692 return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, min_bw,
3693 max_bw, shared_bw);
3694 }
3695
3696 /**
3697 * ice_cfg_agg_bw_no_shared_lmt_per_tc - cfg aggregator BW shared limit per tc
3698 * @pi: port information structure
3699 * @agg_id: aggregator ID
3700 * @tc: traffic class
3701 *
3702 * This function configures the shared rate limiter(SRL) of all aggregator type
3703 * nodes across all traffic classes for aggregator matching agg_id.
3704 */
3705 enum ice_status
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc)3706 ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
3707 {
3708 return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc,
3709 ICE_SCHED_DFLT_BW,
3710 ICE_SCHED_DFLT_BW,
3711 ICE_SCHED_DFLT_BW);
3712 }
3713
3714 /**
3715 * ice_cfg_vsi_q_priority - config VSI queue priority of node
3716 * @pi: port information structure
3717 * @num_qs: number of VSI queues
3718 * @q_ids: queue IDs array
3719 * @q_prio: queue priority array
3720 *
3721 * This function configures the queue node priority (Sibling Priority) of the
3722 * passed in VSI's queue(s) for a given traffic class (TC).
3723 */
3724 enum ice_status
ice_cfg_vsi_q_priority(struct ice_port_info * pi,u16 num_qs,u32 * q_ids,u8 * q_prio)3725 ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
3726 u8 *q_prio)
3727 {
3728 enum ice_status status = ICE_ERR_PARAM;
3729 u16 i;
3730
3731 ice_acquire_lock(&pi->sched_lock);
3732
3733 for (i = 0; i < num_qs; i++) {
3734 struct ice_sched_node *node;
3735
3736 node = ice_sched_find_node_by_teid(pi->root, q_ids[i]);
3737 if (!node || node->info.data.elem_type !=
3738 ICE_AQC_ELEM_TYPE_LEAF) {
3739 status = ICE_ERR_PARAM;
3740 break;
3741 }
3742 /* Configure Priority */
3743 status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]);
3744 if (status)
3745 break;
3746 }
3747
3748 ice_release_lock(&pi->sched_lock);
3749 return status;
3750 }
3751
3752 /**
3753 * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC
3754 * @pi: port information structure
3755 * @agg_id: Aggregator ID
3756 * @num_vsis: number of VSI(s)
3757 * @vsi_handle_arr: array of software VSI handles
3758 * @node_prio: pointer to node priority
3759 * @tc: traffic class
3760 *
3761 * This function configures the node priority (Sibling Priority) of the
3762 * passed in VSI's for a given traffic class (TC) of an Aggregator ID.
3763 */
enum ice_status
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
				u16 num_vsis, u16 *vsi_handle_arr,
				u8 *node_prio, u8 tc)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_node *tc_node, *agg_node;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	struct ice_hw *hw = pi->hw;
	u16 i;

	ice_acquire_lock(&pi->sched_lock);
	/* The aggregator must already be tracked in the driver's list */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		goto exit_agg_priority_per_tc;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		goto exit_agg_priority_per_tc;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		goto exit_agg_priority_per_tc;

	/* More VSIs than the aggregator layer supports as children is
	 * a caller error.
	 */
	if (num_vsis > hw->max_children[agg_node->tx_sched_layer])
		goto exit_agg_priority_per_tc;

	for (i = 0; i < num_vsis; i++) {
		struct ice_sched_node *vsi_node;
		bool vsi_handle_valid = false;
		u16 vsi_handle;

		/* Reset to PARAM error for each entry's validation steps */
		status = ICE_ERR_PARAM;
		vsi_handle = vsi_handle_arr[i];
		if (!ice_is_vsi_valid(hw, vsi_handle))
			goto exit_agg_priority_per_tc;
		/* Verify child nodes before applying settings */
		LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
				    ice_sched_agg_vsi_info, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				vsi_handle_valid = true;
				break;
			}

		if (!vsi_handle_valid)
			goto exit_agg_priority_per_tc;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			goto exit_agg_priority_per_tc;

		/* Only apply when the VSI node actually lives under this
		 * aggregator's subtree; otherwise the entry is skipped.
		 */
		if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
			/* Configure Priority */
			status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
							      node_prio[i]);
			if (status)
				break;
			/* Cache the priority for replay after reset */
			status = ice_sched_save_vsi_prio(pi, vsi_handle, tc,
							 node_prio[i]);
			if (status)
				break;
		}
	}

exit_agg_priority_per_tc:
	ice_release_lock(&pi->sched_lock);
	return status;
}
3839
3840 /**
3841 * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC
3842 * @pi: port information structure
3843 * @vsi_handle: software VSI handle
3844 * @ena_tcmap: enabled TC map
3845 * @rl_type: Rate limit type CIR/EIR
3846 * @bw_alloc: Array of BW alloc
3847 *
3848 * This function configures the BW allocation of the passed in VSI's
3849 * node(s) for enabled traffic class.
3850 */
3851 enum ice_status
ice_cfg_vsi_bw_alloc(struct ice_port_info * pi,u16 vsi_handle,u8 ena_tcmap,enum ice_rl_type rl_type,u8 * bw_alloc)3852 ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
3853 enum ice_rl_type rl_type, u8 *bw_alloc)
3854 {
3855 enum ice_status status = ICE_SUCCESS;
3856 u8 tc;
3857
3858 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3859 return ICE_ERR_PARAM;
3860
3861 ice_acquire_lock(&pi->sched_lock);
3862
3863 /* Return success if no nodes are present across TC */
3864 ice_for_each_traffic_class(tc) {
3865 struct ice_sched_node *tc_node, *vsi_node;
3866
3867 if (!ice_is_tc_ena(ena_tcmap, tc))
3868 continue;
3869
3870 tc_node = ice_sched_get_tc_node(pi, tc);
3871 if (!tc_node)
3872 continue;
3873
3874 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
3875 if (!vsi_node)
3876 continue;
3877
3878 status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type,
3879 bw_alloc[tc]);
3880 if (status)
3881 break;
3882 status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc,
3883 rl_type, bw_alloc[tc]);
3884 if (status)
3885 break;
3886 }
3887
3888 ice_release_lock(&pi->sched_lock);
3889 return status;
3890 }
3891
3892 /**
3893 * ice_cfg_agg_bw_alloc - config aggregator BW alloc
3894 * @pi: port information structure
3895 * @agg_id: aggregator ID
3896 * @ena_tcmap: enabled TC map
3897 * @rl_type: rate limit type CIR/EIR
3898 * @bw_alloc: array of BW alloc
3899 *
3900 * This function configures the BW allocation of passed in aggregator for
3901 * enabled traffic class(s).
3902 */
enum ice_status
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
		     enum ice_rl_type rl_type, u8 *bw_alloc)
{
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = pi->hw;
	u8 tc;

	ice_acquire_lock(&pi->sched_lock);
	/* The aggregator must already exist in the driver's aggregator list */
	LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present) {
		status = ICE_ERR_PARAM;
		goto exit_cfg_agg_bw_alloc;
	}

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;

		/* Skip TCs the caller did not enable in the bitmap */
		if (!ice_is_tc_ena(ena_tcmap, tc))
			continue;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;

		/* Program HW, then cache the value for replay after reset */
		status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
		status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type,
						     bw_alloc[tc]);
		if (status)
			break;
	}

exit_cfg_agg_bw_alloc:
	ice_release_lock(&pi->sched_lock);
	return status;
}
3954
3955 /**
3956 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
3957 * @hw: pointer to the HW struct
3958 * @bw: bandwidth in Kbps
3959 *
3960 * This function calculates the wakeup parameter of RL profile.
3961 */
static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
{
	s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
	s32 wakeup_f_int;
	u16 wakeup = 0;

	/* Get the wakeup integer value */
	bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
	wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec);
	if (wakeup_int > 63) {
		/* Integer part too large for the fractional encoding:
		 * set bit 15 and store the integer value directly.
		 */
		wakeup = (u16)((1 << 15) | wakeup_int);
	} else {
		/* Calculate fraction value up to 4 decimals
		 * Convert Integer value to a constant multiplier
		 */
		wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
		wakeup_a = DIV_S64((s64)ICE_RL_PROF_MULTIPLIER *
				   hw->psm_clk_freq, bytes_per_sec);

		/* Get Fraction value */
		wakeup_f = wakeup_a - wakeup_b;

		/* Round up the Fractional value via Ceil(Fractional value) */
		if (wakeup_f > DIV_S64(ICE_RL_PROF_MULTIPLIER, 2))
			wakeup_f += 1;

		wakeup_f_int = (s32)DIV_S64(wakeup_f * ICE_RL_PROF_FRACTION,
					    ICE_RL_PROF_MULTIPLIER);
		/* Pack: integer part into bits 14:9, fraction into bits 8:0 */
		wakeup |= (u16)(wakeup_int << 9);
		wakeup |= (u16)(0x1ff & wakeup_f_int);
	}

	return wakeup;
}
3996
3997 /**
3998 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
3999 * @hw: pointer to the HW struct
4000 * @bw: bandwidth in Kbps
4001 * @profile: profile parameters to return
4002 *
4003 * This function converts the BW to profile structure format.
4004 */
static enum ice_status
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
			   struct ice_aqc_rl_profile_elem *profile)
{
	enum ice_status status = ICE_ERR_PARAM;
	s64 bytes_per_sec, ts_rate, mv_tmp;
	bool found = false;
	s32 encode = 0;
	s64 mv = 0;
	s32 i;

	/* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
	if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
		return status;

	/* Bytes per second from Kbps */
	bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);

	/* encode is 6 bits but really useful are 5 bits */
	for (i = 0; i < 64; i++) {
		u64 pow_result = BIT_ULL(i);

		/* Timeslice rate for this encode value (divisor grows by a
		 * power of two each iteration); skip once it truncates to 0.
		 */
		ts_rate = DIV_S64((s64)hw->psm_clk_freq,
				  pow_result * ICE_RL_PROF_TS_MULTIPLIER);
		if (ts_rate <= 0)
			continue;

		/* Multiplier value */
		mv_tmp = DIV_S64(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
				 ts_rate);

		/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
		mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);

		/* First multiplier value greater than the given
		 * accuracy bytes
		 */
		if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
			encode = i;
			found = true;
			break;
		}
	}
	if (found) {
		u16 wm;

		/* Fill the profile element, converting fields to LE16 */
		wm = ice_sched_calc_wakeup(hw, bw);
		profile->rl_multiply = CPU_TO_LE16(mv);
		profile->wake_up_calc = CPU_TO_LE16(wm);
		profile->rl_encode = CPU_TO_LE16(encode);
		status = ICE_SUCCESS;
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	return status;
}
4062
4063 /**
4064 * ice_sched_add_rl_profile - add RL profile
4065 * @hw: pointer to the hardware structure
4066 * @rl_type: type of rate limit BW - min, max, or shared
4067 * @bw: bandwidth in Kbps - Kilo bits per sec
4068 * @layer_num: specifies in which layer to create profile
4069 *
4070 * This function first checks the existing list for corresponding BW
4071 * parameter. If it exists, it returns the associated profile otherwise
4072 * it creates a new rate limit profile for requested BW, and adds it to
4073 * the HW DB and local list. It returns the new profile or null on error.
4074 * The caller needs to hold the scheduler lock.
4075 */
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
			 u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	u16 profiles_added = 0, num_profiles = 1;
	struct ice_aqc_rl_profile_elem *buf;
	enum ice_status status;
	u8 profile_type;

	if (!hw || layer_num >= hw->num_tx_sched_layers)
		return NULL;
	/* Map the rate limit type to the matching AQ profile type */
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		break;
	default:
		return NULL;
	}

	/* Reuse an existing profile with matching type and BW, if any */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type && rl_prof_elem->bw == bw)
			/* Return existing profile ID info */
			return rl_prof_elem;

	/* Create new profile ID */
	rl_prof_elem = (struct ice_aqc_rl_profile_info *)
		ice_malloc(hw, sizeof(*rl_prof_elem));

	if (!rl_prof_elem)
		return NULL;

	status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
	if (status != ICE_SUCCESS)
		goto exit_add_rl_prof;

	rl_prof_elem->bw = bw;
	/* layer_num is zero relative, and fw expects level from 1 to 9 */
	rl_prof_elem->profile.level = layer_num + 1;
	rl_prof_elem->profile.flags = profile_type;
	rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);

	/* Create new entry in HW DB */
	buf = &rl_prof_elem->profile;
	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
				       &profiles_added, NULL);
	if (status || profiles_added != num_profiles)
		goto exit_add_rl_prof;

	/* Good entry - add in the list */
	rl_prof_elem->prof_id_ref = 0;
	LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]);
	return rl_prof_elem;

exit_add_rl_prof:
	/* Failure path: free the element so it does not leak */
	ice_free(hw, rl_prof_elem);
	return NULL;
}
4142
4143 /**
4144 * ice_sched_cfg_node_bw_lmt - configure node sched params
4145 * @hw: pointer to the HW struct
4146 * @node: sched node to configure
4147 * @rl_type: rate limit type CIR, EIR, or shared
4148 * @rl_prof_id: rate limit profile ID
4149 *
4150 * This function configures node element's BW limit.
4151 */
4152 static enum ice_status
ice_sched_cfg_node_bw_lmt(struct ice_hw * hw,struct ice_sched_node * node,enum ice_rl_type rl_type,u16 rl_prof_id)4153 ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
4154 enum ice_rl_type rl_type, u16 rl_prof_id)
4155 {
4156 struct ice_aqc_txsched_elem_data buf;
4157 struct ice_aqc_txsched_elem *data;
4158
4159 buf = node->info;
4160 data = &buf.data;
4161 switch (rl_type) {
4162 case ICE_MIN_BW:
4163 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
4164 data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
4165 break;
4166 case ICE_MAX_BW:
4167 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
4168 data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
4169 break;
4170 case ICE_SHARED_BW:
4171 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
4172 data->srl_id = CPU_TO_LE16(rl_prof_id);
4173 break;
4174 default:
4175 /* Unknown rate limit type */
4176 return ICE_ERR_PARAM;
4177 }
4178
4179 /* Configure element */
4180 return ice_sched_update_elem(hw, node, &buf);
4181 }
4182
4183 /**
4184 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
4185 * @node: sched node
4186 * @rl_type: rate limit type
4187 *
4188 * If existing profile matches, it returns the corresponding rate
4189 * limit profile ID, otherwise it returns an invalid ID as error.
4190 */
4191 static u16
ice_sched_get_node_rl_prof_id(struct ice_sched_node * node,enum ice_rl_type rl_type)4192 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
4193 enum ice_rl_type rl_type)
4194 {
4195 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
4196 struct ice_aqc_txsched_elem *data;
4197
4198 data = &node->info.data;
4199 switch (rl_type) {
4200 case ICE_MIN_BW:
4201 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
4202 rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx);
4203 break;
4204 case ICE_MAX_BW:
4205 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
4206 rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx);
4207 break;
4208 case ICE_SHARED_BW:
4209 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
4210 rl_prof_id = LE16_TO_CPU(data->srl_id);
4211 break;
4212 default:
4213 break;
4214 }
4215
4216 return rl_prof_id;
4217 }
4218
4219 /**
4220 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
4221 * @pi: port information structure
4222 * @rl_type: type of rate limit BW - min, max, or shared
4223 * @layer_index: layer index
4224 *
4225 * This function returns requested profile creation layer.
4226 */
4227 static u8
ice_sched_get_rl_prof_layer(struct ice_port_info * pi,enum ice_rl_type rl_type,u8 layer_index)4228 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
4229 u8 layer_index)
4230 {
4231 struct ice_hw *hw = pi->hw;
4232
4233 if (layer_index >= hw->num_tx_sched_layers)
4234 return ICE_SCHED_INVAL_LAYER_NUM;
4235 switch (rl_type) {
4236 case ICE_MIN_BW:
4237 if (hw->layer_info[layer_index].max_cir_rl_profiles)
4238 return layer_index;
4239 break;
4240 case ICE_MAX_BW:
4241 if (hw->layer_info[layer_index].max_eir_rl_profiles)
4242 return layer_index;
4243 break;
4244 case ICE_SHARED_BW:
4245 /* if current layer doesn't support SRL profile creation
4246 * then try a layer up or down.
4247 */
4248 if (hw->layer_info[layer_index].max_srl_profiles)
4249 return layer_index;
4250 else if (layer_index < hw->num_tx_sched_layers - 1 &&
4251 hw->layer_info[layer_index + 1].max_srl_profiles)
4252 return layer_index + 1;
4253 else if (layer_index > 0 &&
4254 hw->layer_info[layer_index - 1].max_srl_profiles)
4255 return layer_index - 1;
4256 break;
4257 default:
4258 break;
4259 }
4260 return ICE_SCHED_INVAL_LAYER_NUM;
4261 }
4262
4263 /**
4264 * ice_sched_get_srl_node - get shared rate limit node
4265 * @node: tree node
4266 * @srl_layer: shared rate limit layer
4267 *
4268 * This function returns SRL node to be used for shared rate limit purpose.
4269 * The caller needs to hold scheduler lock.
4270 */
4271 static struct ice_sched_node *
ice_sched_get_srl_node(struct ice_sched_node * node,u8 srl_layer)4272 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
4273 {
4274 if (srl_layer > node->tx_sched_layer)
4275 return node->children[0];
4276 else if (srl_layer < node->tx_sched_layer)
4277 /* Node can't be created without a parent. It will always
4278 * have a valid parent except root node.
4279 */
4280 return node->parent;
4281 else
4282 return node;
4283 }
4284
4285 /**
4286 * ice_sched_rm_rl_profile - remove RL profile ID
4287 * @hw: pointer to the hardware structure
4288 * @layer_num: layer number where profiles are saved
4289 * @profile_type: profile type like EIR, CIR, or SRL
4290 * @profile_id: profile ID to remove
4291 *
4292 * This function removes rate limit profile from layer 'layer_num' of type
4293 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
4294 * scheduler lock.
4295 */
static enum ice_status
ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
			u16 profile_id)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	enum ice_status status = ICE_SUCCESS;

	if (!hw || layer_num >= hw->num_tx_sched_layers)
		return ICE_ERR_PARAM;
	/* Check the existing list for RL profile */
	LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
			    ice_aqc_rl_profile_info, list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type &&
		    LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
		    profile_id) {
			/* Drop one reference before attempting removal */
			if (rl_prof_elem->prof_id_ref)
				rl_prof_elem->prof_id_ref--;

			/* Remove old profile ID from database */
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status && status != ICE_ERR_IN_USE)
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
			break;
		}
	/* A profile still referenced elsewhere is not an error to callers */
	if (status == ICE_ERR_IN_USE)
		status = ICE_SUCCESS;
	return status;
}
4325
4326 /**
4327 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
4328 * @pi: port information structure
4329 * @node: pointer to node structure
4330 * @rl_type: rate limit type min, max, or shared
4331 * @layer_num: layer number where RL profiles are saved
4332 *
4333 * This function configures node element's BW rate limit profile ID of
4334 * type CIR, EIR, or SRL to default. This function needs to be called
4335 * with the scheduler lock held.
4336 */
static enum ice_status
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
			   struct ice_sched_node *node,
			   enum ice_rl_type rl_type, u8 layer_num)
{
	enum ice_status status;
	struct ice_hw *hw;
	u8 profile_type;
	u16 rl_prof_id;
	u16 old_id;

	hw = pi->hw;
	/* Pick the profile type and the default profile ID for this RL type */
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		/* No SRL is configured for default case */
		rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* Remove stale RL profile ID */
	if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
	    old_id == ICE_SCHED_INVAL_PROF_ID)
		return ICE_SUCCESS;

	/* Drop the reference on the profile the node previously used */
	return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id);
}
4380
4381 /**
4382 * ice_sched_set_node_bw - set node's bandwidth
4383 * @pi: port information structure
4384 * @node: tree node
4385 * @rl_type: rate limit type min, max, or shared
4386 * @bw: bandwidth in Kbps - Kilo bits per sec
4387 * @layer_num: layer number
4388 *
4389 * This function adds new profile corresponding to requested BW, configures
4390 * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
4391 * ID from local database. The caller needs to hold scheduler lock.
4392 */
enum ice_status
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_info;
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_hw *hw = pi->hw;
	u16 old_id, rl_prof_id;

	/* Find an existing RL profile for this BW or create a new one */
	rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
	if (!rl_prof_info)
		return status;

	rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id);

	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* The new profile is now in use; take a reference on it */
	rl_prof_info->prof_id_ref++;

	/* Check for old ID removal */
	if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
	    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
		return ICE_SUCCESS;

	/* Release the previously-used profile of the same type */
	return ice_sched_rm_rl_profile(hw, layer_num,
				       rl_prof_info->profile.flags &
				       ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
4428
4429 /**
4430 * ice_sched_set_node_priority - set node's priority
4431 * @pi: port information structure
4432 * @node: tree node
4433 * @priority: number 0-7 representing priority among siblings
4434 *
4435 * This function sets priority of a node among it's siblings.
4436 */
4437 enum ice_status
ice_sched_set_node_priority(struct ice_port_info * pi,struct ice_sched_node * node,u16 priority)4438 ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
4439 u16 priority)
4440 {
4441 struct ice_aqc_txsched_elem_data buf;
4442 struct ice_aqc_txsched_elem *data;
4443
4444 buf = node->info;
4445 data = &buf.data;
4446
4447 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
4448 data->generic |= ICE_AQC_ELEM_GENERIC_PRIO_M &
4449 (priority << ICE_AQC_ELEM_GENERIC_PRIO_S);
4450
4451 return ice_sched_update_elem(pi->hw, node, &buf);
4452 }
4453
4454 /**
4455 * ice_sched_set_node_weight - set node's weight
4456 * @pi: port information structure
4457 * @node: tree node
4458 * @weight: number 1-200 representing weight for WFQ
4459 *
4460 * This function sets weight of the node for WFQ algorithm.
4461 */
4462 enum ice_status
ice_sched_set_node_weight(struct ice_port_info * pi,struct ice_sched_node * node,u16 weight)4463 ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
4464 {
4465 struct ice_aqc_txsched_elem_data buf;
4466 struct ice_aqc_txsched_elem *data;
4467
4468 buf = node->info;
4469 data = &buf.data;
4470
4471 data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR |
4472 ICE_AQC_ELEM_VALID_GENERIC;
4473 data->cir_bw.bw_alloc = CPU_TO_LE16(weight);
4474 data->eir_bw.bw_alloc = CPU_TO_LE16(weight);
4475 data->generic |= ICE_AQC_ELEM_GENERIC_SP_M &
4476 (0x0 << ICE_AQC_ELEM_GENERIC_SP_S);
4477
4478 return ice_sched_update_elem(pi->hw, node, &buf);
4479 }
4480
4481 /**
4482 * ice_sched_set_node_bw_lmt - set node's BW limit
4483 * @pi: port information structure
4484 * @node: tree node
4485 * @rl_type: rate limit type min, max, or shared
4486 * @bw: bandwidth in Kbps - Kilo bits per sec
4487 *
4488 * It updates node's BW limit parameters like BW RL profile ID of type CIR,
4489 * EIR, or SRL. The caller needs to hold scheduler lock.
4490 *
4491 * NOTE: Caller provides the correct SRL node in case of shared profile
4492 * settings.
4493 */
4494 enum ice_status
ice_sched_set_node_bw_lmt(struct ice_port_info * pi,struct ice_sched_node * node,enum ice_rl_type rl_type,u32 bw)4495 ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
4496 enum ice_rl_type rl_type, u32 bw)
4497 {
4498 struct ice_hw *hw;
4499 u8 layer_num;
4500
4501 if (!pi)
4502 return ICE_ERR_PARAM;
4503 hw = pi->hw;
4504 /* Remove unused RL profile IDs from HW and SW DB */
4505 ice_sched_rm_unused_rl_prof(hw);
4506
4507 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
4508 node->tx_sched_layer);
4509 if (layer_num >= hw->num_tx_sched_layers)
4510 return ICE_ERR_PARAM;
4511
4512 if (bw == ICE_SCHED_DFLT_BW)
4513 return ice_sched_set_node_bw_dflt(pi, node, rl_type, layer_num);
4514 return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num);
4515 }
4516
4517 /**
4518 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
4519 * @pi: port information structure
4520 * @node: pointer to node structure
4521 * @rl_type: rate limit type min, max, or shared
4522 *
4523 * This function configures node element's BW rate limit profile ID of
4524 * type CIR, EIR, or SRL to default. This function needs to be called
4525 * with the scheduler lock held.
4526 */
4527 static enum ice_status
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info * pi,struct ice_sched_node * node,enum ice_rl_type rl_type)4528 ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
4529 struct ice_sched_node *node,
4530 enum ice_rl_type rl_type)
4531 {
4532 return ice_sched_set_node_bw_lmt(pi, node, rl_type,
4533 ICE_SCHED_DFLT_BW);
4534 }
4535
4536 /**
4537 * ice_sched_validate_srl_node - Check node for SRL applicability
4538 * @node: sched node to configure
4539 * @sel_layer: selected SRL layer
4540 *
 * This function checks if the SRL can be applied to a selected layer node on
4542 * behalf of the requested node (first argument). This function needs to be
4543 * called with scheduler lock held.
4544 */
4545 static enum ice_status
ice_sched_validate_srl_node(struct ice_sched_node * node,u8 sel_layer)4546 ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
4547 {
4548 /* SRL profiles are not available on all layers. Check if the
4549 * SRL profile can be applied to a node above or below the
4550 * requested node. SRL configuration is possible only if the
4551 * selected layer's node has single child.
4552 */
4553 if (sel_layer == node->tx_sched_layer ||
4554 ((sel_layer == node->tx_sched_layer + 1) &&
4555 node->num_children == 1) ||
4556 ((sel_layer == node->tx_sched_layer - 1) &&
4557 (node->parent && node->parent->num_children == 1)))
4558 return ICE_SUCCESS;
4559
4560 return ICE_ERR_CFG;
4561 }
4562
4563 /**
4564 * ice_sched_save_q_bw - save queue node's BW information
4565 * @q_ctx: queue context structure
4566 * @rl_type: rate limit type min, max, or shared
4567 * @bw: bandwidth in Kbps - Kilo bits per sec
4568 *
4569 * Save BW information of queue type node for post replay use.
4570 */
4571 static enum ice_status
ice_sched_save_q_bw(struct ice_q_ctx * q_ctx,enum ice_rl_type rl_type,u32 bw)4572 ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
4573 {
4574 switch (rl_type) {
4575 case ICE_MIN_BW:
4576 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
4577 break;
4578 case ICE_MAX_BW:
4579 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
4580 break;
4581 case ICE_SHARED_BW:
4582 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
4583 break;
4584 default:
4585 return ICE_ERR_PARAM;
4586 }
4587 return ICE_SUCCESS;
4588 }
4589
4590 /**
4591 * ice_sched_set_q_bw_lmt - sets queue BW limit
4592 * @pi: port information structure
4593 * @vsi_handle: sw VSI handle
4594 * @tc: traffic class
4595 * @q_handle: software queue handle
4596 * @rl_type: min, max, or shared
4597 * @bw: bandwidth in Kbps
4598 *
4599 * This function sets BW limit of queue scheduling node.
4600 */
static enum ice_status
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_sched_node *node;
	struct ice_q_ctx *q_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;
	ice_acquire_lock(&pi->sched_lock);
	/* Look up the queue context, which holds the node's TEID */
	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		goto exit_q_bw_lmt;
	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
		goto exit_q_bw_lmt;
	}

	/* Return error if it is not a leaf node */
	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
		goto exit_q_bw_lmt;

	/* SRL bandwidth layer selection */
	if (rl_type == ICE_SHARED_BW) {
		u8 sel_layer; /* selected layer */

		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
							node->tx_sched_layer);
		if (sel_layer >= pi->hw->num_tx_sched_layers) {
			status = ICE_ERR_PARAM;
			goto exit_q_bw_lmt;
		}
		/* SRL may land on a neighboring layer; validate the fit */
		status = ice_sched_validate_srl_node(node, sel_layer);
		if (status)
			goto exit_q_bw_lmt;
	}

	/* Default BW restores the default profile; otherwise set the limit */
	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

	/* On success, cache the BW so it can be replayed after reset */
	if (!status)
		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);

exit_q_bw_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}
4652
4653 /**
4654 * ice_cfg_q_bw_lmt - configure queue BW limit
4655 * @pi: port information structure
4656 * @vsi_handle: sw VSI handle
4657 * @tc: traffic class
4658 * @q_handle: software queue handle
4659 * @rl_type: min, max, or shared
4660 * @bw: bandwidth in Kbps
4661 *
4662 * This function configures BW limit of queue scheduling node.
4663 */
4664 enum ice_status
ice_cfg_q_bw_lmt(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u16 q_handle,enum ice_rl_type rl_type,u32 bw)4665 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4666 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
4667 {
4668 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
4669 bw);
4670 }
4671
4672 /**
4673 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
4674 * @pi: port information structure
4675 * @vsi_handle: sw VSI handle
4676 * @tc: traffic class
4677 * @q_handle: software queue handle
4678 * @rl_type: min, max, or shared
4679 *
4680 * This function configures BW default limit of queue scheduling node.
4681 */
4682 enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u16 q_handle,enum ice_rl_type rl_type)4683 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
4684 u16 q_handle, enum ice_rl_type rl_type)
4685 {
4686 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
4687 ICE_SCHED_DFLT_BW);
4688 }
4689
4690 /**
4691 * ice_sched_save_tc_node_bw - save TC node BW limit
4692 * @pi: port information structure
4693 * @tc: TC number
4694 * @rl_type: min or max
4695 * @bw: bandwidth in Kbps
4696 *
4697 * This function saves the modified values of bandwidth settings for later
4698 * replay purpose (restore) after reset.
4699 */
4700 static enum ice_status
ice_sched_save_tc_node_bw(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u32 bw)4701 ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
4702 enum ice_rl_type rl_type, u32 bw)
4703 {
4704 if (tc >= ICE_MAX_TRAFFIC_CLASS)
4705 return ICE_ERR_PARAM;
4706 switch (rl_type) {
4707 case ICE_MIN_BW:
4708 ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw);
4709 break;
4710 case ICE_MAX_BW:
4711 ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw);
4712 break;
4713 case ICE_SHARED_BW:
4714 ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw);
4715 break;
4716 default:
4717 return ICE_ERR_PARAM;
4718 }
4719 return ICE_SUCCESS;
4720 }
4721
4722 /**
4723 * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit
4724 * @pi: port information structure
4725 * @tc: TC number
4726 * @rl_type: min or max
4727 * @bw: bandwidth in Kbps
4728 *
4729 * This function configures bandwidth limit of TC node.
4730 */
4731 static enum ice_status
ice_sched_set_tc_node_bw_lmt(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u32 bw)4732 ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
4733 enum ice_rl_type rl_type, u32 bw)
4734 {
4735 enum ice_status status = ICE_ERR_PARAM;
4736 struct ice_sched_node *tc_node;
4737
4738 if (tc >= ICE_MAX_TRAFFIC_CLASS)
4739 return status;
4740 ice_acquire_lock(&pi->sched_lock);
4741 tc_node = ice_sched_get_tc_node(pi, tc);
4742 if (!tc_node)
4743 goto exit_set_tc_node_bw;
4744 if (bw == ICE_SCHED_DFLT_BW)
4745 status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type);
4746 else
4747 status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw);
4748 if (!status)
4749 status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw);
4750
4751 exit_set_tc_node_bw:
4752 ice_release_lock(&pi->sched_lock);
4753 return status;
4754 }
4755
4756 /**
4757 * ice_cfg_tc_node_bw_lmt - configure TC node BW limit
4758 * @pi: port information structure
4759 * @tc: TC number
4760 * @rl_type: min or max
4761 * @bw: bandwidth in Kbps
4762 *
4763 * This function configures BW limit of TC node.
4764 * Note: The minimum guaranteed reservation is done via DCBX.
4765 */
4766 enum ice_status
ice_cfg_tc_node_bw_lmt(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u32 bw)4767 ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
4768 enum ice_rl_type rl_type, u32 bw)
4769 {
4770 return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw);
4771 }
4772
4773 /**
4774 * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit
4775 * @pi: port information structure
4776 * @tc: TC number
4777 * @rl_type: min or max
4778 *
4779 * This function configures BW default limit of TC node.
4780 */
4781 enum ice_status
ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type)4782 ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
4783 enum ice_rl_type rl_type)
4784 {
4785 return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW);
4786 }
4787
/**
 * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information
 * @pi: port information structure
 * @tc: traffic class
 * @rl_type: rate limit type min or max
 * @bw_alloc: Bandwidth allocation information
 *
 * Save BW alloc information of TC type node for post replay use.
 */
static enum ice_status
ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
				enum ice_rl_type rl_type, u16 bw_alloc)
{
	if (tc >= ICE_MAX_TRAFFIC_CLASS)
		return ICE_ERR_PARAM;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc],
					   bw_alloc);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc],
					   bw_alloc);
		break;
	default:
		/* only min/max BW alloc types are handled here */
		return ICE_ERR_PARAM;
	}
	return ICE_SUCCESS;
}
4817
4818 /**
4819 * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc
4820 * @pi: port information structure
4821 * @tc: TC number
4822 * @rl_type: min or max
4823 * @bw_alloc: bandwidth alloc
4824 *
4825 * This function configures bandwidth alloc of TC node, also saves the
4826 * changed settings for replay purpose, and return success if it succeeds
4827 * in modifying bandwidth alloc setting.
4828 */
4829 static enum ice_status
ice_sched_set_tc_node_bw_alloc(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u8 bw_alloc)4830 ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
4831 enum ice_rl_type rl_type, u8 bw_alloc)
4832 {
4833 enum ice_status status = ICE_ERR_PARAM;
4834 struct ice_sched_node *tc_node;
4835
4836 if (tc >= ICE_MAX_TRAFFIC_CLASS)
4837 return status;
4838 ice_acquire_lock(&pi->sched_lock);
4839 tc_node = ice_sched_get_tc_node(pi, tc);
4840 if (!tc_node)
4841 goto exit_set_tc_node_bw_alloc;
4842 status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type,
4843 bw_alloc);
4844 if (status)
4845 goto exit_set_tc_node_bw_alloc;
4846 status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
4847
4848 exit_set_tc_node_bw_alloc:
4849 ice_release_lock(&pi->sched_lock);
4850 return status;
4851 }
4852
4853 /**
4854 * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc
4855 * @pi: port information structure
4856 * @tc: TC number
4857 * @rl_type: min or max
4858 * @bw_alloc: bandwidth alloc
4859 *
4860 * This function configures BW limit of TC node.
4861 * Note: The minimum guaranteed reservation is done via DCBX.
4862 */
4863 enum ice_status
ice_cfg_tc_node_bw_alloc(struct ice_port_info * pi,u8 tc,enum ice_rl_type rl_type,u8 bw_alloc)4864 ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
4865 enum ice_rl_type rl_type, u8 bw_alloc)
4866 {
4867 return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
4868 }
4869
4870 /**
4871 * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default
4872 * @pi: port information structure
4873 * @vsi_handle: software VSI handle
4874 *
4875 * This function retrieves the aggregator ID based on VSI ID and TC,
4876 * and sets node's BW limit to default. This function needs to be
4877 * called with the scheduler lock held.
4878 */
4879 enum ice_status
ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info * pi,u16 vsi_handle)4880 ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle)
4881 {
4882 struct ice_vsi_ctx *vsi_ctx;
4883 enum ice_status status = ICE_SUCCESS;
4884 u8 tc;
4885
4886 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4887 return ICE_ERR_PARAM;
4888 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
4889 if (!vsi_ctx)
4890 return ICE_ERR_PARAM;
4891
4892 ice_for_each_traffic_class(tc) {
4893 struct ice_sched_node *node;
4894
4895 node = vsi_ctx->sched.ag_node[tc];
4896 if (!node)
4897 continue;
4898
4899 /* Set min profile to default */
4900 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW);
4901 if (status)
4902 break;
4903
4904 /* Set max profile to default */
4905 status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW);
4906 if (status)
4907 break;
4908
4909 /* Remove shared profile, if there is one */
4910 status = ice_sched_set_node_bw_dflt_lmt(pi, node,
4911 ICE_SHARED_BW);
4912 if (status)
4913 break;
4914 }
4915
4916 return status;
4917 }
4918
4919 /**
4920 * ice_sched_get_node_by_id_type - get node from ID type
4921 * @pi: port information structure
4922 * @id: identifier
4923 * @agg_type: type of aggregator
4924 * @tc: traffic class
4925 *
4926 * This function returns node identified by ID of type aggregator, and
4927 * based on traffic class (TC). This function needs to be called with
4928 * the scheduler lock held.
4929 */
4930 static struct ice_sched_node *
ice_sched_get_node_by_id_type(struct ice_port_info * pi,u32 id,enum ice_agg_type agg_type,u8 tc)4931 ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
4932 enum ice_agg_type agg_type, u8 tc)
4933 {
4934 struct ice_sched_node *node = NULL;
4935
4936 switch (agg_type) {
4937 case ICE_AGG_TYPE_VSI: {
4938 struct ice_vsi_ctx *vsi_ctx;
4939 u16 vsi_handle = (u16)id;
4940
4941 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4942 break;
4943 /* Get sched_vsi_info */
4944 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
4945 if (!vsi_ctx)
4946 break;
4947 node = vsi_ctx->sched.vsi_node[tc];
4948 break;
4949 }
4950
4951 case ICE_AGG_TYPE_AGG: {
4952 struct ice_sched_node *tc_node;
4953
4954 tc_node = ice_sched_get_tc_node(pi, tc);
4955 if (tc_node)
4956 node = ice_sched_get_agg_node(pi, tc_node, id);
4957 break;
4958 }
4959
4960 case ICE_AGG_TYPE_Q:
4961 /* The current implementation allows single queue to modify */
4962 node = ice_sched_find_node_by_teid(pi->root, id);
4963 break;
4964
4965 case ICE_AGG_TYPE_QG: {
4966 struct ice_sched_node *child_node;
4967
4968 /* The current implementation allows single qg to modify */
4969 child_node = ice_sched_find_node_by_teid(pi->root, id);
4970 if (!child_node)
4971 break;
4972 node = child_node->parent;
4973 break;
4974 }
4975
4976 default:
4977 break;
4978 }
4979
4980 return node;
4981 }
4982
4983 /**
4984 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
4985 * @pi: port information structure
4986 * @id: ID (software VSI handle or AGG ID)
4987 * @agg_type: aggregator type (VSI or AGG type node)
4988 * @tc: traffic class
4989 * @rl_type: min or max
4990 * @bw: bandwidth in Kbps
4991 *
4992 * This function sets BW limit of VSI or Aggregator scheduling node
4993 * based on TC information from passed in argument BW.
4994 */
4995 enum ice_status
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info * pi,u32 id,enum ice_agg_type agg_type,u8 tc,enum ice_rl_type rl_type,u32 bw)4996 ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
4997 enum ice_agg_type agg_type, u8 tc,
4998 enum ice_rl_type rl_type, u32 bw)
4999 {
5000 enum ice_status status = ICE_ERR_PARAM;
5001 struct ice_sched_node *node;
5002
5003 if (!pi)
5004 return status;
5005
5006 if (rl_type == ICE_UNKNOWN_BW)
5007 return status;
5008
5009 ice_acquire_lock(&pi->sched_lock);
5010 node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
5011 if (!node) {
5012 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
5013 goto exit_set_node_bw_lmt_per_tc;
5014 }
5015 if (bw == ICE_SCHED_DFLT_BW)
5016 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
5017 else
5018 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
5019
5020 exit_set_node_bw_lmt_per_tc:
5021 ice_release_lock(&pi->sched_lock);
5022 return status;
5023 }
5024
/**
 * ice_sched_validate_vsi_srl_node - validate VSI SRL node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function validates SRL node of the VSI node if available SRL layer is
 * different than the VSI node layer on all TC(s).This function needs to be
 * called with scheduler lock held.
 */
static enum ice_status
ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
{
	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;
		enum ice_status status;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* SRL bandwidth layer selection; computed once from the
		 * first VSI node found and reused for the remaining TCs
		 */
		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
			u8 node_layer = vsi_node->tx_sched_layer;
			u8 layer_num;

			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
								node_layer);
			if (layer_num >= pi->hw->num_tx_sched_layers)
				return ICE_ERR_PARAM;
			sel_layer = layer_num;
		}

		/* fail fast on the first TC whose node is invalid for SRL */
		status = ice_sched_validate_srl_node(vsi_node, sel_layer);
		if (status)
			return status;
	}
	return ICE_SUCCESS;
}
5075
5076 /**
5077 * ice_sched_set_save_vsi_srl_node_bw - set VSI shared limit values
5078 * @pi: port information structure
5079 * @vsi_handle: software VSI handle
5080 * @tc: traffic class
5081 * @srl_node: sched node to configure
5082 * @rl_type: rate limit type minimum, maximum, or shared
5083 * @bw: minimum, maximum, or shared bandwidth in Kbps
5084 *
5085 * Configure shared rate limiter(SRL) of VSI type nodes across given traffic
5086 * class, and saves those value for later use for replaying purposes. The
5087 * caller holds the scheduler lock.
5088 */
5089 static enum ice_status
ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info * pi,u16 vsi_handle,u8 tc,struct ice_sched_node * srl_node,enum ice_rl_type rl_type,u32 bw)5090 ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle,
5091 u8 tc, struct ice_sched_node *srl_node,
5092 enum ice_rl_type rl_type, u32 bw)
5093 {
5094 enum ice_status status;
5095
5096 if (bw == ICE_SCHED_DFLT_BW) {
5097 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
5098 } else {
5099 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw);
5100 if (status)
5101 return status;
5102 status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
5103 }
5104 return status;
5105 }
5106
5107 /**
5108 * ice_sched_set_vsi_node_srl_per_tc - set VSI node BW shared limit for tc
5109 * @pi: port information structure
5110 * @vsi_handle: software VSI handle
5111 * @tc: traffic class
5112 * @min_bw: minimum bandwidth in Kbps
5113 * @max_bw: maximum bandwidth in Kbps
5114 * @shared_bw: shared bandwidth in Kbps
5115 *
5116 * Configure shared rate limiter(SRL) of VSI type nodes across requested
5117 * traffic class for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW
5118 * is passed, it removes the corresponding bw from the node. The caller
5119 * holds scheduler lock.
5120 */
5121 static enum ice_status
ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info * pi,u16 vsi_handle,u8 tc,u32 min_bw,u32 max_bw,u32 shared_bw)5122 ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle,
5123 u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
5124 {
5125 struct ice_sched_node *tc_node, *vsi_node, *cfg_node;
5126 enum ice_status status;
5127 u8 layer_num;
5128
5129 tc_node = ice_sched_get_tc_node(pi, tc);
5130 if (!tc_node)
5131 return ICE_ERR_CFG;
5132
5133 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
5134 if (!vsi_node)
5135 return ICE_ERR_CFG;
5136
5137 layer_num = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW,
5138 vsi_node->tx_sched_layer);
5139 if (layer_num >= pi->hw->num_tx_sched_layers)
5140 return ICE_ERR_PARAM;
5141
5142 /* SRL node may be different */
5143 cfg_node = ice_sched_get_srl_node(vsi_node, layer_num);
5144 if (!cfg_node)
5145 return ICE_ERR_CFG;
5146
5147 status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
5148 cfg_node, ICE_MIN_BW,
5149 min_bw);
5150 if (status)
5151 return status;
5152
5153 status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
5154 cfg_node, ICE_MAX_BW,
5155 max_bw);
5156 if (status)
5157 return status;
5158
5159 return ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node,
5160 ICE_SHARED_BW, shared_bw);
5161 }
5162
/**
 * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @min_bw: minimum bandwidth in Kbps
 * @max_bw: maximum bandwidth in Kbps
 * @shared_bw: shared bandwidth in Kbps
 *
 * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
 * classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is
 * passed, it removes those value(s) from the node.
 */
enum ice_status
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
				u32 min_bw, u32 max_bw, u32 shared_bw)
{
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	if (!pi)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);
	/* validate the SRL placement before touching any node */
	status = ice_sched_validate_vsi_srl_node(pi, vsi_handle);
	if (status)
		goto exit_set_vsi_bw_shared_lmt;
	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *vsi_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		/* apply min/max/shared limits for this TC's VSI node */
		status = ice_sched_set_vsi_node_srl_per_tc(pi, vsi_handle, tc,
							   min_bw, max_bw,
							   shared_bw);
		if (status)
			break;
	}

exit_set_vsi_bw_shared_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}
5215
/**
 * ice_sched_validate_agg_srl_node - validate AGG SRL node
 * @pi: port information structure
 * @agg_id: aggregator ID
 *
 * This function validates SRL node of the AGG node if available SRL layer is
 * different than the AGG node layer on all TC(s).This function needs to be
 * called with scheduler lock held.
 */
static enum ice_status
ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
{
	u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
	struct ice_sched_agg_info *agg_info;
	bool agg_id_present = false;
	enum ice_status status = ICE_SUCCESS;
	u8 tc;

	/* make sure the aggregator is known to this HW instance */
	LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info,
			    list_entry)
		if (agg_info->agg_id == agg_id) {
			agg_id_present = true;
			break;
		}
	if (!agg_id_present)
		return ICE_ERR_PARAM;
	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;
		enum ice_rl_type rl_type = ICE_SHARED_BW;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;
		/* SRL bandwidth layer selection; computed once from the
		 * first AGG node found and reused for the remaining TCs
		 */
		if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
			u8 node_layer = agg_node->tx_sched_layer;
			u8 layer_num;

			layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
								node_layer);
			if (layer_num >= pi->hw->num_tx_sched_layers)
				return ICE_ERR_PARAM;
			sel_layer = layer_num;
		}

		/* stop at the first TC whose node is invalid for SRL */
		status = ice_sched_validate_srl_node(agg_node, sel_layer);
		if (status)
			break;
	}
	return status;
}
5272
5273 /**
5274 * ice_sched_validate_agg_id - Validate aggregator id
5275 * @pi: port information structure
5276 * @agg_id: aggregator ID
5277 *
5278 * This function validates aggregator id. Caller holds the scheduler lock.
5279 */
5280 static enum ice_status
ice_sched_validate_agg_id(struct ice_port_info * pi,u32 agg_id)5281 ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
5282 {
5283 struct ice_sched_agg_info *agg_info;
5284 struct ice_sched_agg_info *tmp;
5285 bool agg_id_present = false;
5286 enum ice_status status;
5287
5288 status = ice_sched_validate_agg_srl_node(pi, agg_id);
5289 if (status)
5290 return status;
5291
5292 LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list,
5293 ice_sched_agg_info, list_entry)
5294 if (agg_info->agg_id == agg_id) {
5295 agg_id_present = true;
5296 break;
5297 }
5298
5299 if (!agg_id_present)
5300 return ICE_ERR_PARAM;
5301
5302 return ICE_SUCCESS;
5303 }
5304
5305 /**
5306 * ice_sched_set_save_agg_srl_node_bw - set aggregator shared limit values
5307 * @pi: port information structure
5308 * @agg_id: aggregator ID
5309 * @tc: traffic class
5310 * @srl_node: sched node to configure
5311 * @rl_type: rate limit type minimum, maximum, or shared
5312 * @bw: minimum, maximum, or shared bandwidth in Kbps
5313 *
5314 * Configure shared rate limiter(SRL) of aggregator type nodes across
5315 * requested traffic class, and saves those value for later use for
5316 * replaying purposes. The caller holds the scheduler lock.
5317 */
5318 static enum ice_status
ice_sched_set_save_agg_srl_node_bw(struct ice_port_info * pi,u32 agg_id,u8 tc,struct ice_sched_node * srl_node,enum ice_rl_type rl_type,u32 bw)5319 ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
5320 struct ice_sched_node *srl_node,
5321 enum ice_rl_type rl_type, u32 bw)
5322 {
5323 enum ice_status status;
5324
5325 if (bw == ICE_SCHED_DFLT_BW) {
5326 status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
5327 } else {
5328 status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw);
5329 if (status)
5330 return status;
5331 status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
5332 }
5333 return status;
5334 }
5335
5336 /**
5337 * ice_sched_set_agg_node_srl_per_tc - set aggregator SRL per tc
5338 * @pi: port information structure
5339 * @agg_id: aggregator ID
5340 * @tc: traffic class
5341 * @min_bw: minimum bandwidth in Kbps
5342 * @max_bw: maximum bandwidth in Kbps
5343 * @shared_bw: shared bandwidth in Kbps
5344 *
5345 * This function configures the shared rate limiter(SRL) of aggregator type
5346 * node for a given traffic class for aggregator matching agg_id. When BW
5347 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller
5348 * holds the scheduler lock.
5349 */
5350 static enum ice_status
ice_sched_set_agg_node_srl_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,u32 min_bw,u32 max_bw,u32 shared_bw)5351 ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id,
5352 u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
5353 {
5354 struct ice_sched_node *tc_node, *agg_node, *cfg_node;
5355 enum ice_rl_type rl_type = ICE_SHARED_BW;
5356 enum ice_status status = ICE_ERR_CFG;
5357 u8 layer_num;
5358
5359 tc_node = ice_sched_get_tc_node(pi, tc);
5360 if (!tc_node)
5361 return ICE_ERR_CFG;
5362
5363 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
5364 if (!agg_node)
5365 return ICE_ERR_CFG;
5366
5367 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
5368 agg_node->tx_sched_layer);
5369 if (layer_num >= pi->hw->num_tx_sched_layers)
5370 return ICE_ERR_PARAM;
5371
5372 /* SRL node may be different */
5373 cfg_node = ice_sched_get_srl_node(agg_node, layer_num);
5374 if (!cfg_node)
5375 return ICE_ERR_CFG;
5376
5377 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
5378 ICE_MIN_BW, min_bw);
5379 if (status)
5380 return status;
5381
5382 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
5383 ICE_MAX_BW, max_bw);
5384 if (status)
5385 return status;
5386
5387 status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
5388 ICE_SHARED_BW, shared_bw);
5389 return status;
5390 }
5391
/**
 * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @min_bw: minimum bandwidth in Kbps
 * @max_bw: maximum bandwidth in Kbps
 * @shared_bw: shared bandwidth in Kbps
 *
 * This function configures the shared rate limiter(SRL) of all aggregator type
 * nodes across all traffic classes for aggregator matching agg_id. When
 * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the
 * node(s).
 */
enum ice_status
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id,
				u32 min_bw, u32 max_bw, u32 shared_bw)
{
	enum ice_status status;
	u8 tc;

	if (!pi)
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);
	/* validate aggregator ID and SRL placement before touching nodes */
	status = ice_sched_validate_agg_id(pi, agg_id);
	if (status)
		goto exit_agg_bw_shared_lmt;

	/* Return success if no nodes are present across TC */
	ice_for_each_traffic_class(tc) {
		struct ice_sched_node *tc_node, *agg_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (!tc_node)
			continue;

		agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
		if (!agg_node)
			continue;

		/* apply min/max/shared limits for this TC's AGG node */
		status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc,
							   min_bw, max_bw,
							   shared_bw);
		if (status)
			break;
	}

exit_agg_bw_shared_lmt:
	ice_release_lock(&pi->sched_lock);
	return status;
}
5443
5444 /**
5445 * ice_sched_set_agg_bw_shared_lmt_per_tc - set aggregator BW shared lmt per tc
5446 * @pi: port information structure
5447 * @agg_id: aggregator ID
5448 * @tc: traffic class
5449 * @min_bw: minimum bandwidth in Kbps
5450 * @max_bw: maximum bandwidth in Kbps
5451 * @shared_bw: shared bandwidth in Kbps
5452 *
5453 * This function configures the shared rate limiter(SRL) of aggregator type
5454 * node for a given traffic class for aggregator matching agg_id. When BW
5455 * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node.
5456 */
5457 enum ice_status
ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info * pi,u32 agg_id,u8 tc,u32 min_bw,u32 max_bw,u32 shared_bw)5458 ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
5459 u8 tc, u32 min_bw, u32 max_bw,
5460 u32 shared_bw)
5461 {
5462 enum ice_status status;
5463
5464 if (!pi)
5465 return ICE_ERR_PARAM;
5466 ice_acquire_lock(&pi->sched_lock);
5467 status = ice_sched_validate_agg_id(pi, agg_id);
5468 if (status)
5469 goto exit_agg_bw_shared_lmt_per_tc;
5470
5471 status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, min_bw,
5472 max_bw, shared_bw);
5473
5474 exit_agg_bw_shared_lmt_per_tc:
5475 ice_release_lock(&pi->sched_lock);
5476 return status;
5477 }
5478
5479 /**
5480 * ice_sched_cfg_sibl_node_prio - configure node sibling priority
5481 * @pi: port information structure
5482 * @node: sched node to configure
5483 * @priority: sibling priority
5484 *
5485 * This function configures node element's sibling priority only. This
5486 * function needs to be called with scheduler lock held.
5487 */
5488 enum ice_status
ice_sched_cfg_sibl_node_prio(struct ice_port_info * pi,struct ice_sched_node * node,u8 priority)5489 ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
5490 struct ice_sched_node *node, u8 priority)
5491 {
5492 struct ice_aqc_txsched_elem_data buf;
5493 struct ice_aqc_txsched_elem *data;
5494 struct ice_hw *hw = pi->hw;
5495 enum ice_status status;
5496
5497 if (!hw)
5498 return ICE_ERR_PARAM;
5499 buf = node->info;
5500 data = &buf.data;
5501 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
5502 priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) &
5503 ICE_AQC_ELEM_GENERIC_PRIO_M;
5504 data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M;
5505 data->generic |= priority;
5506
5507 /* Configure element */
5508 status = ice_sched_update_elem(hw, node, &buf);
5509 return status;
5510 }
5511
5512 /**
5513 * ice_cfg_rl_burst_size - Set burst size value
5514 * @hw: pointer to the HW struct
5515 * @bytes: burst size in bytes
5516 *
5517 * This function configures/set the burst size to requested new value. The new
5518 * burst size value is used for future rate limit calls. It doesn't change the
5519 * existing or previously created RL profiles.
5520 */
ice_cfg_rl_burst_size(struct ice_hw * hw,u32 bytes)5521 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
5522 {
5523 u16 burst_size_to_prog;
5524
5525 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
5526 bytes > ICE_MAX_BURST_SIZE_ALLOWED)
5527 return ICE_ERR_PARAM;
5528 if (ice_round_to_num(bytes, 64) <=
5529 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
5530 /* 64 byte granularity case */
5531 /* Disable MSB granularity bit */
5532 burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
5533 /* round number to nearest 64 byte granularity */
5534 bytes = ice_round_to_num(bytes, 64);
5535 /* The value is in 64 byte chunks */
5536 burst_size_to_prog |= (u16)(bytes / 64);
5537 } else {
5538 /* k bytes granularity case */
5539 /* Enable MSB granularity bit */
5540 burst_size_to_prog = ICE_KBYTE_GRANULARITY;
5541 /* round number to nearest 1024 granularity */
5542 bytes = ice_round_to_num(bytes, 1024);
5543 /* check rounding doesn't go beyond allowed */
5544 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
5545 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
5546 /* The value is in k bytes */
5547 burst_size_to_prog |= (u16)(bytes / 1024);
5548 }
5549 hw->max_burst_size = burst_size_to_prog;
5550 return ICE_SUCCESS;
5551 }
5552
5553 /**
5554 * ice_sched_replay_node_prio - re-configure node priority
5555 * @hw: pointer to the HW struct
5556 * @node: sched node to configure
5557 * @priority: priority value
5558 *
5559 * This function configures node element's priority value. It
5560 * needs to be called with scheduler lock held.
5561 */
5562 static enum ice_status
ice_sched_replay_node_prio(struct ice_hw * hw,struct ice_sched_node * node,u8 priority)5563 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
5564 u8 priority)
5565 {
5566 struct ice_aqc_txsched_elem_data buf;
5567 struct ice_aqc_txsched_elem *data;
5568 enum ice_status status;
5569
5570 buf = node->info;
5571 data = &buf.data;
5572 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
5573 data->generic = priority;
5574
5575 /* Configure element */
5576 status = ice_sched_update_elem(hw, node, &buf);
5577 return status;
5578 }
5579
/**
 * ice_sched_replay_node_bw - replay node(s) BW
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @bw_t_info: BW type information
 *
 * This function restores node's BW from bw_t_info. Each bit in the
 * bw_t_bitmap selects which saved setting (priority, CIR/EIR limit or
 * alloc, shared limit) is replayed; the first failure aborts the replay.
 * The caller needs to hold the scheduler lock.
 */
static enum ice_status
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
			 struct ice_bw_type_info *bw_t_info)
{
	struct ice_port_info *pi = hw->port_info;
	enum ice_status status = ICE_ERR_PARAM;
	u16 bw_alloc;

	if (!node)
		return status;
	/* nothing saved for this node - nothing to replay */
	if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
		return ICE_SUCCESS;
	/* replay node priority */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) {
		status = ice_sched_replay_node_prio(hw, node,
						    bw_t_info->generic);
		if (status)
			return status;
	}
	/* replay committed (minimum) BW limit */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) {
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
						   bw_t_info->cir_bw.bw);
		if (status)
			return status;
	}
	/* replay committed BW allocation (weight) */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) {
		bw_alloc = bw_t_info->cir_bw.bw_alloc;
		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
						     bw_alloc);
		if (status)
			return status;
	}
	/* replay excess (maximum) BW limit */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) {
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
						   bw_t_info->eir_bw.bw);
		if (status)
			return status;
	}
	/* replay excess BW allocation (weight) */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) {
		bw_alloc = bw_t_info->eir_bw.bw_alloc;
		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
						     bw_alloc);
		if (status)
			return status;
	}
	/* replay shared BW limit */
	if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED))
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
						   bw_t_info->shared_bw);
	return status;
}
5638
5639 /**
5640 * ice_sched_replay_agg_bw - replay aggregator node(s) BW
5641 * @hw: pointer to the HW struct
5642 * @agg_info: aggregator data structure
5643 *
5644 * This function re-creates aggregator type nodes. The caller needs to hold
5645 * the scheduler lock.
5646 */
5647 static enum ice_status
ice_sched_replay_agg_bw(struct ice_hw * hw,struct ice_sched_agg_info * agg_info)5648 ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info)
5649 {
5650 struct ice_sched_node *tc_node, *agg_node;
5651 enum ice_status status = ICE_SUCCESS;
5652 u8 tc;
5653
5654 if (!agg_info)
5655 return ICE_ERR_PARAM;
5656 ice_for_each_traffic_class(tc) {
5657 if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap,
5658 ICE_BW_TYPE_CNT))
5659 continue;
5660 tc_node = ice_sched_get_tc_node(hw->port_info, tc);
5661 if (!tc_node) {
5662 status = ICE_ERR_PARAM;
5663 break;
5664 }
5665 agg_node = ice_sched_get_agg_node(hw->port_info, tc_node,
5666 agg_info->agg_id);
5667 if (!agg_node) {
5668 status = ICE_ERR_PARAM;
5669 break;
5670 }
5671 status = ice_sched_replay_node_bw(hw, agg_node,
5672 &agg_info->bw_t_info[tc]);
5673 if (status)
5674 break;
5675 }
5676 return status;
5677 }
5678
5679 /**
5680 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
5681 * @pi: port info struct
5682 * @tc_bitmap: 8 bits TC bitmap to check
5683 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
5684 *
5685 * This function returns enabled TC bitmap in variable ena_tc_bitmap. Some TCs
5686 * may be missing, it returns enabled TCs. This function needs to be called with
5687 * scheduler lock held.
5688 */
5689 static void
ice_sched_get_ena_tc_bitmap(struct ice_port_info * pi,ice_bitmap_t * tc_bitmap,ice_bitmap_t * ena_tc_bitmap)5690 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap,
5691 ice_bitmap_t *ena_tc_bitmap)
5692 {
5693 u8 tc;
5694
5695 /* Some TC(s) may be missing after reset, adjust for replay */
5696 ice_for_each_traffic_class(tc)
5697 if (ice_is_tc_ena(*tc_bitmap, tc) &&
5698 (ice_sched_get_tc_node(pi, tc)))
5699 ice_set_bit(tc, ena_tc_bitmap);
5700 }
5701
5702 /**
5703 * ice_sched_replay_agg - recreate aggregator node(s)
5704 * @hw: pointer to the HW struct
5705 *
5706 * This function recreate aggregator type nodes which are not replayed earlier.
5707 * It also replay aggregator BW information. These aggregator nodes are not
5708 * associated with VSI type node yet.
5709 */
ice_sched_replay_agg(struct ice_hw * hw)5710 void ice_sched_replay_agg(struct ice_hw *hw)
5711 {
5712 struct ice_port_info *pi = hw->port_info;
5713 struct ice_sched_agg_info *agg_info;
5714
5715 ice_acquire_lock(&pi->sched_lock);
5716 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
5717 list_entry)
5718 /* replay aggregator (re-create aggregator node) */
5719 if (!ice_cmp_bitmap(agg_info->tc_bitmap,
5720 agg_info->replay_tc_bitmap,
5721 ICE_MAX_TRAFFIC_CLASS)) {
5722 ice_declare_bitmap(replay_bitmap,
5723 ICE_MAX_TRAFFIC_CLASS);
5724 enum ice_status status;
5725
5726 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5727 ice_sched_get_ena_tc_bitmap(pi,
5728 agg_info->replay_tc_bitmap,
5729 replay_bitmap);
5730 status = ice_sched_cfg_agg(hw->port_info,
5731 agg_info->agg_id,
5732 ICE_AGG_TYPE_AGG,
5733 replay_bitmap);
5734 if (status) {
5735 ice_info(hw, "Replay agg id[%d] failed\n",
5736 agg_info->agg_id);
5737 /* Move on to next one */
5738 continue;
5739 }
5740 /* Replay aggregator node BW (restore aggregator BW) */
5741 status = ice_sched_replay_agg_bw(hw, agg_info);
5742 if (status)
5743 ice_info(hw, "Replay agg bw [id=%d] failed\n",
5744 agg_info->agg_id);
5745 }
5746 ice_release_lock(&pi->sched_lock);
5747 }
5748
5749 /**
5750 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
5751 * @hw: pointer to the HW struct
5752 *
5753 * This function initialize aggregator(s) TC bitmap to zero. A required
5754 * preinit step for replaying aggregators.
5755 */
ice_sched_replay_agg_vsi_preinit(struct ice_hw * hw)5756 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
5757 {
5758 struct ice_port_info *pi = hw->port_info;
5759 struct ice_sched_agg_info *agg_info;
5760
5761 ice_acquire_lock(&pi->sched_lock);
5762 LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
5763 list_entry) {
5764 struct ice_sched_agg_vsi_info *agg_vsi_info;
5765
5766 agg_info->tc_bitmap[0] = 0;
5767 LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
5768 ice_sched_agg_vsi_info, list_entry)
5769 agg_vsi_info->tc_bitmap[0] = 0;
5770 }
5771 ice_release_lock(&pi->sched_lock);
5772 }
5773
5774 /**
5775 * ice_sched_replay_root_node_bw - replay root node BW
5776 * @pi: port information structure
5777 *
5778 * Replay root node BW settings.
5779 */
ice_sched_replay_root_node_bw(struct ice_port_info * pi)5780 enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi)
5781 {
5782 enum ice_status status = ICE_SUCCESS;
5783
5784 if (!pi->hw)
5785 return ICE_ERR_PARAM;
5786 ice_acquire_lock(&pi->sched_lock);
5787
5788 status = ice_sched_replay_node_bw(pi->hw, pi->root,
5789 &pi->root_node_bw_t_info);
5790 ice_release_lock(&pi->sched_lock);
5791 return status;
5792 }
5793
5794 /**
5795 * ice_sched_replay_tc_node_bw - replay TC node(s) BW
5796 * @pi: port information structure
5797 *
5798 * This function replay TC nodes.
5799 */
ice_sched_replay_tc_node_bw(struct ice_port_info * pi)5800 enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
5801 {
5802 enum ice_status status = ICE_SUCCESS;
5803 u8 tc;
5804
5805 if (!pi->hw)
5806 return ICE_ERR_PARAM;
5807 ice_acquire_lock(&pi->sched_lock);
5808 ice_for_each_traffic_class(tc) {
5809 struct ice_sched_node *tc_node;
5810
5811 tc_node = ice_sched_get_tc_node(pi, tc);
5812 if (!tc_node)
5813 continue; /* TC not present */
5814 status = ice_sched_replay_node_bw(pi->hw, tc_node,
5815 &pi->tc_node_bw_t_info[tc]);
5816 if (status)
5817 break;
5818 }
5819 ice_release_lock(&pi->sched_lock);
5820 return status;
5821 }
5822
5823 /**
5824 * ice_sched_replay_vsi_bw - replay VSI type node(s) BW
5825 * @hw: pointer to the HW struct
5826 * @vsi_handle: software VSI handle
5827 * @tc_bitmap: 8 bits TC bitmap
5828 *
5829 * This function replays VSI type nodes bandwidth. This function needs to be
5830 * called with scheduler lock held.
5831 */
5832 static enum ice_status
ice_sched_replay_vsi_bw(struct ice_hw * hw,u16 vsi_handle,ice_bitmap_t * tc_bitmap)5833 ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
5834 ice_bitmap_t *tc_bitmap)
5835 {
5836 struct ice_sched_node *vsi_node, *tc_node;
5837 struct ice_port_info *pi = hw->port_info;
5838 struct ice_bw_type_info *bw_t_info;
5839 struct ice_vsi_ctx *vsi_ctx;
5840 enum ice_status status = ICE_SUCCESS;
5841 u8 tc;
5842
5843 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
5844 if (!vsi_ctx)
5845 return ICE_ERR_PARAM;
5846 ice_for_each_traffic_class(tc) {
5847 if (!ice_is_tc_ena(*tc_bitmap, tc))
5848 continue;
5849 tc_node = ice_sched_get_tc_node(pi, tc);
5850 if (!tc_node)
5851 continue;
5852 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
5853 if (!vsi_node)
5854 continue;
5855 bw_t_info = &vsi_ctx->sched.bw_t_info[tc];
5856 status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info);
5857 if (status)
5858 break;
5859 }
5860 return status;
5861 }
5862
5863 /**
5864 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
5865 * @hw: pointer to the HW struct
5866 * @vsi_handle: software VSI handle
5867 *
5868 * This function replays aggregator node, VSI to aggregator type nodes, and
5869 * their node bandwidth information. This function needs to be called with
5870 * scheduler lock held.
5871 */
5872 static enum ice_status
ice_sched_replay_vsi_agg(struct ice_hw * hw,u16 vsi_handle)5873 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
5874 {
5875 ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5876 struct ice_sched_agg_vsi_info *agg_vsi_info;
5877 struct ice_port_info *pi = hw->port_info;
5878 struct ice_sched_agg_info *agg_info;
5879 enum ice_status status;
5880
5881 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5882 if (!ice_is_vsi_valid(hw, vsi_handle))
5883 return ICE_ERR_PARAM;
5884 agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
5885 if (!agg_info)
5886 return ICE_SUCCESS; /* Not present in list - default Agg case */
5887 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
5888 if (!agg_vsi_info)
5889 return ICE_SUCCESS; /* Not present in list - default Agg case */
5890 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
5891 replay_bitmap);
5892 /* Replay aggregator node associated to vsi_handle */
5893 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
5894 ICE_AGG_TYPE_AGG, replay_bitmap);
5895 if (status)
5896 return status;
5897 /* Replay aggregator node BW (restore aggregator BW) */
5898 status = ice_sched_replay_agg_bw(hw, agg_info);
5899 if (status)
5900 return status;
5901
5902 ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
5903 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
5904 replay_bitmap);
5905 /* Move this VSI (vsi_handle) to above aggregator */
5906 status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
5907 replay_bitmap);
5908 if (status)
5909 return status;
5910 /* Replay VSI BW (restore VSI BW) */
5911 return ice_sched_replay_vsi_bw(hw, vsi_handle,
5912 agg_vsi_info->tc_bitmap);
5913 }
5914
5915 /**
5916 * ice_replay_vsi_agg - replay VSI to aggregator node
5917 * @hw: pointer to the HW struct
5918 * @vsi_handle: software VSI handle
5919 *
5920 * This function replays association of VSI to aggregator type nodes, and
5921 * node bandwidth information.
5922 */
ice_replay_vsi_agg(struct ice_hw * hw,u16 vsi_handle)5923 enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
5924 {
5925 struct ice_port_info *pi = hw->port_info;
5926 enum ice_status status;
5927
5928 ice_acquire_lock(&pi->sched_lock);
5929 status = ice_sched_replay_vsi_agg(hw, vsi_handle);
5930 ice_release_lock(&pi->sched_lock);
5931 return status;
5932 }
5933
5934 /**
5935 * ice_sched_replay_q_bw - replay queue type node BW
5936 * @pi: port information structure
5937 * @q_ctx: queue context structure
5938 *
5939 * This function replays queue type node bandwidth. This function needs to be
5940 * called with scheduler lock held.
5941 */
5942 enum ice_status
ice_sched_replay_q_bw(struct ice_port_info * pi,struct ice_q_ctx * q_ctx)5943 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
5944 {
5945 struct ice_sched_node *q_node;
5946
5947 /* Following also checks the presence of node in tree */
5948 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
5949 if (!q_node)
5950 return ICE_ERR_PARAM;
5951 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
5952 }
5953