1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 #include "ice_sched.h"
5
6 /**
7 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
8 * @pi: port information structure
9 * @info: Scheduler element information from firmware
10 *
11 * This function inserts the root node of the scheduling tree topology
12 * to the SW DB.
13 */
14 static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
16 struct ice_aqc_txsched_elem_data *info)
17 {
18 struct ice_sched_node *root;
19 struct ice_hw *hw;
20
21 if (!pi)
22 return ICE_ERR_PARAM;
23
24 hw = pi->hw;
25
26 root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
27 if (!root)
28 return ICE_ERR_NO_MEMORY;
29
30 /* coverity[suspicious_sizeof] */
31 root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
32 sizeof(*root), GFP_KERNEL);
33 if (!root->children) {
34 devm_kfree(ice_hw_to_dev(hw), root);
35 return ICE_ERR_NO_MEMORY;
36 }
37
38 memcpy(&root->info, info, sizeof(*info));
39 pi->root = root;
40 return 0;
41 }
42
43 /**
44 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
45 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
46 * @teid: node TEID to search
47 *
48 * This function searches for a node matching the TEID in the scheduling tree
49 * from the SW DB. The search is recursive and is restricted by the number of
50 * layers it has searched through; stopping at the max supported layer.
51 *
52 * This function needs to be called when holding the port_info->sched_lock
53 */
54 struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
56 {
57 u16 i;
58
	/* The TEID is the same as that of the start_node */
60 if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
61 return start_node;
62
63 /* The node has no children or is at the max layer */
64 if (!start_node->num_children ||
65 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
66 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
67 return NULL;
68
69 /* Check if TEID matches to any of the children nodes */
70 for (i = 0; i < start_node->num_children; i++)
71 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
72 return start_node->children[i];
73
74 /* Search within each child's sub-tree */
75 for (i = 0; i < start_node->num_children; i++) {
76 struct ice_sched_node *tmp;
77
78 tmp = ice_sched_find_node_by_teid(start_node->children[i],
79 teid);
80 if (tmp)
81 return tmp;
82 }
83
84 return NULL;
85 }
86
87 /**
88 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
89 * @hw: pointer to the HW struct
90 * @cmd_opc: cmd opcode
91 * @elems_req: number of elements to request
92 * @buf: pointer to buffer
93 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements in response
95 * @cd: pointer to command details structure or NULL
96 *
97 * This function sends a scheduling elements cmd (cmd_opc)
98 */
99 static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
101 u16 elems_req, void *buf, u16 buf_size,
102 u16 *elems_resp, struct ice_sq_cd *cd)
103 {
104 struct ice_aqc_sched_elem_cmd *cmd;
105 struct ice_aq_desc desc;
106 enum ice_status status;
107
108 cmd = &desc.params.sched_elem_cmd;
109 ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
110 cmd->num_elem_req = cpu_to_le16(elems_req);
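	/* mark the descriptor as carrying a buffer for FW to read (RD flag) */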
111 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
112 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
113 if (!status && elems_resp)
114 *elems_resp = le16_to_cpu(cmd->num_elem_resp);
115
116 return status;
117 }
118
119 /**
120 * ice_aq_query_sched_elems - query scheduler elements
121 * @hw: pointer to the HW struct
122 * @elems_req: number of elements to query
123 * @buf: pointer to buffer
124 * @buf_size: buffer size in bytes
125 * @elems_ret: returns total number of elements returned
126 * @cd: pointer to command details structure or NULL
127 *
128 * Query scheduling elements (0x0404)
129 */
130 enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
132 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
133 u16 *elems_ret, struct ice_sq_cd *cd)
134 {
135 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
136 elems_req, (void *)buf, buf_size,
137 elems_ret, cd);
138 }
139
140 /**
141 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
142 * @pi: port information structure
143 * @layer: Scheduler layer of the node
144 * @info: Scheduler element information from firmware
145 *
146 * This function inserts a scheduler node to the SW DB.
147 */
148 enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
150 struct ice_aqc_txsched_elem_data *info)
151 {
152 struct ice_aqc_txsched_elem_data elem;
153 struct ice_sched_node *parent;
154 struct ice_sched_node *node;
155 enum ice_status status;
156 struct ice_hw *hw;
157
158 if (!pi)
159 return ICE_ERR_PARAM;
160
161 hw = pi->hw;
162
163 /* A valid parent node should be there */
164 parent = ice_sched_find_node_by_teid(pi->root,
165 le32_to_cpu(info->parent_teid));
166 if (!parent) {
167 ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
168 le32_to_cpu(info->parent_teid));
169 return ICE_ERR_PARAM;
170 }
171
172 /* query the current node information from FW before adding it
173 * to the SW DB
174 */
175 status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
176 if (status)
177 return status;
178
179 node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
180 if (!node)
181 return ICE_ERR_NO_MEMORY;
182 if (hw->max_children[layer]) {
183 /* coverity[suspicious_sizeof] */
184 node->children = devm_kcalloc(ice_hw_to_dev(hw),
185 hw->max_children[layer],
186 sizeof(*node), GFP_KERNEL);
187 if (!node->children) {
188 devm_kfree(ice_hw_to_dev(hw), node);
189 return ICE_ERR_NO_MEMORY;
190 }
191 }
192
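	/* initialize the new SW node and link it into the parent's child list */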
193 node->in_use = true;
194 node->parent = parent;
195 node->tx_sched_layer = layer;
196 parent->children[parent->num_children++] = node;
197 node->info = elem;
198 return 0;
199 }
200
201 /**
202 * ice_aq_delete_sched_elems - delete scheduler elements
203 * @hw: pointer to the HW struct
204 * @grps_req: number of groups to delete
205 * @buf: pointer to buffer
206 * @buf_size: buffer size in bytes
207 * @grps_del: returns total number of elements deleted
208 * @cd: pointer to command details structure or NULL
209 *
210 * Delete scheduling elements (0x040F)
211 */
212 static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
214 struct ice_aqc_delete_elem *buf, u16 buf_size,
215 u16 *grps_del, struct ice_sq_cd *cd)
216 {
217 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
218 grps_req, (void *)buf, buf_size,
219 grps_del, cd);
220 }
221
222 /**
223 * ice_sched_remove_elems - remove nodes from HW
224 * @hw: pointer to the HW struct
225 * @parent: pointer to the parent node
226 * @num_nodes: number of nodes
227 * @node_teids: array of node teids to be deleted
228 *
 * This function removes nodes from HW
230 */
231 static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
233 u16 num_nodes, u32 *node_teids)
234 {
235 struct ice_aqc_delete_elem *buf;
236 u16 i, num_groups_removed = 0;
237 enum ice_status status;
238 u16 buf_size;
239
240 buf_size = struct_size(buf, teid, num_nodes);
241 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
242 if (!buf)
243 return ICE_ERR_NO_MEMORY;
244
245 buf->hdr.parent_teid = parent->info.node_teid;
246 buf->hdr.num_elems = cpu_to_le16(num_nodes);
247 for (i = 0; i < num_nodes; i++)
248 buf->teid[i] = cpu_to_le32(node_teids[i]);
249
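	/* all of the TEIDs share the same parent, so delete them as one group */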
250 status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
251 &num_groups_removed, NULL);
252 if (status || num_groups_removed != 1)
253 ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
254 hw->adminq.sq_last_status);
255
256 devm_kfree(ice_hw_to_dev(hw), buf);
257 return status;
258 }
259
260 /**
261 * ice_sched_get_first_node - get the first node of the given layer
262 * @pi: port information structure
 * @parent: pointer to the base node of the subtree
264 * @layer: layer number
265 *
266 * This function retrieves the first node of the given layer from the subtree
267 */
268 static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
270 struct ice_sched_node *parent, u8 layer)
271 {
272 return pi->sib_head[parent->tc_num][layer];
273 }
274
275 /**
276 * ice_sched_get_tc_node - get pointer to TC node
277 * @pi: port information structure
278 * @tc: TC number
279 *
280 * This function returns the TC node pointer
281 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
283 {
284 u8 i;
285
286 if (!pi || !pi->root)
287 return NULL;
288 for (i = 0; i < pi->root->num_children; i++)
289 if (pi->root->children[i]->tc_num == tc)
290 return pi->root->children[i];
291 return NULL;
292 }
293
294 /**
295 * ice_free_sched_node - Free a Tx scheduler node from SW DB
296 * @pi: port information structure
297 * @node: pointer to the ice_sched_node struct
298 *
299 * This function frees up a node from SW DB as well as from HW
300 *
301 * This function needs to be called with the port_info->sched_lock held
302 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
304 {
305 struct ice_sched_node *parent;
306 struct ice_hw *hw = pi->hw;
307 u8 i, j;
308
309 /* Free the children before freeing up the parent node
310 * The parent array is updated below and that shifts the nodes
311 * in the array. So always pick the first child if num children > 0
312 */
313 while (node->num_children)
314 ice_free_sched_node(pi, node->children[0]);
315
316 /* Leaf, TC and root nodes can't be deleted by SW */
317 if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
318 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
319 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
320 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
321 u32 teid = le32_to_cpu(node->info.node_teid);
322
323 ice_sched_remove_elems(hw, node->parent, 1, &teid);
324 }
325 parent = node->parent;
326 /* root has no parent */
327 if (parent) {
328 struct ice_sched_node *p;
329
330 /* update the parent */
331 for (i = 0; i < parent->num_children; i++)
332 if (parent->children[i] == node) {
333 for (j = i + 1; j < parent->num_children; j++)
334 parent->children[j - 1] =
335 parent->children[j];
336 parent->num_children--;
337 break;
338 }
339
340 p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
341 while (p) {
342 if (p->sibling == node) {
343 p->sibling = node->sibling;
344 break;
345 }
346 p = p->sibling;
347 }
348
349 /* update the sibling head if head is getting removed */
350 if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
351 pi->sib_head[node->tc_num][node->tx_sched_layer] =
352 node->sibling;
353 }
354
355 /* leaf nodes have no children */
356 if (node->children)
357 devm_kfree(ice_hw_to_dev(hw), node->children);
358 devm_kfree(ice_hw_to_dev(hw), node);
359 }
360
361 /**
362 * ice_aq_get_dflt_topo - gets default scheduler topology
363 * @hw: pointer to the HW struct
364 * @lport: logical port number
365 * @buf: pointer to buffer
366 * @buf_size: buffer size in bytes
367 * @num_branches: returns total number of queue to port branches
368 * @cd: pointer to command details structure or NULL
369 *
 * Get default scheduler topology (0x0400)
371 */
372 static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
374 struct ice_aqc_get_topo_elem *buf, u16 buf_size,
375 u8 *num_branches, struct ice_sq_cd *cd)
376 {
377 struct ice_aqc_get_topo *cmd;
378 struct ice_aq_desc desc;
379 enum ice_status status;
380
381 cmd = &desc.params.get_topo;
382 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
383 cmd->port_num = lport;
384 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
385 if (!status && num_branches)
386 *num_branches = cmd->num_branches;
387
388 return status;
389 }
390
391 /**
 * ice_aq_add_sched_elems - adds scheduling elements
393 * @hw: pointer to the HW struct
394 * @grps_req: the number of groups that are requested to be added
395 * @buf: pointer to buffer
396 * @buf_size: buffer size in bytes
397 * @grps_added: returns total number of groups added
398 * @cd: pointer to command details structure or NULL
399 *
400 * Add scheduling elements (0x0401)
401 */
402 static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
404 struct ice_aqc_add_elem *buf, u16 buf_size,
405 u16 *grps_added, struct ice_sq_cd *cd)
406 {
407 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
408 grps_req, (void *)buf, buf_size,
409 grps_added, cd);
410 }
411
412 /**
413 * ice_aq_cfg_sched_elems - configures scheduler elements
414 * @hw: pointer to the HW struct
415 * @elems_req: number of elements to configure
416 * @buf: pointer to buffer
417 * @buf_size: buffer size in bytes
418 * @elems_cfgd: returns total number of elements configured
419 * @cd: pointer to command details structure or NULL
420 *
421 * Configure scheduling elements (0x0403)
422 */
423 static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
425 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
426 u16 *elems_cfgd, struct ice_sq_cd *cd)
427 {
428 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
429 elems_req, (void *)buf, buf_size,
430 elems_cfgd, cd);
431 }
432
433 /**
434 * ice_aq_move_sched_elems - move scheduler elements
435 * @hw: pointer to the HW struct
436 * @grps_req: number of groups to move
437 * @buf: pointer to buffer
438 * @buf_size: buffer size in bytes
439 * @grps_movd: returns total number of groups moved
440 * @cd: pointer to command details structure or NULL
441 *
442 * Move scheduling elements (0x0408)
443 */
444 static enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
446 struct ice_aqc_move_elem *buf, u16 buf_size,
447 u16 *grps_movd, struct ice_sq_cd *cd)
448 {
449 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
450 grps_req, (void *)buf, buf_size,
451 grps_movd, cd);
452 }
453
454 /**
455 * ice_aq_suspend_sched_elems - suspend scheduler elements
456 * @hw: pointer to the HW struct
457 * @elems_req: number of elements to suspend
458 * @buf: pointer to buffer
459 * @buf_size: buffer size in bytes
460 * @elems_ret: returns total number of elements suspended
461 * @cd: pointer to command details structure or NULL
462 *
463 * Suspend scheduling elements (0x0409)
464 */
465 static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
467 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
468 {
469 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
470 elems_req, (void *)buf, buf_size,
471 elems_ret, cd);
472 }
473
474 /**
475 * ice_aq_resume_sched_elems - resume scheduler elements
476 * @hw: pointer to the HW struct
477 * @elems_req: number of elements to resume
478 * @buf: pointer to buffer
479 * @buf_size: buffer size in bytes
480 * @elems_ret: returns total number of elements resumed
481 * @cd: pointer to command details structure or NULL
482 *
 * Resume scheduling elements (0x040A)
484 */
485 static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
487 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
488 {
489 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
490 elems_req, (void *)buf, buf_size,
491 elems_ret, cd);
492 }
493
494 /**
495 * ice_aq_query_sched_res - query scheduler resource
496 * @hw: pointer to the HW struct
497 * @buf_size: buffer size in bytes
498 * @buf: pointer to buffer
499 * @cd: pointer to command details structure or NULL
500 *
501 * Query scheduler resource allocation (0x0412)
502 */
503 static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
505 struct ice_aqc_query_txsched_res_resp *buf,
506 struct ice_sq_cd *cd)
507 {
508 struct ice_aq_desc desc;
509
510 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
511 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
512 }
513
514 /**
515 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
516 * @hw: pointer to the HW struct
517 * @num_nodes: number of nodes
518 * @node_teids: array of node teids to be suspended or resumed
519 * @suspend: true means suspend / false means resume
520 *
521 * This function suspends or resumes HW nodes
522 */
523 static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
525 bool suspend)
526 {
527 u16 i, buf_size, num_elem_ret = 0;
528 enum ice_status status;
529 __le32 *buf;
530
531 buf_size = sizeof(*buf) * num_nodes;
532 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
533 if (!buf)
534 return ICE_ERR_NO_MEMORY;
535
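	/* TEIDs are passed to FW in little-endian format */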
536 for (i = 0; i < num_nodes; i++)
537 buf[i] = cpu_to_le32(node_teids[i]);
538
539 if (suspend)
540 status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
541 buf_size, &num_elem_ret,
542 NULL);
543 else
544 status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
545 buf_size, &num_elem_ret,
546 NULL);
547 if (status || num_elem_ret != num_nodes)
548 ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
549
550 devm_kfree(ice_hw_to_dev(hw), buf);
551 return status;
552 }
553
554 /**
555 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
556 * @hw: pointer to the HW struct
557 * @vsi_handle: VSI handle
558 * @tc: TC number
559 * @new_numqs: number of queues
560 */
561 static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
563 {
564 struct ice_vsi_ctx *vsi_ctx;
565 struct ice_q_ctx *q_ctx;
566
567 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
568 if (!vsi_ctx)
569 return ICE_ERR_PARAM;
570 /* allocate LAN queue contexts */
571 if (!vsi_ctx->lan_q_ctx[tc]) {
572 vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
573 new_numqs,
574 sizeof(*q_ctx),
575 GFP_KERNEL);
576 if (!vsi_ctx->lan_q_ctx[tc])
577 return ICE_ERR_NO_MEMORY;
578 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
579 return 0;
580 }
	/* if the number of queues is increased, update the queue contexts */
582 if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
583 u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
584
585 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
586 sizeof(*q_ctx), GFP_KERNEL);
587 if (!q_ctx)
588 return ICE_ERR_NO_MEMORY;
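		/* carry over the existing queue contexts into the new, larger array */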
589 memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
590 prev_num * sizeof(*q_ctx));
591 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
592 vsi_ctx->lan_q_ctx[tc] = q_ctx;
593 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
594 }
595 return 0;
596 }
597
598 /**
599 * ice_aq_rl_profile - performs a rate limiting task
600 * @hw: pointer to the HW struct
601 * @opcode: opcode for add, query, or remove profile(s)
602 * @num_profiles: the number of profiles
603 * @buf: pointer to buffer
604 * @buf_size: buffer size in bytes
605 * @num_processed: number of processed add or remove profile(s) to return
606 * @cd: pointer to command details structure
607 *
608 * RL profile function to add, query, or remove profile(s)
609 */
610 static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
612 u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
613 u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
614 {
615 struct ice_aqc_rl_profile *cmd;
616 struct ice_aq_desc desc;
617 enum ice_status status;
618
619 cmd = &desc.params.rl_profile;
620
621 ice_fill_dflt_direct_cmd_desc(&desc, opcode);
622 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
623 cmd->num_profiles = cpu_to_le16(num_profiles);
624 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
625 if (!status && num_processed)
626 *num_processed = le16_to_cpu(cmd->num_processed);
627 return status;
628 }
629
630 /**
631 * ice_aq_add_rl_profile - adds rate limiting profile(s)
632 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be added
634 * @buf: pointer to buffer
635 * @buf_size: buffer size in bytes
636 * @num_profiles_added: total number of profiles added to return
637 * @cd: pointer to command details structure
638 *
639 * Add RL profile (0x0410)
640 */
641 static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
643 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
644 u16 *num_profiles_added, struct ice_sq_cd *cd)
645 {
646 return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
647 buf, buf_size, num_profiles_added, cd);
648 }
649
650 /**
651 * ice_aq_remove_rl_profile - removes RL profile(s)
652 * @hw: pointer to the HW struct
653 * @num_profiles: the number of profile(s) to remove
654 * @buf: pointer to buffer
655 * @buf_size: buffer size in bytes
656 * @num_profiles_removed: total number of profiles removed to return
657 * @cd: pointer to command details structure or NULL
658 *
659 * Remove RL profile (0x0415)
660 */
661 static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
663 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
664 u16 *num_profiles_removed, struct ice_sq_cd *cd)
665 {
666 return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
667 num_profiles, buf, buf_size,
668 num_profiles_removed, cd);
669 }
670
671 /**
672 * ice_sched_del_rl_profile - remove RL profile
673 * @hw: pointer to the HW struct
674 * @rl_info: rate limit profile information
675 *
 * If the profile ID is not referenced anymore, it removes the profile ID with
 * its associated parameters from the HW DB, and locally. The caller needs to
 * hold the scheduler lock.
679 */
680 static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
682 struct ice_aqc_rl_profile_info *rl_info)
683 {
684 struct ice_aqc_rl_profile_elem *buf;
685 u16 num_profiles_removed;
686 enum ice_status status;
687 u16 num_profiles = 1;
688
689 if (rl_info->prof_id_ref != 0)
690 return ICE_ERR_IN_USE;
691
692 /* Safe to remove profile ID */
693 buf = &rl_info->profile;
694 status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
695 &num_profiles_removed, NULL);
696 if (status || num_profiles_removed != num_profiles)
697 return ICE_ERR_CFG;
698
699 /* Delete stale entry now */
700 list_del(&rl_info->list_entry);
701 devm_kfree(ice_hw_to_dev(hw), rl_info);
702 return status;
703 }
704
705 /**
706 * ice_sched_clear_rl_prof - clears RL prof entries
707 * @pi: port information structure
708 *
 * This function removes all RL profiles from HW as well as from the SW DB.
710 */
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
712 {
713 u16 ln;
714
715 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
716 struct ice_aqc_rl_profile_info *rl_prof_elem;
717 struct ice_aqc_rl_profile_info *rl_prof_tmp;
718
719 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
720 &pi->rl_prof_list[ln], list_entry) {
721 struct ice_hw *hw = pi->hw;
722 enum ice_status status;
723
724 rl_prof_elem->prof_id_ref = 0;
725 status = ice_sched_del_rl_profile(hw, rl_prof_elem);
726 if (status) {
727 ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
				/* on error, remove the entry and free its memory anyway */
729 list_del(&rl_prof_elem->list_entry);
730 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
731 }
732 }
733 }
734 }
735
736 /**
737 * ice_sched_clear_agg - clears the aggregator related information
738 * @hw: pointer to the hardware structure
739 *
 * This function removes the aggregator list and frees up aggregator-related
 * memory previously allocated.
742 */
void ice_sched_clear_agg(struct ice_hw *hw)
744 {
745 struct ice_sched_agg_info *agg_info;
746 struct ice_sched_agg_info *atmp;
747
748 list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
749 struct ice_sched_agg_vsi_info *agg_vsi_info;
750 struct ice_sched_agg_vsi_info *vtmp;
751
752 list_for_each_entry_safe(agg_vsi_info, vtmp,
753 &agg_info->agg_vsi_list, list_entry) {
754 list_del(&agg_vsi_info->list_entry);
755 devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
756 }
757 list_del(&agg_info->list_entry);
758 devm_kfree(ice_hw_to_dev(hw), agg_info);
759 }
760 }
761
762 /**
763 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
764 * @pi: port information structure
765 *
766 * This function removes all the nodes from HW as well as from SW DB.
767 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
769 {
770 if (!pi)
771 return;
772 /* remove RL profiles related lists */
773 ice_sched_clear_rl_prof(pi);
774 if (pi->root) {
775 ice_free_sched_node(pi, pi->root);
776 pi->root = NULL;
777 }
778 }
779
780 /**
781 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
782 * @pi: port information structure
783 *
784 * Cleanup scheduling elements from SW DB
785 */
void ice_sched_clear_port(struct ice_port_info *pi)
787 {
788 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
789 return;
790
791 pi->port_state = ICE_SCHED_PORT_STATE_INIT;
792 mutex_lock(&pi->sched_lock);
793 ice_sched_clear_tx_topo(pi);
794 mutex_unlock(&pi->sched_lock);
795 mutex_destroy(&pi->sched_lock);
796 }
797
798 /**
799 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
800 * @hw: pointer to the HW struct
801 *
802 * Cleanup scheduling elements from SW DB for all the ports
803 */
void ice_sched_cleanup_all(struct ice_hw *hw)
805 {
806 if (!hw)
807 return;
808
809 if (hw->layer_info) {
810 devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
811 hw->layer_info = NULL;
812 }
813
814 ice_sched_clear_port(hw->port_info);
815
816 hw->num_tx_sched_layers = 0;
817 hw->num_tx_sched_phys_layers = 0;
818 hw->flattened_layers = 0;
819 hw->max_cgds = 0;
820 }
821
822 /**
823 * ice_sched_add_elems - add nodes to HW and SW DB
824 * @pi: port information structure
825 * @tc_node: pointer to the branch node
826 * @parent: pointer to the parent node
827 * @layer: layer number to add nodes
828 * @num_nodes: number of nodes
829 * @num_nodes_added: pointer to num nodes added
830 * @first_node_teid: if new nodes are added then return the TEID of first node
831 *
 * This function adds nodes to HW as well as to the SW DB for a given layer
833 */
834 static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
836 struct ice_sched_node *parent, u8 layer, u16 num_nodes,
837 u16 *num_nodes_added, u32 *first_node_teid)
838 {
839 struct ice_sched_node *prev, *new_node;
840 struct ice_aqc_add_elem *buf;
841 u16 i, num_groups_added = 0;
842 enum ice_status status = 0;
843 struct ice_hw *hw = pi->hw;
844 size_t buf_size;
845 u32 teid;
846
847 buf_size = struct_size(buf, generic, num_nodes);
848 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
849 if (!buf)
850 return ICE_ERR_NO_MEMORY;
851
852 buf->hdr.parent_teid = parent->info.node_teid;
853 buf->hdr.num_elems = cpu_to_le16(num_nodes);
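	/* each new element uses the generic SE type with the default RL
	 * profile IDs and default BW weights for both CIR and EIR
	 */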
854 for (i = 0; i < num_nodes; i++) {
855 buf->generic[i].parent_teid = parent->info.node_teid;
856 buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
857 buf->generic[i].data.valid_sections =
858 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
859 ICE_AQC_ELEM_VALID_EIR;
860 buf->generic[i].data.generic = 0;
861 buf->generic[i].data.cir_bw.bw_profile_idx =
862 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
863 buf->generic[i].data.cir_bw.bw_alloc =
864 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
865 buf->generic[i].data.eir_bw.bw_profile_idx =
866 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
867 buf->generic[i].data.eir_bw.bw_alloc =
868 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
869 }
870
871 status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
872 &num_groups_added, NULL);
873 if (status || num_groups_added != 1) {
874 ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
875 hw->adminq.sq_last_status);
876 devm_kfree(ice_hw_to_dev(hw), buf);
877 return ICE_ERR_CFG;
878 }
879
880 *num_nodes_added = num_nodes;
881 /* add nodes to the SW DB */
882 for (i = 0; i < num_nodes; i++) {
883 status = ice_sched_add_node(pi, layer, &buf->generic[i]);
884 if (status) {
885 ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
886 status);
887 break;
888 }
889
890 teid = le32_to_cpu(buf->generic[i].node_teid);
891 new_node = ice_sched_find_node_by_teid(parent, teid);
892 if (!new_node) {
893 ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
894 break;
895 }
896
897 new_node->sibling = NULL;
898 new_node->tc_num = tc_node->tc_num;
899
900 /* add it to previous node sibling pointer */
901 /* Note: siblings are not linked across branches */
902 prev = ice_sched_get_first_node(pi, tc_node, layer);
903 if (prev && prev != new_node) {
904 while (prev->sibling)
905 prev = prev->sibling;
906 prev->sibling = new_node;
907 }
908
909 /* initialize the sibling head */
910 if (!pi->sib_head[tc_node->tc_num][layer])
911 pi->sib_head[tc_node->tc_num][layer] = new_node;
912
913 if (i == 0)
914 *first_node_teid = teid;
915 }
916
917 devm_kfree(ice_hw_to_dev(hw), buf);
918 return status;
919 }
920
921 /**
922 * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer
923 * @pi: port information structure
924 * @tc_node: pointer to TC node
925 * @parent: pointer to parent node
926 * @layer: layer number to add nodes
927 * @num_nodes: number of nodes to be added
928 * @first_node_teid: pointer to the first node TEID
929 * @num_nodes_added: pointer to number of nodes added
930 *
931 * Add nodes into specific HW layer.
932 */
933 static enum ice_status
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
935 struct ice_sched_node *tc_node,
936 struct ice_sched_node *parent, u8 layer,
937 u16 num_nodes, u32 *first_node_teid,
938 u16 *num_nodes_added)
939 {
940 u16 max_child_nodes;
941
942 *num_nodes_added = 0;
943
944 if (!num_nodes)
945 return 0;
946
947 if (!parent || layer < pi->hw->sw_entry_point_layer)
948 return ICE_ERR_PARAM;
949
950 /* max children per node per layer */
951 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
952
953 /* current number of children + required nodes exceed max children */
954 if ((parent->num_children + num_nodes) > max_child_nodes) {
955 /* Fail if the parent is a TC node */
956 if (parent == tc_node)
957 return ICE_ERR_CFG;
958 return ICE_ERR_MAX_LIMIT;
959 }
960
961 return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
962 num_nodes_added, first_node_teid);
963 }
964
965 /**
966 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
967 * @pi: port information structure
968 * @tc_node: pointer to TC node
969 * @parent: pointer to parent node
970 * @layer: layer number to add nodes
971 * @num_nodes: number of nodes to be added
972 * @first_node_teid: pointer to the first node TEID
973 * @num_nodes_added: pointer to number of nodes added
974 *
 * This function adds nodes to a given layer.
976 */
977 static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
979 struct ice_sched_node *tc_node,
980 struct ice_sched_node *parent, u8 layer,
981 u16 num_nodes, u32 *first_node_teid,
982 u16 *num_nodes_added)
983 {
984 u32 *first_teid_ptr = first_node_teid;
985 u16 new_num_nodes = num_nodes;
986 enum ice_status status = 0;
987
988 *num_nodes_added = 0;
989 while (*num_nodes_added < num_nodes) {
990 u16 max_child_nodes, num_added = 0;
991 /* cppcheck-suppress unusedVariable */
992 u32 temp;
993
994 status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
995 layer, new_num_nodes,
996 first_teid_ptr,
997 &num_added);
998 if (!status)
999 *num_nodes_added += num_added;
1000 /* added more nodes than requested ? */
1001 if (*num_nodes_added > num_nodes) {
1002 ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
1003 *num_nodes_added);
1004 status = ICE_ERR_CFG;
1005 break;
1006 }
1007 /* break if all the nodes are added successfully */
1008 if (!status && (*num_nodes_added == num_nodes))
1009 break;
1010 /* break if the error is not max limit */
1011 if (status && status != ICE_ERR_MAX_LIMIT)
1012 break;
1013 /* Exceeded the max children */
1014 max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
1015 /* utilize all the spaces if the parent is not full */
1016 if (parent->num_children < max_child_nodes) {
1017 new_num_nodes = max_child_nodes - parent->num_children;
1018 } else {
1019 /* This parent is full, try the next sibling */
1020 parent = parent->sibling;
1021 /* Don't modify the first node TEID memory if the
1022 * first node was added already in the above call.
1023 * Instead send some temp memory for all other
1024 * recursive calls.
1025 */
1026 if (num_added)
1027 first_teid_ptr = &temp;
1028
1029 new_num_nodes = num_nodes - *num_nodes_added;
1030 }
1031 }
1032 return status;
1033 }
1034
1035 /**
1036 * ice_sched_get_qgrp_layer - get the current queue group layer number
1037 * @hw: pointer to the HW struct
1038 *
1039 * This function returns the current queue group layer number
1040 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
1042 {
1043 /* It's always total layers - 1, the array is 0 relative so -2 */
1044 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1045 }
1046
1047 /**
1048 * ice_sched_get_vsi_layer - get the current VSI layer number
1049 * @hw: pointer to the HW struct
1050 *
1051 * This function returns the current VSI layer number
1052 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
1054 {
1055 /* Num Layers VSI layer
1056 * 9 6
1057 * 7 4
1058 * 5 or less sw_entry_point_layer
1059 */
1060 /* calculate the VSI layer based on number of layers. */
1061 if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
1062 u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
1063
1064 if (layer > hw->sw_entry_point_layer)
1065 return layer;
1066 }
1067 return hw->sw_entry_point_layer;
1068 }
1069
1070 /**
1071 * ice_sched_get_agg_layer - get the current aggregator layer number
1072 * @hw: pointer to the HW struct
1073 *
1074 * This function returns the current aggregator layer number
1075 */
static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
1077 {
1078 /* Num Layers aggregator layer
1079 * 9 4
1080 * 7 or less sw_entry_point_layer
1081 */
1082 /* calculate the aggregator layer based on number of layers. */
1083 if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
1084 u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
1085
1086 if (layer > hw->sw_entry_point_layer)
1087 return layer;
1088 }
1089 return hw->sw_entry_point_layer;
1090 }
1091
1092 /**
1093 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1094 * @pi: port information structure
1095 *
1096 * This function removes the leaf node that was created by the FW
1097 * during initialization
1098 */
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
1100 {
1101 struct ice_sched_node *node;
1102
1103 node = pi->root;
1104 while (node) {
1105 if (!node->num_children)
1106 break;
1107 node = node->children[0];
1108 }
1109 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
1110 u32 teid = le32_to_cpu(node->info.node_teid);
1111 enum ice_status status;
1112
1113 /* remove the default leaf node */
1114 status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
1115 if (!status)
1116 ice_free_sched_node(pi, node);
1117 }
1118 }
1119
1120 /**
1121 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
1122 * @pi: port information structure
1123 *
1124 * This function frees all the nodes except root and TC that were created by
1125 * the FW during initialization
1126 */
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
1128 {
1129 struct ice_sched_node *node;
1130
1131 ice_rm_dflt_leaf_node(pi);
1132
1133 /* remove the default nodes except TC and root nodes */
1134 node = pi->root;
1135 while (node) {
1136 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
1137 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
1138 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
1139 ice_free_sched_node(pi, node);
1140 break;
1141 }
1142
1143 if (!node->num_children)
1144 break;
1145 node = node->children[0];
1146 }
1147 }
1148
1149 /**
1150 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port information structure
1152 *
1153 * This function is the initial call to find the total number of Tx scheduler
1154 * resources, default topology created by firmware and storing the information
1155 * in SW DB.
1156 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
1158 {
1159 struct ice_aqc_get_topo_elem *buf;
1160 enum ice_status status;
1161 struct ice_hw *hw;
1162 u8 num_branches;
1163 u16 num_elems;
1164 u8 i, j;
1165
1166 if (!pi)
1167 return ICE_ERR_PARAM;
1168 hw = pi->hw;
1169
1170 /* Query the Default Topology from FW */
1171 buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
1172 if (!buf)
1173 return ICE_ERR_NO_MEMORY;
1174
1175 /* Query default scheduling tree topology */
1176 status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
1177 &num_branches, NULL);
1178 if (status)
1179 goto err_init_port;
1180
1181 /* num_branches should be between 1-8 */
1182 if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
1183 ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
1184 num_branches);
1185 status = ICE_ERR_PARAM;
1186 goto err_init_port;
1187 }
1188
1189 /* get the number of elements on the default/first branch */
1190 num_elems = le16_to_cpu(buf[0].hdr.num_elems);
1191
1192 /* num_elems should always be between 1-9 */
1193 if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
1194 ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
1195 num_elems);
1196 status = ICE_ERR_PARAM;
1197 goto err_init_port;
1198 }
1199
1200 /* If the last node is a leaf node then the index of the queue group
1201 * layer is two less than the number of elements.
1202 */
1203 if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
1204 ICE_AQC_ELEM_TYPE_LEAF)
1205 pi->last_node_teid =
1206 le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
1207 else
1208 pi->last_node_teid =
1209 le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);
1210
1211 /* Insert the Tx Sched root node */
1212 status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
1213 if (status)
1214 goto err_init_port;
1215
1216 /* Parse the default tree and cache the information */
1217 for (i = 0; i < num_branches; i++) {
1218 num_elems = le16_to_cpu(buf[i].hdr.num_elems);
1219
1220 /* Skip root element as already inserted */
1221 for (j = 1; j < num_elems; j++) {
1222 /* update the sw entry point */
1223 if (buf[0].generic[j].data.elem_type ==
1224 ICE_AQC_ELEM_TYPE_ENTRY_POINT)
1225 hw->sw_entry_point_layer = j;
1226
1227 status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
1228 if (status)
1229 goto err_init_port;
1230 }
1231 }
1232
1233 /* Remove the default nodes. */
1234 if (pi->root)
1235 ice_sched_rm_dflt_nodes(pi);
1236
1237 /* initialize the port for handling the scheduler tree */
1238 pi->port_state = ICE_SCHED_PORT_STATE_READY;
1239 mutex_init(&pi->sched_lock);
1240 for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
1241 INIT_LIST_HEAD(&pi->rl_prof_list[i]);
1242
1243 err_init_port:
1244 if (status && pi->root) {
1245 ice_free_sched_node(pi, pi->root);
1246 pi->root = NULL;
1247 }
1248
1249 devm_kfree(ice_hw_to_dev(hw), buf);
1250 return status;
1251 }
1252
1253 /**
1254 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1255 * @hw: pointer to the HW struct
1256 *
1257 * query FW for allocated scheduler resources and store in HW struct
1258 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
1260 {
1261 struct ice_aqc_query_txsched_res_resp *buf;
1262 enum ice_status status = 0;
1263 __le16 max_sibl;
1264 u16 i;
1265
1266 if (hw->layer_info)
1267 return status;
1268
1269 buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
1270 if (!buf)
1271 return ICE_ERR_NO_MEMORY;
1272
1273 status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
1274 if (status)
1275 goto sched_query_out;
1276
1277 hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
1278 hw->num_tx_sched_phys_layers =
1279 le16_to_cpu(buf->sched_props.phys_levels);
1280 hw->flattened_layers = buf->sched_props.flattening_bitmap;
1281 hw->max_cgds = buf->sched_props.max_pf_cgds;
1282
1283 /* max sibling group size of current layer refers to the max children
1284 * of the below layer node.
1285 * layer 1 node max children will be layer 2 max sibling group size
1286 * layer 2 node max children will be layer 3 max sibling group size
1287 * and so on. This array will be populated from root (index 0) to
1288 * qgroup layer 7. Leaf node has no children.
1289 */
1290 for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
1291 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
1292 hw->max_children[i] = le16_to_cpu(max_sibl);
1293 }
1294
1295 hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
1296 (hw->num_tx_sched_layers *
1297 sizeof(*hw->layer_info)),
1298 GFP_KERNEL);
1299 if (!hw->layer_info) {
1300 status = ICE_ERR_NO_MEMORY;
1301 goto sched_query_out;
1302 }
1303
1304 sched_query_out:
1305 devm_kfree(ice_hw_to_dev(hw), buf);
1306 return status;
1307 }
1308
1309 /**
1310 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
1311 * @hw: pointer to the HW struct
1312 *
1313 * Determine the PSM clock frequency and store in HW struct
1314 */
void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
1316 {
1317 u32 val, clk_src;
1318
1319 val = rd32(hw, GLGEN_CLKSTAT_SRC);
1320 clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
1321 GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
1322
1323 #define PSM_CLK_SRC_367_MHZ 0x0
1324 #define PSM_CLK_SRC_416_MHZ 0x1
1325 #define PSM_CLK_SRC_446_MHZ 0x2
1326 #define PSM_CLK_SRC_390_MHZ 0x3
1327
1328 switch (clk_src) {
1329 case PSM_CLK_SRC_367_MHZ:
1330 hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
1331 break;
1332 case PSM_CLK_SRC_416_MHZ:
1333 hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
1334 break;
1335 case PSM_CLK_SRC_446_MHZ:
1336 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
1337 break;
1338 case PSM_CLK_SRC_390_MHZ:
1339 hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
1340 break;
1341 default:
1342 ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
1343 clk_src);
1344 /* fall back to a safe default */
1345 hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
1346 }
1347 }
1348
1349 /**
1350 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1351 * @hw: pointer to the HW struct
1352 * @base: pointer to the base node
1353 * @node: pointer to the node to search
1354 *
1355 * This function checks whether a given node is part of the base node
1356 * subtree or not
1357 */
1358 static bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1360 struct ice_sched_node *node)
1361 {
1362 u8 i;
1363
1364 for (i = 0; i < base->num_children; i++) {
1365 struct ice_sched_node *child = base->children[i];
1366
1367 if (node == child)
1368 return true;
1369
1370 if (child->tx_sched_layer > node->tx_sched_layer)
1371 return false;
1372
		/* this recursion is intentional, and won't
		 * go more than 8 calls deep
1375 */
1376 if (ice_sched_find_node_in_subtree(hw, child, node))
1377 return true;
1378 }
1379 return false;
1380 }
1381
1382 /**
1383 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
1384 * @pi: port information structure
 * @vsi_node: pointer to the VSI node
1386 * @qgrp_node: first queue group node identified for scanning
1387 * @owner: LAN or RDMA
1388 *
1389 * This function retrieves a free LAN or RDMA queue group node by scanning
1390 * qgrp_node and its siblings for the queue group with the fewest number
1391 * of queues currently assigned.
1392 */
1393 static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
1395 struct ice_sched_node *vsi_node,
1396 struct ice_sched_node *qgrp_node, u8 owner)
1397 {
1398 struct ice_sched_node *min_qgrp;
1399 u8 min_children;
1400
1401 if (!qgrp_node)
1402 return qgrp_node;
1403 min_children = qgrp_node->num_children;
1404 if (!min_children)
1405 return qgrp_node;
1406 min_qgrp = qgrp_node;
	/* scan all queue groups until we find a node that has fewer than the
	 * current minimum number of children. This way all queue group nodes
	 * get an equal number of queues assigned, and the bandwidth is
	 * distributed equally across all queues.
1411 */
1412 while (qgrp_node) {
1413 /* make sure the qgroup node is part of the VSI subtree */
1414 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1415 if (qgrp_node->num_children < min_children &&
1416 qgrp_node->owner == owner) {
1417 /* replace the new min queue group node */
1418 min_qgrp = qgrp_node;
1419 min_children = min_qgrp->num_children;
1420 /* break if it has no children, */
1421 if (!min_children)
1422 break;
1423 }
1424 qgrp_node = qgrp_node->sibling;
1425 }
1426 return min_qgrp;
1427 }
1428
1429 /**
1430 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
1431 * @pi: port information structure
1432 * @vsi_handle: software VSI handle
1433 * @tc: branch number
1434 * @owner: LAN or RDMA
1435 *
1436 * This function retrieves a free LAN or RDMA queue group node
1437 */
1438 struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
1440 u8 owner)
1441 {
1442 struct ice_sched_node *vsi_node, *qgrp_node;
1443 struct ice_vsi_ctx *vsi_ctx;
1444 u16 max_children;
1445 u8 qgrp_layer;
1446
1447 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
1448 max_children = pi->hw->max_children[qgrp_layer];
1449
1450 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1451 if (!vsi_ctx)
1452 return NULL;
1453 vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* a missing VSI node means the VSI handle is invalid */
1455 if (!vsi_node)
1456 return NULL;
1457
1458 /* get the first queue group node from VSI sub-tree */
1459 qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
1460 while (qgrp_node) {
1461 /* make sure the qgroup node is part of the VSI subtree */
1462 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1463 if (qgrp_node->num_children < max_children &&
1464 qgrp_node->owner == owner)
1465 break;
1466 qgrp_node = qgrp_node->sibling;
1467 }
1468
1469 /* Select the best queue group */
1470 return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
1471 }
1472
1473 /**
1474 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
1475 * @pi: pointer to the port information structure
1476 * @tc_node: pointer to the TC node
1477 * @vsi_handle: software VSI handle
1478 *
1479 * This function retrieves a VSI node for a given VSI ID from a given
1480 * TC branch
1481 */
1482 static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1484 u16 vsi_handle)
1485 {
1486 struct ice_sched_node *node;
1487 u8 vsi_layer;
1488
1489 vsi_layer = ice_sched_get_vsi_layer(pi->hw);
1490 node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
1491
1492 /* Check whether it already exists */
1493 while (node) {
1494 if (node->vsi_handle == vsi_handle)
1495 return node;
1496 node = node->sibling;
1497 }
1498
1499 return node;
1500 }
1501
1502 /**
1503 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
1504 * @pi: pointer to the port information structure
1505 * @tc_node: pointer to the TC node
1506 * @agg_id: aggregator ID
1507 *
1508 * This function retrieves an aggregator node for a given aggregator ID from
1509 * a given TC branch
1510 */
1511 static struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
1513 u32 agg_id)
1514 {
1515 struct ice_sched_node *node;
1516 struct ice_hw *hw = pi->hw;
1517 u8 agg_layer;
1518
1519 if (!hw)
1520 return NULL;
1521 agg_layer = ice_sched_get_agg_layer(hw);
1522 node = ice_sched_get_first_node(pi, tc_node, agg_layer);
1523
1524 /* Check whether it already exists */
1525 while (node) {
1526 if (node->agg_id == agg_id)
1527 return node;
1528 node = node->sibling;
1529 }
1530
1531 return node;
1532 }
1533
1534 /**
1535 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1536 * @hw: pointer to the HW struct
1537 * @num_qs: number of queues
1538 * @num_nodes: num nodes array
1539 *
1540 * This function calculates the number of VSI child nodes based on the
1541 * number of queues.
1542 */
1543 static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1545 {
1546 u16 num = num_qs;
1547 u8 i, qgl, vsil;
1548
1549 qgl = ice_sched_get_qgrp_layer(hw);
1550 vsil = ice_sched_get_vsi_layer(hw);
1551
1552 /* calculate num nodes from queue group to VSI layer */
1553 for (i = qgl; i > vsil; i--) {
1554 /* round to the next integer if there is a remainder */
1555 num = DIV_ROUND_UP(num, hw->max_children[i]);
1556
1557 /* need at least one node */
1558 num_nodes[i] = num ? num : 1;
1559 }
1560 }
1561
1562 /**
1563 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1564 * @pi: port information structure
1565 * @vsi_handle: software VSI handle
1566 * @tc_node: pointer to the TC node
1567 * @num_nodes: pointer to the num nodes that needs to be added per layer
1568 * @owner: node owner (LAN or RDMA)
1569 *
1570 * This function adds the VSI child nodes to tree. It gets called for
1571 * LAN and RDMA separately.
1572 */
1573 static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1575 struct ice_sched_node *tc_node, u16 *num_nodes,
1576 u8 owner)
1577 {
1578 struct ice_sched_node *parent, *node;
1579 struct ice_hw *hw = pi->hw;
1580 enum ice_status status;
1581 u32 first_node_teid;
1582 u16 num_added = 0;
1583 u8 i, qgl, vsil;
1584
1585 qgl = ice_sched_get_qgrp_layer(hw);
1586 vsil = ice_sched_get_vsi_layer(hw);
1587 parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1588 for (i = vsil + 1; i <= qgl; i++) {
1589 if (!parent)
1590 return ICE_ERR_CFG;
1591
1592 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
1593 num_nodes[i],
1594 &first_node_teid,
1595 &num_added);
1596 if (status || num_nodes[i] != num_added)
1597 return ICE_ERR_CFG;
1598
1599 /* The newly added node can be a new parent for the next
1600 * layer nodes
1601 */
1602 if (num_added) {
1603 parent = ice_sched_find_node_by_teid(tc_node,
1604 first_node_teid);
1605 node = parent;
1606 while (node) {
1607 node->owner = owner;
1608 node = node->sibling;
1609 }
1610 } else {
1611 parent = parent->children[0];
1612 }
1613 }
1614
1615 return 0;
1616 }
1617
1618 /**
1619 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1620 * @pi: pointer to the port info structure
1621 * @tc_node: pointer to TC node
1622 * @num_nodes: pointer to num nodes array
1623 *
1624 * This function calculates the number of supported nodes needed to add this
1625 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
1626 * layers
1627 */
1628 static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
1630 struct ice_sched_node *tc_node, u16 *num_nodes)
1631 {
1632 struct ice_sched_node *node;
1633 u8 vsil;
1634 int i;
1635
1636 vsil = ice_sched_get_vsi_layer(pi->hw);
1637 for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
		/* Add a node at this layer if the TC has no children yet;
		 * the VSI layer always needs at least one node
		 */
1641 if (!tc_node->num_children || i == vsil) {
1642 num_nodes[i]++;
1643 } else {
			/* If the intermediate nodes have reached their max
			 * number of children, add a new one.
			 */
1647 node = ice_sched_get_first_node(pi, tc_node, (u8)i);
1648 /* scan all the siblings */
1649 while (node) {
1650 if (node->num_children < pi->hw->max_children[i])
1651 break;
1652 node = node->sibling;
1653 }
1654
1655 /* tree has one intermediate node to add this new VSI.
1656 * So no need to calculate supported nodes for below
1657 * layers.
1658 */
1659 if (node)
1660 break;
1661 /* all the nodes are full, allocate a new one */
1662 num_nodes[i]++;
1663 }
1664 }
1665
1666 /**
1667 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1668 * @pi: port information structure
1669 * @vsi_handle: software VSI handle
1670 * @tc_node: pointer to TC node
1671 * @num_nodes: pointer to num nodes array
1672 *
1673 * This function adds the VSI support nodes into the Tx tree, including the
1674 * VSI, its parent and the intermediate nodes in the layers below.
1675 */
1676 static enum ice_status
1677 ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
1678 struct ice_sched_node *tc_node, u16 *num_nodes)
1679 {
1680 struct ice_sched_node *parent = tc_node;
1681 enum ice_status status;
1682 u32 first_node_teid;
1683 u16 num_added = 0;
1684 u8 i, vsil;
1685
1686 if (!pi)
1687 return ICE_ERR_PARAM;
1688
1689 vsil = ice_sched_get_vsi_layer(pi->hw);
1690 for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1691 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1692 i, num_nodes[i],
1693 &first_node_teid,
1694 &num_added);
1695 if (status || num_nodes[i] != num_added)
1696 return ICE_ERR_CFG;
1697
1698 /* The newly added node can be a new parent for the next
1699 * layer nodes
1700 */
1701 if (num_added)
1702 parent = ice_sched_find_node_by_teid(tc_node,
1703 first_node_teid);
1704 else
1705 parent = parent->children[0];
1706
1707 if (!parent)
1708 return ICE_ERR_CFG;
1709
1710 if (i == vsil)
1711 parent->vsi_handle = vsi_handle;
1712 }
1713
1714 return 0;
1715 }
1716
1717 /**
1718 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1719 * @pi: port information structure
1720 * @vsi_handle: software VSI handle
1721 * @tc: TC number
1722 *
1723 * This function adds a new VSI into scheduler tree
1724 */
1725 static enum ice_status
1726 ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1727 {
1728 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1729 struct ice_sched_node *tc_node;
1730
1731 tc_node = ice_sched_get_tc_node(pi, tc);
1732 if (!tc_node)
1733 return ICE_ERR_PARAM;
1734
1735 /* calculate number of supported nodes needed for this VSI */
1736 ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
1737
1738 /* add VSI supported nodes to TC subtree */
1739 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1740 num_nodes);
1741 }
1742
1743 /**
1744 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1745 * @pi: port information structure
1746 * @vsi_handle: software VSI handle
1747 * @tc: TC number
1748 * @new_numqs: new number of max queues
1749 * @owner: owner of this subtree
1750 *
1751 * This function updates the VSI child nodes based on the number of queues
1752 */
1753 static enum ice_status
1754 ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1755 u8 tc, u16 new_numqs, u8 owner)
1756 {
1757 u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1758 struct ice_sched_node *vsi_node;
1759 struct ice_sched_node *tc_node;
1760 struct ice_vsi_ctx *vsi_ctx;
1761 enum ice_status status = 0;
1762 struct ice_hw *hw = pi->hw;
1763 u16 prev_numqs;
1764
1765 tc_node = ice_sched_get_tc_node(pi, tc);
1766 if (!tc_node)
1767 return ICE_ERR_CFG;
1768
1769 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1770 if (!vsi_node)
1771 return ICE_ERR_CFG;
1772
1773 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1774 if (!vsi_ctx)
1775 return ICE_ERR_PARAM;
1776
1777 prev_numqs = vsi_ctx->sched.max_lanq[tc];
1778 /* number of queues is unchanged or less than the previous number */
1779 if (new_numqs <= prev_numqs)
1780 return status;
1781 status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
1782 if (status)
1783 return status;
1784
1785 if (new_numqs)
1786 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
1787 /* Always keep the max number of queues configured. Update the
1788 * tree only if number of queues > previous number of queues. This may
1789 * leave some extra nodes in the tree if number of queues < previous
1790 * number but that wouldn't harm anything. Removing those extra nodes
1791 * may complicate the code if those nodes are part of SRL or
1792 * individually rate limited.
1793 */
1794 status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
1795 new_num_nodes, owner);
1796 if (status)
1797 return status;
1798 vsi_ctx->sched.max_lanq[tc] = new_numqs;
1799
1800 return 0;
1801 }
1802
1803 /**
1804 * ice_sched_cfg_vsi - configure the new/existing VSI
1805 * @pi: port information structure
1806 * @vsi_handle: software VSI handle
1807 * @tc: TC number
1808 * @maxqs: max number of queues
1809 * @owner: LAN or RDMA
1810 * @enable: TC enabled or disabled
1811 *
1812 * This function adds/updates VSI nodes based on the number of queues. If TC is
1813 * enabled and VSI is in suspended state then resume the VSI back. If TC is
1814 * disabled then suspend the VSI if it is not already.
1815 */
1816 enum ice_status
1817 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
1818 u8 owner, bool enable)
1819 {
1820 struct ice_sched_node *vsi_node, *tc_node;
1821 struct ice_vsi_ctx *vsi_ctx;
1822 enum ice_status status = 0;
1823 struct ice_hw *hw = pi->hw;
1824
1825 ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
1826 tc_node = ice_sched_get_tc_node(pi, tc);
1827 if (!tc_node)
1828 return ICE_ERR_PARAM;
1829 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1830 if (!vsi_ctx)
1831 return ICE_ERR_PARAM;
1832 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1833
1834 /* suspend the VSI if TC is not enabled */
1835 if (!enable) {
1836 if (vsi_node && vsi_node->in_use) {
1837 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1838
1839 status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1840 true);
1841 if (!status)
1842 vsi_node->in_use = false;
1843 }
1844 return status;
1845 }
1846
1847 /* TC is enabled, if it is a new VSI then add it to the tree */
1848 if (!vsi_node) {
1849 status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
1850 if (status)
1851 return status;
1852
1853 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1854 if (!vsi_node)
1855 return ICE_ERR_CFG;
1856
1857 vsi_ctx->sched.vsi_node[tc] = vsi_node;
1858 vsi_node->in_use = true;
1859 /* invalidate the max queues whenever the VSI gets added for the
1860 * first time into the scheduler tree (boot or after reset). We
1861 * need to recreate the child nodes all the time in these cases.
1862 */
1863 vsi_ctx->sched.max_lanq[tc] = 0;
1864 }
1865
1866 /* update the VSI child nodes */
1867 status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
1868 owner);
1869 if (status)
1870 return status;
1871
1872 /* TC is enabled, resume the VSI if it is in the suspend state */
1873 if (!vsi_node->in_use) {
1874 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1875
1876 status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1877 if (!status)
1878 vsi_node->in_use = true;
1879 }
1880
1881 return status;
1882 }
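
/* Illustrative sketch only, not part of the driver: shows how a Tx queue
 * configuration path might call ice_sched_cfg_vsi() to create or grow the
 * LAN subtree of a VSI on TC 0 with room for 16 queues. The helper name and
 * the queue count are assumptions for this example; pi->sched_lock is taken
 * here following the locking convention used elsewhere in this file.
 */
static enum ice_status __maybe_unused
ice_sched_example_cfg_lan_vsi(struct ice_port_info *pi, u16 vsi_handle)
{
	enum ice_status status;

	mutex_lock(&pi->sched_lock);
	/* enable = true also resumes the VSI node if it was suspended */
	status = ice_sched_cfg_vsi(pi, vsi_handle, 0 /* tc */, 16 /* maxqs */,
				   ICE_SCHED_NODE_OWNER_LAN, true);
	mutex_unlock(&pi->sched_lock);
	return status;
}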
1883
1884 /**
1885 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
1886 * @pi: port information structure
1887 * @vsi_handle: software VSI handle
1888 *
1889 * This function removes single aggregator VSI info entry from
1890 * aggregator list.
1891 */
1892 static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
1893 {
1894 struct ice_sched_agg_info *agg_info;
1895 struct ice_sched_agg_info *atmp;
1896
1897 list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
1898 list_entry) {
1899 struct ice_sched_agg_vsi_info *agg_vsi_info;
1900 struct ice_sched_agg_vsi_info *vtmp;
1901
1902 list_for_each_entry_safe(agg_vsi_info, vtmp,
1903 &agg_info->agg_vsi_list, list_entry)
1904 if (agg_vsi_info->vsi_handle == vsi_handle) {
1905 list_del(&agg_vsi_info->list_entry);
1906 devm_kfree(ice_hw_to_dev(pi->hw),
1907 agg_vsi_info);
1908 return;
1909 }
1910 }
1911 }
1912
1913 /**
1914 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
1915 * @node: pointer to the sub-tree node
1916 *
1917 * This function checks for a leaf node presence in a given sub-tree node.
1918 */
1919 static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
1920 {
1921 u8 i;
1922
1923 for (i = 0; i < node->num_children; i++)
1924 if (ice_sched_is_leaf_node_present(node->children[i]))
1925 return true;
1926 /* check for a leaf node */
1927 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
1928 }
1929
1930 /**
1931 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
1932 * @pi: port information structure
1933 * @vsi_handle: software VSI handle
1934 * @owner: LAN or RDMA
1935 *
1936 * This function removes the VSI and its LAN or RDMA children nodes from the
1937 * scheduler tree.
1938 */
1939 static enum ice_status
1940 ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
1941 {
1942 enum ice_status status = ICE_ERR_PARAM;
1943 struct ice_vsi_ctx *vsi_ctx;
1944 u8 i;
1945
1946 ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
1947 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
1948 return status;
1949 mutex_lock(&pi->sched_lock);
1950 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1951 if (!vsi_ctx)
1952 goto exit_sched_rm_vsi_cfg;
1953
1954 ice_for_each_traffic_class(i) {
1955 struct ice_sched_node *vsi_node, *tc_node;
1956 u8 j = 0;
1957
1958 tc_node = ice_sched_get_tc_node(pi, i);
1959 if (!tc_node)
1960 continue;
1961
1962 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
1963 if (!vsi_node)
1964 continue;
1965
1966 if (ice_sched_is_leaf_node_present(vsi_node)) {
1967 ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
1968 status = ICE_ERR_IN_USE;
1969 goto exit_sched_rm_vsi_cfg;
1970 }
1971 while (j < vsi_node->num_children) {
1972 if (vsi_node->children[j]->owner == owner) {
1973 ice_free_sched_node(pi, vsi_node->children[j]);
1974
1975 /* reset the counter again since the num
1976 * children will be updated after node removal
1977 */
1978 j = 0;
1979 } else {
1980 j++;
1981 }
1982 }
1983 /* remove the VSI if it has no children */
1984 if (!vsi_node->num_children) {
1985 ice_free_sched_node(pi, vsi_node);
1986 vsi_ctx->sched.vsi_node[i] = NULL;
1987
1988 /* clean up aggregator related VSI info if any */
1989 ice_sched_rm_agg_vsi_info(pi, vsi_handle);
1990 }
1991 if (owner == ICE_SCHED_NODE_OWNER_LAN)
1992 vsi_ctx->sched.max_lanq[i] = 0;
1993 }
1994 status = 0;
1995
1996 exit_sched_rm_vsi_cfg:
1997 mutex_unlock(&pi->sched_lock);
1998 return status;
1999 }
2000
2001 /**
2002 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
2003 * @pi: port information structure
2004 * @vsi_handle: software VSI handle
2005 *
2006 * This function clears the VSI and its LAN children nodes from scheduler tree
2007 * for all TCs.
2008 */
2009 enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
2010 {
2011 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
2012 }
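
/* Illustrative sketch only, not part of the driver: a teardown path would
 * typically release the LAN scheduler nodes of a VSI like this before
 * freeing the VSI context. The helper name is an assumption; the scheduler
 * lock is taken inside ice_sched_rm_vsi_cfg().
 */
static enum ice_status __maybe_unused
ice_sched_example_teardown_lan_vsi(struct ice_port_info *pi, u16 vsi_handle)
{
	/* fails with ICE_ERR_IN_USE while leaf (queue) nodes still exist */
	return ice_rm_vsi_lan_cfg(pi, vsi_handle);
}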
2013
2014 /**
2015 * ice_get_agg_info - get the aggregator info
2016 * @hw: pointer to the hardware structure
2017 * @agg_id: aggregator ID
2018 *
2019 * This function validates the aggregator ID. It returns the aggregator info
2020 * if the aggregator ID is present in the list, otherwise it returns NULL.
2021 */
2022 static struct ice_sched_agg_info *
2023 ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
2024 {
2025 struct ice_sched_agg_info *agg_info;
2026
2027 list_for_each_entry(agg_info, &hw->agg_list, list_entry)
2028 if (agg_info->agg_id == agg_id)
2029 return agg_info;
2030
2031 return NULL;
2032 }
2033
2034 /**
2035 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
2036 * @hw: pointer to the HW struct
2037 * @node: pointer to a child node
2038 * @num_nodes: num nodes count array
2039 *
2040 * This function walks through the aggregator subtree to find a free parent
2041 * node
2042 */
2043 static struct ice_sched_node *
2044 ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
2045 u16 *num_nodes)
2046 {
2047 u8 l = node->tx_sched_layer;
2048 u8 vsil, i;
2049
2050 vsil = ice_sched_get_vsi_layer(hw);
2051
2052 /* Is it VSI parent layer ? */
2053 if (l == vsil - 1)
2054 return (node->num_children < hw->max_children[l]) ? node : NULL;
2055
2056 /* We have intermediate nodes. Let's walk through the subtree. If the
2057 * intermediate node has space to add a new node then clear the count
2058 */
2059 if (node->num_children < hw->max_children[l])
2060 num_nodes[l] = 0;
2061 /* The recursive call below is intentional and won't go more than
2062 * 2 or 3 levels deep.
2063 */
2064
2065 for (i = 0; i < node->num_children; i++) {
2066 struct ice_sched_node *parent;
2067
2068 parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
2069 num_nodes);
2070 if (parent)
2071 return parent;
2072 }
2073
2074 return NULL;
2075 }
2076
2077 /**
2078 * ice_sched_update_parent - update the new parent in SW DB
2079 * @new_parent: pointer to a new parent node
2080 * @node: pointer to a child node
2081 *
2082 * This function removes the child from the old parent and adds it to a new
2083 * parent
2084 */
2085 static void
2086 ice_sched_update_parent(struct ice_sched_node *new_parent,
2087 struct ice_sched_node *node)
2088 {
2089 struct ice_sched_node *old_parent;
2090 u8 i, j;
2091
2092 old_parent = node->parent;
2093
2094 /* update the old parent children */
2095 for (i = 0; i < old_parent->num_children; i++)
2096 if (old_parent->children[i] == node) {
2097 for (j = i + 1; j < old_parent->num_children; j++)
2098 old_parent->children[j - 1] =
2099 old_parent->children[j];
2100 old_parent->num_children--;
2101 break;
2102 }
2103
2104 /* now move the node to a new parent */
2105 new_parent->children[new_parent->num_children++] = node;
2106 node->parent = new_parent;
2107 node->info.parent_teid = new_parent->info.node_teid;
2108 }
2109
2110 /**
2111 * ice_sched_move_nodes - move child nodes to a given parent
2112 * @pi: port information structure
2113 * @parent: pointer to parent node
2114 * @num_items: number of child nodes to be moved
2115 * @list: pointer to child node teids
2116 *
2117 * This function moves the child nodes to a given parent.
2118 */
2119 static enum ice_status
2120 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
2121 u16 num_items, u32 *list)
2122 {
2123 struct ice_aqc_move_elem *buf;
2124 struct ice_sched_node *node;
2125 enum ice_status status = 0;
2126 u16 i, grps_movd = 0;
2127 struct ice_hw *hw;
2128 u16 buf_len;
2129
2130 hw = pi->hw;
2131
2132 if (!parent || !num_items)
2133 return ICE_ERR_PARAM;
2134
2135 /* Does parent have enough space */
2136 if (parent->num_children + num_items >
2137 hw->max_children[parent->tx_sched_layer])
2138 return ICE_ERR_AQ_FULL;
2139
2140 buf_len = struct_size(buf, teid, 1);
2141 buf = kzalloc(buf_len, GFP_KERNEL);
2142 if (!buf)
2143 return ICE_ERR_NO_MEMORY;
2144
2145 for (i = 0; i < num_items; i++) {
2146 node = ice_sched_find_node_by_teid(pi->root, list[i]);
2147 if (!node) {
2148 status = ICE_ERR_PARAM;
2149 goto move_err_exit;
2150 }
2151
2152 buf->hdr.src_parent_teid = node->info.parent_teid;
2153 buf->hdr.dest_parent_teid = parent->info.node_teid;
2154 buf->teid[0] = node->info.node_teid;
2155 buf->hdr.num_elems = cpu_to_le16(1);
2156 status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
2157 &grps_movd, NULL);
2158 if (status && grps_movd != 1) {
2159 status = ICE_ERR_CFG;
2160 goto move_err_exit;
2161 }
2162
2163 /* update the SW DB */
2164 ice_sched_update_parent(parent, node);
2165 }
2166
2167 move_err_exit:
2168 kfree(buf);
2169 return status;
2170 }
2171
2172 /**
2173 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
2174 * @pi: port information structure
2175 * @vsi_handle: software VSI handle
2176 * @agg_id: aggregator ID
2177 * @tc: TC number
2178 *
2179 * This function moves a VSI to an aggregator node or its subtree.
2180 * Intermediate nodes may be created if required.
2181 */
2182 static enum ice_status
2183 ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
2184 u8 tc)
2185 {
2186 struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
2187 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
2188 u32 first_node_teid, vsi_teid;
2189 enum ice_status status;
2190 u16 num_nodes_added;
2191 u8 aggl, vsil, i;
2192
2193 tc_node = ice_sched_get_tc_node(pi, tc);
2194 if (!tc_node)
2195 return ICE_ERR_CFG;
2196
2197 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2198 if (!agg_node)
2199 return ICE_ERR_DOES_NOT_EXIST;
2200
2201 vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
2202 if (!vsi_node)
2203 return ICE_ERR_DOES_NOT_EXIST;
2204
2205 /* Is this VSI already part of given aggregator? */
2206 if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
2207 return 0;
2208
2209 aggl = ice_sched_get_agg_layer(pi->hw);
2210 vsil = ice_sched_get_vsi_layer(pi->hw);
2211
2212 /* set intermediate node count to 1 between aggregator and VSI layers */
2213 for (i = aggl + 1; i < vsil; i++)
2214 num_nodes[i] = 1;
2215
2216 /* Check if the aggregator subtree has any free node to add the VSI */
2217 for (i = 0; i < agg_node->num_children; i++) {
2218 parent = ice_sched_get_free_vsi_parent(pi->hw,
2219 agg_node->children[i],
2220 num_nodes);
2221 if (parent)
2222 goto move_nodes;
2223 }
2224
2225 /* add new nodes */
2226 parent = agg_node;
2227 for (i = aggl + 1; i < vsil; i++) {
2228 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
2229 num_nodes[i],
2230 &first_node_teid,
2231 &num_nodes_added);
2232 if (status || num_nodes[i] != num_nodes_added)
2233 return ICE_ERR_CFG;
2234
2235 /* The newly added node can be a new parent for the next
2236 * layer nodes
2237 */
2238 if (num_nodes_added)
2239 parent = ice_sched_find_node_by_teid(tc_node,
2240 first_node_teid);
2241 else
2242 parent = parent->children[0];
2243
2244 if (!parent)
2245 return ICE_ERR_CFG;
2246 }
2247
2248 move_nodes:
2249 vsi_teid = le32_to_cpu(vsi_node->info.node_teid);
2250 return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
2251 }
2252
2253 /**
2254 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
2255 * @pi: port information structure
2256 * @agg_info: aggregator info
2257 * @tc: traffic class number
2258 * @rm_vsi_info: remove the VSI info entries if true
2259 *
2260 * This function moves all the VSI(s) to the default aggregator and deletes
2261 * the aggregator VSI info based on the passed-in boolean parameter
2262 * rm_vsi_info. The caller holds the scheduler lock.
2263 */
2264 static enum ice_status
2265 ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
2266 struct ice_sched_agg_info *agg_info, u8 tc,
2267 bool rm_vsi_info)
2268 {
2269 struct ice_sched_agg_vsi_info *agg_vsi_info;
2270 struct ice_sched_agg_vsi_info *tmp;
2271 enum ice_status status = 0;
2272
2273 list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
2274 list_entry) {
2275 u16 vsi_handle = agg_vsi_info->vsi_handle;
2276
2277 /* Move VSI to default aggregator */
2278 if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
2279 continue;
2280
2281 status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
2282 ICE_DFLT_AGG_ID, tc);
2283 if (status)
2284 break;
2285
2286 clear_bit(tc, agg_vsi_info->tc_bitmap);
2287 if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
2288 list_del(&agg_vsi_info->list_entry);
2289 devm_kfree(ice_hw_to_dev(pi->hw), agg_vsi_info);
2290 }
2291 }
2292
2293 return status;
2294 }
2295
2296 /**
2297 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
2298 * @pi: port information structure
2299 * @node: node pointer
2300 *
2301 * This function checks whether the aggregator is attached with any VSI or not.
2302 */
2303 static bool
2304 ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
2305 {
2306 u8 vsil, i;
2307
2308 vsil = ice_sched_get_vsi_layer(pi->hw);
2309 if (node->tx_sched_layer < vsil - 1) {
2310 for (i = 0; i < node->num_children; i++)
2311 if (ice_sched_is_agg_inuse(pi, node->children[i]))
2312 return true;
2313 return false;
2314 } else {
2315 return node->num_children ? true : false;
2316 }
2317 }
2318
2319 /**
2320 * ice_sched_rm_agg_cfg - remove the aggregator node
2321 * @pi: port information structure
2322 * @agg_id: aggregator ID
2323 * @tc: TC number
2324 *
2325 * This function removes the aggregator node and intermediate nodes if any
2326 * from the given TC
2327 */
2328 static enum ice_status
2329 ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2330 {
2331 struct ice_sched_node *tc_node, *agg_node;
2332 struct ice_hw *hw = pi->hw;
2333
2334 tc_node = ice_sched_get_tc_node(pi, tc);
2335 if (!tc_node)
2336 return ICE_ERR_CFG;
2337
2338 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2339 if (!agg_node)
2340 return ICE_ERR_DOES_NOT_EXIST;
2341
2342 /* Can't remove the aggregator node if it has children */
2343 if (ice_sched_is_agg_inuse(pi, agg_node))
2344 return ICE_ERR_IN_USE;
2345
2346 /* need to remove the whole subtree if aggregator node is the
2347 * only child.
2348 */
2349 while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
2350 struct ice_sched_node *parent = agg_node->parent;
2351
2352 if (!parent)
2353 return ICE_ERR_CFG;
2354
2355 if (parent->num_children > 1)
2356 break;
2357
2358 agg_node = parent;
2359 }
2360
2361 ice_free_sched_node(pi, agg_node);
2362 return 0;
2363 }
2364
2365 /**
2366 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
2367 * @pi: port information structure
2368 * @agg_info: aggregator info
2369 * @tc: TC number
2370 * @rm_vsi_info: remove the VSI info entries if true
2371 *
2372 * This function removes aggregator reference to VSI of given TC. It removes
2373 * the aggregator configuration completely for requested TC. The caller needs
2374 * to hold the scheduler lock.
2375 */
2376 static enum ice_status
2377 ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
2378 u8 tc, bool rm_vsi_info)
2379 {
2380 enum ice_status status = 0;
2381
2382 /* If nothing to remove - return success */
2383 if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2384 goto exit_rm_agg_cfg_tc;
2385
2386 status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
2387 if (status)
2388 goto exit_rm_agg_cfg_tc;
2389
2390 /* Delete aggregator node(s) */
2391 status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
2392 if (status)
2393 goto exit_rm_agg_cfg_tc;
2394
2395 clear_bit(tc, agg_info->tc_bitmap);
2396 exit_rm_agg_cfg_tc:
2397 return status;
2398 }
2399
2400 /**
2401 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
2402 * @pi: port information structure
2403 * @agg_id: aggregator ID
2404 * @tc_bitmap: 8-bit TC bitmap
2405 *
2406 * Save aggregator TC bitmap. This function needs to be called with scheduler
2407 * lock held.
2408 */
2409 static enum ice_status
2410 ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
2411 unsigned long *tc_bitmap)
2412 {
2413 struct ice_sched_agg_info *agg_info;
2414
2415 agg_info = ice_get_agg_info(pi->hw, agg_id);
2416 if (!agg_info)
2417 return ICE_ERR_PARAM;
2418 bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
2419 ICE_MAX_TRAFFIC_CLASS);
2420 return 0;
2421 }
2422
2423 /**
2424 * ice_sched_add_agg_cfg - create an aggregator node
2425 * @pi: port information structure
2426 * @agg_id: aggregator ID
2427 * @tc: TC number
2428 *
2429 * This function creates an aggregator node and intermediate nodes if required
2430 * for the given TC
2431 */
2432 static enum ice_status
2433 ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
2434 {
2435 struct ice_sched_node *parent, *agg_node, *tc_node;
2436 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
2437 enum ice_status status = 0;
2438 struct ice_hw *hw = pi->hw;
2439 u32 first_node_teid;
2440 u16 num_nodes_added;
2441 u8 i, aggl;
2442
2443 tc_node = ice_sched_get_tc_node(pi, tc);
2444 if (!tc_node)
2445 return ICE_ERR_CFG;
2446
2447 agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
2448 /* Does Agg node already exist ? */
2449 if (agg_node)
2450 return status;
2451
2452 aggl = ice_sched_get_agg_layer(hw);
2453
2454 /* need one node in Agg layer */
2455 num_nodes[aggl] = 1;
2456
2457 /* Check whether the intermediate nodes have space to add the
2458 * new aggregator. If they are full, then SW needs to allocate a new
2459 * intermediate node on those layers
2460 */
2461 for (i = hw->sw_entry_point_layer; i < aggl; i++) {
2462 parent = ice_sched_get_first_node(pi, tc_node, i);
2463
2464 /* scan all the siblings */
2465 while (parent) {
2466 if (parent->num_children < hw->max_children[i])
2467 break;
2468 parent = parent->sibling;
2469 }
2470
2471 /* all the nodes are full, reserve one for this layer */
2472 if (!parent)
2473 num_nodes[i]++;
2474 }
2475
2476 /* add the aggregator node */
2477 parent = tc_node;
2478 for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
2479 if (!parent)
2480 return ICE_ERR_CFG;
2481
2482 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
2483 num_nodes[i],
2484 &first_node_teid,
2485 &num_nodes_added);
2486 if (status || num_nodes[i] != num_nodes_added)
2487 return ICE_ERR_CFG;
2488
2489 /* The newly added node can be a new parent for the next
2490 * layer nodes
2491 */
2492 if (num_nodes_added) {
2493 parent = ice_sched_find_node_by_teid(tc_node,
2494 first_node_teid);
2495 /* register aggregator ID with the aggregator node */
2496 if (parent && i == aggl)
2497 parent->agg_id = agg_id;
2498 } else {
2499 parent = parent->children[0];
2500 }
2501 }
2502
2503 return 0;
2504 }
2505
2506 /**
2507 * ice_sched_cfg_agg - configure aggregator node
2508 * @pi: port information structure
2509 * @agg_id: aggregator ID
2510 * @agg_type: aggregator type queue, VSI, or aggregator group
2511 * @tc_bitmap: TC bitmap of enabled TC(s)
2512 *
2513 * It registers a unique aggregator node into scheduler services. It
2514 * allows a user to register with a unique ID to track its resources.
2515 * The aggregator type determines if this is a queue group, VSI group
2516 * or aggregator group. It then creates the aggregator node(s) for requested
2517 * TC(s) or removes an existing aggregator node including its configuration
2518 * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
2519 * resources and remove aggregator ID.
2520 * This function needs to be called with scheduler lock held.
2521 */
2522 static enum ice_status
2523 ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
2524 enum ice_agg_type agg_type, unsigned long *tc_bitmap)
2525 {
2526 struct ice_sched_agg_info *agg_info;
2527 enum ice_status status = 0;
2528 struct ice_hw *hw = pi->hw;
2529 u8 tc;
2530
2531 agg_info = ice_get_agg_info(hw, agg_id);
2532 if (!agg_info) {
2533 /* Create new entry for new aggregator ID */
2534 agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
2535 GFP_KERNEL);
2536 if (!agg_info)
2537 return ICE_ERR_NO_MEMORY;
2538
2539 agg_info->agg_id = agg_id;
2540 agg_info->agg_type = agg_type;
2541 agg_info->tc_bitmap[0] = 0;
2542
2543 /* Initialize the aggregator VSI list head */
2544 INIT_LIST_HEAD(&agg_info->agg_vsi_list);
2545
2546 /* Add new entry in aggregator list */
2547 list_add(&agg_info->list_entry, &hw->agg_list);
2548 }
2549 /* Create aggregator node(s) for requested TC(s) */
2550 ice_for_each_traffic_class(tc) {
2551 if (!ice_is_tc_ena(*tc_bitmap, tc)) {
2552 /* Delete aggregator cfg TC if it exists previously */
2553 status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
2554 if (status)
2555 break;
2556 continue;
2557 }
2558
2559 /* Check if aggregator node for TC already exists */
2560 if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
2561 continue;
2562
2563 /* Create new aggregator node for TC */
2564 status = ice_sched_add_agg_cfg(pi, agg_id, tc);
2565 if (status)
2566 break;
2567
2568 /* Save aggregator node's TC information */
2569 set_bit(tc, agg_info->tc_bitmap);
2570 }
2571
2572 return status;
2573 }
2574
2575 /**
2576 * ice_cfg_agg - config aggregator node
2577 * @pi: port information structure
2578 * @agg_id: aggregator ID
2579 * @agg_type: aggregator type queue, VSI, or aggregator group
2580 * @tc_bitmap: 8-bit TC bitmap
2581 *
2582 * This function configures aggregator node(s).
2583 */
2584 enum ice_status
2585 ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
2586 u8 tc_bitmap)
2587 {
2588 unsigned long bitmap = tc_bitmap;
2589 enum ice_status status;
2590
2591 mutex_lock(&pi->sched_lock);
2592 status = ice_sched_cfg_agg(pi, agg_id, agg_type,
2593 (unsigned long *)&bitmap);
2594 if (!status)
2595 status = ice_save_agg_tc_bitmap(pi, agg_id,
2596 (unsigned long *)&bitmap);
2597 mutex_unlock(&pi->sched_lock);
2598 return status;
2599 }
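
/* Illustrative sketch only, not part of the driver: creates (or updates) an
 * aggregator node on TC 0 and TC 1. The aggregator ID and the helper name
 * are assumptions; ICE_AGG_TYPE_AGG is assumed to be the aggregator group
 * type defined elsewhere in the driver. ice_cfg_agg() takes pi->sched_lock
 * itself.
 */
static enum ice_status __maybe_unused
ice_sched_example_create_agg(struct ice_port_info *pi)
{
	u8 tc_bitmap = BIT(0) | BIT(1);	/* enable the aggregator on TC 0/1 */

	return ice_cfg_agg(pi, 10 /* agg_id */, ICE_AGG_TYPE_AGG, tc_bitmap);
}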
2600
2601 /**
2602 * ice_get_agg_vsi_info - get the aggregator VSI info
2603 * @agg_info: aggregator info
2604 * @vsi_handle: software VSI handle
2605 *
2606 * The function returns aggregator VSI info based on VSI handle. This function
2607 * needs to be called with scheduler lock held.
2608 */
2609 static struct ice_sched_agg_vsi_info *
2610 ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
2611 {
2612 struct ice_sched_agg_vsi_info *agg_vsi_info;
2613
2614 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry)
2615 if (agg_vsi_info->vsi_handle == vsi_handle)
2616 return agg_vsi_info;
2617
2618 return NULL;
2619 }
2620
2621 /**
2622 * ice_get_vsi_agg_info - get the aggregator info of VSI
2623 * @hw: pointer to the hardware structure
2624 * @vsi_handle: software VSI handle
2625 *
2626 * The function returns the aggregator info of the VSI represented via
2627 * vsi_handle. In this case the VSI has an aggregator other than the default
2628 * one. This function needs to be called with the scheduler lock held.
2629 */
2630 static struct ice_sched_agg_info *
2631 ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
2632 {
2633 struct ice_sched_agg_info *agg_info;
2634
2635 list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
2636 struct ice_sched_agg_vsi_info *agg_vsi_info;
2637
2638 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2639 if (agg_vsi_info)
2640 return agg_info;
2641 }
2642 return NULL;
2643 }
2644
2645 /**
2646 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
2647 * @pi: port information structure
2648 * @agg_id: aggregator ID
2649 * @vsi_handle: software VSI handle
2650 * @tc_bitmap: TC bitmap of enabled TC(s)
2651 *
2652 * Save the VSI to aggregator TC bitmap. This function needs to be called with
2653 * the scheduler lock held.
2654 */
2655 static enum ice_status
2656 ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2657 unsigned long *tc_bitmap)
2658 {
2659 struct ice_sched_agg_vsi_info *agg_vsi_info;
2660 struct ice_sched_agg_info *agg_info;
2661
2662 agg_info = ice_get_agg_info(pi->hw, agg_id);
2663 if (!agg_info)
2664 return ICE_ERR_PARAM;
2665 /* check if the entry already exists */
2666 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2667 if (!agg_vsi_info)
2668 return ICE_ERR_PARAM;
2669 bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
2670 ICE_MAX_TRAFFIC_CLASS);
2671 return 0;
2672 }
2673
2674 /**
2675 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
2676 * @pi: port information structure
2677 * @agg_id: aggregator ID
2678 * @vsi_handle: software VSI handle
2679 * @tc_bitmap: TC bitmap of enabled TC(s)
2680 *
2681 * This function moves VSI to a new or default aggregator node. If VSI is
2682 * already associated to the aggregator node then no operation is performed on
2683 * the tree. This function needs to be called with scheduler lock held.
2684 */
2685 static enum ice_status
2686 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
2687 u16 vsi_handle, unsigned long *tc_bitmap)
2688 {
2689 struct ice_sched_agg_vsi_info *agg_vsi_info;
2690 struct ice_sched_agg_info *agg_info;
2691 enum ice_status status = 0;
2692 struct ice_hw *hw = pi->hw;
2693 u8 tc;
2694
2695 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2696 return ICE_ERR_PARAM;
2697 agg_info = ice_get_agg_info(hw, agg_id);
2698 if (!agg_info)
2699 return ICE_ERR_PARAM;
2700 /* check if the entry already exists */
2701 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
2702 if (!agg_vsi_info) {
2703 /* Create new entry for VSI under aggregator list */
2704 agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
2705 sizeof(*agg_vsi_info), GFP_KERNEL);
2706 if (!agg_vsi_info)
2707 return ICE_ERR_PARAM;
2708
2709 /* add VSI ID into the aggregator list */
2710 agg_vsi_info->vsi_handle = vsi_handle;
2711 list_add(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
2712 }
2713 /* Move VSI node to new aggregator node for requested TC(s) */
2714 ice_for_each_traffic_class(tc) {
2715 if (!ice_is_tc_ena(*tc_bitmap, tc))
2716 continue;
2717
2718 /* Move VSI to new aggregator */
2719 status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
2720 if (status)
2721 break;
2722
2723 set_bit(tc, agg_vsi_info->tc_bitmap);
2724 }
2725 return status;
2726 }
2727
2728 /**
2729 * ice_sched_rm_unused_rl_prof - remove unused RL profile
2730 * @pi: port information structure
2731 *
2732 * This function removes unused rate limit profiles from the HW and
2733 * SW DB. The caller needs to hold scheduler lock.
2734 */
2735 static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
2736 {
2737 u16 ln;
2738
2739 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
2740 struct ice_aqc_rl_profile_info *rl_prof_elem;
2741 struct ice_aqc_rl_profile_info *rl_prof_tmp;
2742
2743 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
2744 &pi->rl_prof_list[ln], list_entry) {
2745 if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
2746 ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n");
2747 }
2748 }
2749 }
2750
2751 /**
2752 * ice_sched_update_elem - update element
2753 * @hw: pointer to the HW struct
2754 * @node: pointer to node
2755 * @info: node info to update
2756 *
2757 * Update the HW DB, and local SW DB of node. Update the scheduling
2758 * parameters of node from argument info data buffer (Info->data buf) and
2759 * returns success or error on config sched element failure. The caller
2760 * needs to hold scheduler lock.
2761 */
2762 static enum ice_status
2763 ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
2764 struct ice_aqc_txsched_elem_data *info)
2765 {
2766 struct ice_aqc_txsched_elem_data buf;
2767 enum ice_status status;
2768 u16 elem_cfgd = 0;
2769 u16 num_elems = 1;
2770
2771 buf = *info;
2772 /* Parent TEID is reserved field in this aq call */
2773 buf.parent_teid = 0;
2774 /* Element type is reserved field in this aq call */
2775 buf.data.elem_type = 0;
2776 /* Flags is reserved field in this aq call */
2777 buf.data.flags = 0;
2778
2779 /* Update HW DB */
2780 /* Configure element node */
2781 status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
2782 &elem_cfgd, NULL);
2783 if (status || elem_cfgd != num_elems) {
2784 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
2785 return ICE_ERR_CFG;
2786 }
2787
2788 /* Config success case */
2789 /* Now update local SW DB */
2790 /* Only copy the data portion of info buffer */
2791 node->info.data = info->data;
2792 return status;
2793 }
2794
2795 /**
2796 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
2797 * @hw: pointer to the HW struct
2798 * @node: sched node to configure
2799 * @rl_type: rate limit type CIR, EIR, or shared
2800 * @bw_alloc: BW weight/allocation
2801 *
2802 * This function configures node element's BW allocation.
2803 */
2804 static enum ice_status
2805 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
2806 enum ice_rl_type rl_type, u16 bw_alloc)
2807 {
2808 struct ice_aqc_txsched_elem_data buf;
2809 struct ice_aqc_txsched_elem *data;
2810
2811 buf = node->info;
2812 data = &buf.data;
2813 if (rl_type == ICE_MIN_BW) {
2814 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
2815 data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
2816 } else if (rl_type == ICE_MAX_BW) {
2817 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2818 data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
2819 } else {
2820 return ICE_ERR_PARAM;
2821 }
2822
2823 /* Configure element */
2824 return ice_sched_update_elem(hw, node, &buf);
2825 }
2826
2827 /**
2828 * ice_move_vsi_to_agg - moves VSI to new or default aggregator
2829 * @pi: port information structure
2830 * @agg_id: aggregator ID
2831 * @vsi_handle: software VSI handle
2832 * @tc_bitmap: TC bitmap of enabled TC(s)
2833 *
2834 * Move or associate VSI to a new or default aggregator node.
2835 */
2836 enum ice_status
2837 ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
2838 u8 tc_bitmap)
2839 {
2840 unsigned long bitmap = tc_bitmap;
2841 enum ice_status status;
2842
2843 mutex_lock(&pi->sched_lock);
2844 status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
2845 (unsigned long *)&bitmap);
2846 if (!status)
2847 status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
2848 (unsigned long *)&bitmap);
2849 mutex_unlock(&pi->sched_lock);
2850 return status;
2851 }
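
/* Illustrative sketch only, not part of the driver: once an aggregator has
 * been configured with ice_cfg_agg(), a VSI can be moved under it per TC.
 * The aggregator ID and the helper name are assumptions for this example;
 * ice_move_vsi_to_agg() takes pi->sched_lock itself.
 */
static enum ice_status __maybe_unused
ice_sched_example_move_vsi(struct ice_port_info *pi, u16 vsi_handle)
{
	/* move the VSI subtree under aggregator 10 on TC 0 only */
	return ice_move_vsi_to_agg(pi, 10 /* agg_id */, vsi_handle, BIT(0));
}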
2852
2853 /**
2854 * ice_set_clear_cir_bw - set or clear CIR BW
2855 * @bw_t_info: bandwidth type information structure
2856 * @bw: bandwidth in Kbps - Kilo bits per sec
2857 *
2858 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
2859 */
2860 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2861 {
2862 if (bw == ICE_SCHED_DFLT_BW) {
2863 clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
2864 bw_t_info->cir_bw.bw = 0;
2865 } else {
2866 /* Save type of BW information */
2867 set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
2868 bw_t_info->cir_bw.bw = bw;
2869 }
2870 }
2871
2872 /**
2873 * ice_set_clear_eir_bw - set or clear EIR BW
2874 * @bw_t_info: bandwidth type information structure
2875 * @bw: bandwidth in Kbps - Kilo bits per sec
2876 *
2877 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
2878 */
2879 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2880 {
2881 if (bw == ICE_SCHED_DFLT_BW) {
2882 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2883 bw_t_info->eir_bw.bw = 0;
2884 } else {
2885 /* EIR BW and Shared BW profiles are mutually exclusive and
2886 * hence only one of them may be set for any given element.
2887 * First clear earlier saved shared BW information.
2888 */
2889 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2890 bw_t_info->shared_bw = 0;
2891 /* save EIR BW information */
2892 set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2893 bw_t_info->eir_bw.bw = bw;
2894 }
2895 }
2896
2897 /**
2898 * ice_set_clear_shared_bw - set or clear shared BW
2899 * @bw_t_info: bandwidth type information structure
2900 * @bw: bandwidth in Kbps - Kilo bits per sec
2901 *
2902 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
2903 */
2904 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
2905 {
2906 if (bw == ICE_SCHED_DFLT_BW) {
2907 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2908 bw_t_info->shared_bw = 0;
2909 } else {
2910 /* EIR BW and Shared BW profiles are mutually exclusive and
2911 * hence only one of them may be set for any given element.
2912 * First clear earlier saved EIR BW information.
2913 */
2914 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2915 bw_t_info->eir_bw.bw = 0;
2916 /* save shared BW information */
2917 set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2918 bw_t_info->shared_bw = bw;
2919 }
2920 }
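
/* Illustrative sketch only, not part of the driver: shows how the three
 * helpers above record BW values in an ice_bw_type_info and how passing
 * ICE_SCHED_DFLT_BW clears a previously saved value. The helper name and
 * the BW numbers are assumptions for this example.
 */
static void __maybe_unused
ice_sched_example_save_bw(struct ice_bw_type_info *bw_t_info)
{
	/* save a 50 Mbps committed rate and a 100 Mbps peak rate */
	ice_set_clear_cir_bw(bw_t_info, 50000);
	ice_set_clear_eir_bw(bw_t_info, 100000);

	/* saving a shared rate implicitly clears the saved EIR value */
	ice_set_clear_shared_bw(bw_t_info, 100000);

	/* ICE_SCHED_DFLT_BW clears the saved CIR value and its bitmap bit */
	ice_set_clear_cir_bw(bw_t_info, ICE_SCHED_DFLT_BW);
}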
2921
2922 /**
2923 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
2924 * @hw: pointer to the HW struct
2925 * @bw: bandwidth in Kbps
2926 *
2927 * This function calculates the wakeup parameter of RL profile.
2928 */
2929 static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
2930 {
2931 s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
2932 s32 wakeup_f_int;
2933 u16 wakeup = 0;
2934
2935 /* Get the wakeup integer value */
2936 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2937 wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec);
2938 if (wakeup_int > 63) {
2939 wakeup = (u16)((1 << 15) | wakeup_int);
2940 } else {
2941 /* Calculate fraction value up to 4 decimals
2942 * Convert Integer value to a constant multiplier
2943 */
2944 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
2945 wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
2946 hw->psm_clk_freq, bytes_per_sec);
2947
2948 /* Get Fraction value */
2949 wakeup_f = wakeup_a - wakeup_b;
2950
2951 /* Round up the Fractional value via Ceil(Fractional value) */
2952 if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
2953 wakeup_f += 1;
2954
2955 wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
2956 ICE_RL_PROF_MULTIPLIER);
2957 wakeup |= (u16)(wakeup_int << 9);
2958 wakeup |= (u16)(0x1ff & wakeup_f_int);
2959 }
2960
2961 return wakeup;
2962 }
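
/* Worked example (illustrative only, the clock value is an assumption): with
 * hw->psm_clk_freq = 600000000 and bw = 5000 Kbps,
 * bytes_per_sec = 5000 * 1000 / 8 = 625000 and
 * wakeup_int = 600000000 / 625000 = 960. Since 960 > 63 the integer path is
 * taken and wakeup = (1 << 15) | 960 = 0x83C0. Higher rates yield a smaller
 * wakeup_int and fall through to the fractional calculation instead.
 */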
2963
2964 /**
2965 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
2966 * @hw: pointer to the HW struct
2967 * @bw: bandwidth in Kbps
2968 * @profile: profile parameters to return
2969 *
2970 * This function converts the BW to profile structure format.
2971 */
2972 static enum ice_status
2973 ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
2974 struct ice_aqc_rl_profile_elem *profile)
2975 {
2976 enum ice_status status = ICE_ERR_PARAM;
2977 s64 bytes_per_sec, ts_rate, mv_tmp;
2978 bool found = false;
2979 s32 encode = 0;
2980 s64 mv = 0;
2981 s32 i;
2982
2983 /* BW settings range from 0.5 Mb/sec to 100 Gb/sec */
2984 if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
2985 return status;
2986
2987 /* Bytes per second from Kbps */
2988 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2989
2990 /* encode is 6 bits wide, but only 5 bits are really useful */
2991 for (i = 0; i < 64; i++) {
2992 u64 pow_result = BIT_ULL(i);
2993
2994 ts_rate = div64_long((s64)hw->psm_clk_freq,
2995 pow_result * ICE_RL_PROF_TS_MULTIPLIER);
2996 if (ts_rate <= 0)
2997 continue;
2998
2999 /* Multiplier value */
3000 mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
3001 ts_rate);
3002
3003 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
3004 mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
3005
3006 /* First multiplier value greater than the given
3007 * accuracy bytes
3008 */
3009 if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
3010 encode = i;
3011 found = true;
3012 break;
3013 }
3014 }
3015 if (found) {
3016 u16 wm;
3017
3018 wm = ice_sched_calc_wakeup(hw, bw);
3019 profile->rl_multiply = cpu_to_le16(mv);
3020 profile->wake_up_calc = cpu_to_le16(wm);
3021 profile->rl_encode = cpu_to_le16(encode);
3022 status = 0;
3023 } else {
3024 status = ICE_ERR_DOES_NOT_EXIST;
3025 }
3026
3027 return status;
3028 }
3029
3030 /**
3031 * ice_sched_add_rl_profile - add RL profile
3032 * @pi: port information structure
3033 * @rl_type: type of rate limit BW - min, max, or shared
3034 * @bw: bandwidth in Kbps - Kilo bits per sec
3035 * @layer_num: specifies in which layer to create profile
3036 *
3037 * This function first checks the existing list for corresponding BW
3038 * parameter. If it exists, it returns the associated profile; otherwise
3039 * it creates a new rate limit profile for the requested BW, adds it to
3040 * the HW DB and the local list, and returns the new profile or NULL on error.
3041 * The caller needs to hold the scheduler lock.
3042 */
3043 static struct ice_aqc_rl_profile_info *
3044 ice_sched_add_rl_profile(struct ice_port_info *pi,
3045 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
3046 {
3047 struct ice_aqc_rl_profile_info *rl_prof_elem;
3048 u16 profiles_added = 0, num_profiles = 1;
3049 struct ice_aqc_rl_profile_elem *buf;
3050 enum ice_status status;
3051 struct ice_hw *hw;
3052 u8 profile_type;
3053
3054 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
3055 return NULL;
3056 switch (rl_type) {
3057 case ICE_MIN_BW:
3058 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
3059 break;
3060 case ICE_MAX_BW:
3061 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
3062 break;
3063 case ICE_SHARED_BW:
3064 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
3065 break;
3066 default:
3067 return NULL;
3068 }
3069
3070 if (!pi)
3071 return NULL;
3072 hw = pi->hw;
3073 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
3074 list_entry)
3075 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
3076 profile_type && rl_prof_elem->bw == bw)
3077 /* Return existing profile ID info */
3078 return rl_prof_elem;
3079
3080 /* Create new profile ID */
3081 rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
3082 GFP_KERNEL);
3083
3084 if (!rl_prof_elem)
3085 return NULL;
3086
3087 status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
3088 if (status)
3089 goto exit_add_rl_prof;
3090
3091 rl_prof_elem->bw = bw;
3092 /* layer_num is zero relative, and fw expects level from 1 to 9 */
3093 rl_prof_elem->profile.level = layer_num + 1;
3094 rl_prof_elem->profile.flags = profile_type;
3095 rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
3096
3097 /* Create new entry in HW DB */
3098 buf = &rl_prof_elem->profile;
3099 status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
3100 &profiles_added, NULL);
3101 if (status || profiles_added != num_profiles)
3102 goto exit_add_rl_prof;
3103
3104 /* Good entry - add in the list */
3105 rl_prof_elem->prof_id_ref = 0;
3106 list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
3107 return rl_prof_elem;
3108
3109 exit_add_rl_prof:
3110 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
3111 return NULL;
3112 }
3113
3114 /**
3115 * ice_sched_cfg_node_bw_lmt - configure node sched params
3116 * @hw: pointer to the HW struct
3117 * @node: sched node to configure
3118 * @rl_type: rate limit type CIR, EIR, or shared
3119 * @rl_prof_id: rate limit profile ID
3120 *
3121 * This function configures node element's BW limit.
3122 */
3123 static enum ice_status
3124 ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
3125 enum ice_rl_type rl_type, u16 rl_prof_id)
3126 {
3127 struct ice_aqc_txsched_elem_data buf;
3128 struct ice_aqc_txsched_elem *data;
3129
3130 buf = node->info;
3131 data = &buf.data;
3132 switch (rl_type) {
3133 case ICE_MIN_BW:
3134 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
3135 data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
3136 break;
3137 case ICE_MAX_BW:
3138 /* EIR BW and Shared BW profiles are mutually exclusive and
3139 * hence only one of them may be set for any given element
3140 */
3141 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
3142 return ICE_ERR_CFG;
3143 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
3144 data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
3145 break;
3146 case ICE_SHARED_BW:
3147 /* Check for removing shared BW */
3148 if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
3149 /* remove shared profile */
3150 data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
3151 data->srl_id = 0; /* clear SRL field */
3152
3153 /* enable back EIR to default profile */
3154 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
3155 data->eir_bw.bw_profile_idx =
3156 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
3157 break;
3158 }
3159 /* EIR BW and Shared BW profiles are mutually exclusive and
3160 * hence only one of them may be set for any given element
3161 */
3162 if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
3163 (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
3164 ICE_SCHED_DFLT_RL_PROF_ID))
3165 return ICE_ERR_CFG;
3166 /* EIR BW is set to default, disable it */
3167 data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
3168 /* Okay to enable shared BW now */
3169 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
3170 data->srl_id = cpu_to_le16(rl_prof_id);
3171 break;
3172 default:
3173 /* Unknown rate limit type */
3174 return ICE_ERR_PARAM;
3175 }
3176
3177 /* Configure element */
3178 return ice_sched_update_elem(hw, node, &buf);
3179 }
3180
3181 /**
3182 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
3183 * @node: sched node
3184 * @rl_type: rate limit type
3185 *
3186 * If existing profile matches, it returns the corresponding rate
3187 * limit profile ID, otherwise it returns an invalid ID as error.
3188 */
3189 static u16
3190 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
3191 enum ice_rl_type rl_type)
3192 {
3193 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
3194 struct ice_aqc_txsched_elem *data;
3195
3196 data = &node->info.data;
3197 switch (rl_type) {
3198 case ICE_MIN_BW:
3199 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
3200 rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
3201 break;
3202 case ICE_MAX_BW:
3203 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
3204 rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
3205 break;
3206 case ICE_SHARED_BW:
3207 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
3208 rl_prof_id = le16_to_cpu(data->srl_id);
3209 break;
3210 default:
3211 break;
3212 }
3213
3214 return rl_prof_id;
3215 }
3216
3217 /**
3218 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
3219 * @pi: port information structure
3220 * @rl_type: type of rate limit BW - min, max, or shared
3221 * @layer_index: layer index
3222 *
3223 * This function returns requested profile creation layer.
3224 */
3225 static u8
3226 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
3227 u8 layer_index)
3228 {
3229 struct ice_hw *hw = pi->hw;
3230
3231 if (layer_index >= hw->num_tx_sched_layers)
3232 return ICE_SCHED_INVAL_LAYER_NUM;
3233 switch (rl_type) {
3234 case ICE_MIN_BW:
3235 if (hw->layer_info[layer_index].max_cir_rl_profiles)
3236 return layer_index;
3237 break;
3238 case ICE_MAX_BW:
3239 if (hw->layer_info[layer_index].max_eir_rl_profiles)
3240 return layer_index;
3241 break;
3242 case ICE_SHARED_BW:
3243 /* if current layer doesn't support SRL profile creation
3244 * then try a layer up or down.
3245 */
3246 if (hw->layer_info[layer_index].max_srl_profiles)
3247 return layer_index;
3248 else if (layer_index < hw->num_tx_sched_layers - 1 &&
3249 hw->layer_info[layer_index + 1].max_srl_profiles)
3250 return layer_index + 1;
3251 else if (layer_index > 0 &&
3252 hw->layer_info[layer_index - 1].max_srl_profiles)
3253 return layer_index - 1;
3254 break;
3255 default:
3256 break;
3257 }
3258 return ICE_SCHED_INVAL_LAYER_NUM;
3259 }
3260
3261 /**
3262 * ice_sched_get_srl_node - get shared rate limit node
3263 * @node: tree node
3264 * @srl_layer: shared rate limit layer
3265 *
3266 * This function returns SRL node to be used for shared rate limit purpose.
3267 * The caller needs to hold scheduler lock.
3268 */
3269 static struct ice_sched_node *
3270 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
3271 {
3272 if (srl_layer > node->tx_sched_layer)
3273 return node->children[0];
3274 else if (srl_layer < node->tx_sched_layer)
3275 /* A node can't be created without a parent, so every node
3276 * except the root node has a valid parent.
3277 */
3278 return node->parent;
3279 else
3280 return node;
3281 }
3282
3283 /**
3284 * ice_sched_rm_rl_profile - remove RL profile ID
3285 * @pi: port information structure
3286 * @layer_num: layer number where profiles are saved
3287 * @profile_type: profile type like EIR, CIR, or SRL
3288 * @profile_id: profile ID to remove
3289 *
3290 * This function removes rate limit profile from layer 'layer_num' of type
3291 * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
3292 * scheduler lock.
3293 */
3294 static enum ice_status
3295 ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
3296 u16 profile_id)
3297 {
3298 struct ice_aqc_rl_profile_info *rl_prof_elem;
3299 enum ice_status status = 0;
3300
3301 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
3302 return ICE_ERR_PARAM;
3303 /* Check the existing list for RL profile */
3304 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
3305 list_entry)
3306 if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
3307 profile_type &&
3308 le16_to_cpu(rl_prof_elem->profile.profile_id) ==
3309 profile_id) {
3310 if (rl_prof_elem->prof_id_ref)
3311 rl_prof_elem->prof_id_ref--;
3312
3313 /* Remove old profile ID from database */
3314 status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
3315 if (status && status != ICE_ERR_IN_USE)
3316 ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
3317 break;
3318 }
3319 if (status == ICE_ERR_IN_USE)
3320 status = 0;
3321 return status;
3322 }
3323
3324 /**
3325 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
3326 * @pi: port information structure
3327 * @node: pointer to node structure
3328 * @rl_type: rate limit type min, max, or shared
3329 * @layer_num: layer number where RL profiles are saved
3330 *
3331 * This function configures node element's BW rate limit profile ID of
3332 * type CIR, EIR, or SRL to default. This function needs to be called
3333 * with the scheduler lock held.
3334 */
3335 static enum ice_status
3336 ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
3337 struct ice_sched_node *node,
3338 enum ice_rl_type rl_type, u8 layer_num)
3339 {
3340 enum ice_status status;
3341 struct ice_hw *hw;
3342 u8 profile_type;
3343 u16 rl_prof_id;
3344 u16 old_id;
3345
3346 hw = pi->hw;
3347 switch (rl_type) {
3348 case ICE_MIN_BW:
3349 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
3350 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
3351 break;
3352 case ICE_MAX_BW:
3353 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
3354 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
3355 break;
3356 case ICE_SHARED_BW:
3357 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
3358 /* No SRL is configured for default case */
3359 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
3360 break;
3361 default:
3362 return ICE_ERR_PARAM;
3363 }
3364 /* Save existing RL prof ID for later clean up */
3365 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
3366 /* Configure BW scheduling parameters */
3367 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
3368 if (status)
3369 return status;
3370
3371 /* Remove stale RL profile ID */
3372 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
3373 old_id == ICE_SCHED_INVAL_PROF_ID)
3374 return 0;
3375
3376 return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
3377 }
3378
3379 /**
3380 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
3381 * @pi: port information structure
3382 * @node: pointer to node structure
3383 * @layer_num: layer number where rate limit profiles are saved
3384 * @rl_type: rate limit type min, max, or shared
3385 * @bw: bandwidth value
3386 *
3387 * This function configures the node element's bandwidth as either SRL or EIR exclusively.
3388 * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
3389 * them may be set for any given element. This function needs to be called
3390 * with the scheduler lock held.
3391 */
3392 static enum ice_status
3393 ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
3394 struct ice_sched_node *node,
3395 u8 layer_num, enum ice_rl_type rl_type, u32 bw)
3396 {
3397 if (rl_type == ICE_SHARED_BW) {
3398 /* An SRL node is passed in this case; it may be a different node */
3399 if (bw == ICE_SCHED_DFLT_BW)
3400 /* The SRL is being removed; ice_sched_cfg_node_bw_lmt()
3401 * restores EIR to its default. EIR is not set in this
3402 * case, so no additional action is required.
3403 */
3404 return 0;
3405
3406 /* The SRL is being configured, so set EIR to default here.
3407 * ice_sched_cfg_node_bw_lmt() disables EIR when it
3408 * configures SRL.
3409 */
3410 return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
3411 layer_num);
3412 } else if (rl_type == ICE_MAX_BW &&
3413 node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
3414 /* Remove the shared profile; the set-default-shared-BW call
3415 * removes the shared profile for a node.
3416 */
3417 return ice_sched_set_node_bw_dflt(pi, node,
3418 ICE_SHARED_BW,
3419 layer_num);
3420 }
3421 return 0;
3422 }
3423
3424 /**
3425 * ice_sched_set_node_bw - set node's bandwidth
3426 * @pi: port information structure
3427 * @node: tree node
3428 * @rl_type: rate limit type min, max, or shared
3429 * @bw: bandwidth in Kbps - Kilo bits per sec
3430 * @layer_num: layer number
3431 *
3432 * This function adds a new profile corresponding to the requested BW, configures
3433 * the node's RL profile ID of type CIR, EIR, or SRL, and removes the old profile
3434 * ID from the local database. The caller needs to hold the scheduler lock.
3435 */
3436 static enum ice_status
3437 ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
3438 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
3439 {
3440 struct ice_aqc_rl_profile_info *rl_prof_info;
3441 enum ice_status status = ICE_ERR_PARAM;
3442 struct ice_hw *hw = pi->hw;
3443 u16 old_id, rl_prof_id;
3444
3445 rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
3446 if (!rl_prof_info)
3447 return status;
3448
3449 rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
3450
3451 /* Save existing RL prof ID for later clean up */
3452 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
3453 /* Configure BW scheduling parameters */
3454 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
3455 if (status)
3456 return status;
3457
3458 /* New changes have been applied */
3459 /* Increment the profile ID reference count */
3460 rl_prof_info->prof_id_ref++;
3461
3462 /* Check for old ID removal */
3463 if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
3464 old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
3465 return 0;
3466
3467 return ice_sched_rm_rl_profile(pi, layer_num,
3468 rl_prof_info->profile.flags &
3469 ICE_AQC_RL_PROFILE_TYPE_M, old_id);
3470 }
3471
3472 /**
3473 * ice_sched_set_node_bw_lmt - set node's BW limit
3474 * @pi: port information structure
3475 * @node: tree node
3476 * @rl_type: rate limit type min, max, or shared
3477 * @bw: bandwidth in Kbps - Kilo bits per sec
3478 *
3479 * It updates the node's BW limit parameters, such as the BW RL profile ID of
3480 * type CIR, EIR, or SRL. The caller needs to hold the scheduler lock.
3481 */
3482 static enum ice_status
3483 ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
3484 enum ice_rl_type rl_type, u32 bw)
3485 {
3486 struct ice_sched_node *cfg_node = node;
3487 enum ice_status status;
3488
3489 struct ice_hw *hw;
3490 u8 layer_num;
3491
3492 if (!pi)
3493 return ICE_ERR_PARAM;
3494 hw = pi->hw;
3495 /* Remove unused RL profile IDs from HW and SW DB */
3496 ice_sched_rm_unused_rl_prof(pi);
3497 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
3498 node->tx_sched_layer);
3499 if (layer_num >= hw->num_tx_sched_layers)
3500 return ICE_ERR_PARAM;
3501
3502 if (rl_type == ICE_SHARED_BW) {
3503 /* SRL node may be different */
3504 cfg_node = ice_sched_get_srl_node(node, layer_num);
3505 if (!cfg_node)
3506 return ICE_ERR_CFG;
3507 }
3508 /* EIR BW and Shared BW profiles are mutually exclusive and
3509 * hence only one of them may be set for any given element
3510 */
3511 status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
3512 bw);
3513 if (status)
3514 return status;
3515 if (bw == ICE_SCHED_DFLT_BW)
3516 return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
3517 layer_num);
3518 return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
3519 }
3520
3521 /**
3522 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
3523 * @pi: port information structure
3524 * @node: pointer to node structure
3525 * @rl_type: rate limit type min, max, or shared
3526 *
3527 * This function configures node element's BW rate limit profile ID of
3528 * type CIR, EIR, or SRL to default. This function needs to be called
3529 * with the scheduler lock held.
3530 */
3531 static enum ice_status
3532 ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
3533 struct ice_sched_node *node,
3534 enum ice_rl_type rl_type)
3535 {
3536 return ice_sched_set_node_bw_lmt(pi, node, rl_type,
3537 ICE_SCHED_DFLT_BW);
3538 }
3539
3540 /**
3541 * ice_sched_validate_srl_node - Check node for SRL applicability
3542 * @node: sched node to configure
3543 * @sel_layer: selected SRL layer
3544 *
3545 * This function checks if the SRL can be applied to a selected layer node on
3546 * behalf of the requested node (first argument). This function needs to be
3547 * called with scheduler lock held.
3548 */
3549 static enum ice_status
3550 ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
3551 {
3552 /* SRL profiles are not available on all layers. Check if the
3553 * SRL profile can be applied to a node above or below the
3554 * requested node. SRL configuration is possible only if the
3555 * selected layer's node has single child.
3556 */
3557 if (sel_layer == node->tx_sched_layer ||
3558 ((sel_layer == node->tx_sched_layer + 1) &&
3559 node->num_children == 1) ||
3560 ((sel_layer == node->tx_sched_layer - 1) &&
3561 (node->parent && node->parent->num_children == 1)))
3562 return 0;
3563
3564 return ICE_ERR_CFG;
3565 }
3566
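/* Illustrative note (not from the original source): the check above accepts
 * exactly three arrangements for a hypothetical node N at layer L:
 *   - sel_layer == L     : the SRL is applied to N itself
 *   - sel_layer == L + 1 : the SRL is applied below N, valid only when N has
 *                          a single child
 *   - sel_layer == L - 1 : the SRL is applied to N's parent, valid only when
 *                          N is that parent's only child
 * Any other layout would make the shared limit ambiguous, so ICE_ERR_CFG is
 * returned.
 */
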
3567 /**
3568 * ice_sched_save_q_bw - save queue node's BW information
3569 * @q_ctx: queue context structure
3570 * @rl_type: rate limit type min, max, or shared
3571 * @bw: bandwidth in Kbps - Kilo bits per sec
3572 *
3573 * Save BW information of queue type node for post replay use.
3574 */
3575 static enum ice_status
3576 ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
3577 {
3578 switch (rl_type) {
3579 case ICE_MIN_BW:
3580 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
3581 break;
3582 case ICE_MAX_BW:
3583 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
3584 break;
3585 case ICE_SHARED_BW:
3586 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
3587 break;
3588 default:
3589 return ICE_ERR_PARAM;
3590 }
3591 return 0;
3592 }
3593
3594 /**
3595 * ice_sched_set_q_bw_lmt - sets queue BW limit
3596 * @pi: port information structure
3597 * @vsi_handle: sw VSI handle
3598 * @tc: traffic class
3599 * @q_handle: software queue handle
3600 * @rl_type: min, max, or shared
3601 * @bw: bandwidth in Kbps
3602 *
3603 * This function sets BW limit of queue scheduling node.
3604 */
3605 static enum ice_status
3606 ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3607 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
3608 {
3609 enum ice_status status = ICE_ERR_PARAM;
3610 struct ice_sched_node *node;
3611 struct ice_q_ctx *q_ctx;
3612
3613 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
3614 return ICE_ERR_PARAM;
3615 mutex_lock(&pi->sched_lock);
3616 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
3617 if (!q_ctx)
3618 goto exit_q_bw_lmt;
3619 node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
3620 if (!node) {
3621 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
3622 goto exit_q_bw_lmt;
3623 }
3624
3625 /* Return error if it is not a leaf node */
3626 if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
3627 goto exit_q_bw_lmt;
3628
3629 /* SRL bandwidth layer selection */
3630 if (rl_type == ICE_SHARED_BW) {
3631 u8 sel_layer; /* selected layer */
3632
3633 sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
3634 node->tx_sched_layer);
3635 if (sel_layer >= pi->hw->num_tx_sched_layers) {
3636 status = ICE_ERR_PARAM;
3637 goto exit_q_bw_lmt;
3638 }
3639 status = ice_sched_validate_srl_node(node, sel_layer);
3640 if (status)
3641 goto exit_q_bw_lmt;
3642 }
3643
3644 if (bw == ICE_SCHED_DFLT_BW)
3645 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
3646 else
3647 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
3648
3649 if (!status)
3650 status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
3651
3652 exit_q_bw_lmt:
3653 mutex_unlock(&pi->sched_lock);
3654 return status;
3655 }
3656
3657 /**
3658 * ice_cfg_q_bw_lmt - configure queue BW limit
3659 * @pi: port information structure
3660 * @vsi_handle: sw VSI handle
3661 * @tc: traffic class
3662 * @q_handle: software queue handle
3663 * @rl_type: min, max, or shared
3664 * @bw: bandwidth in Kbps
3665 *
3666 * This function configures BW limit of queue scheduling node.
3667 */
3668 enum ice_status
3669 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3670 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
3671 {
3672 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
3673 bw);
3674 }
3675
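/* Illustrative only: a minimal sketch (not part of the driver) showing how a
 * caller that already holds a valid port_info and VSI handle might cap a
 * queue's peak rate via ice_cfg_q_bw_lmt(). The TC/queue handle values of 0
 * and the 100000 Kbps limit are made-up example inputs.
 */
static enum ice_status
ice_example_cap_queue_bw(struct ice_port_info *pi, u16 vsi_handle)
{
	/* limit queue handle 0 on TC 0 to roughly 100 Mbps (max/EIR BW) */
	return ice_cfg_q_bw_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW, 100000);
}
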
3676 /**
3677 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
3678 * @pi: port information structure
3679 * @vsi_handle: sw VSI handle
3680 * @tc: traffic class
3681 * @q_handle: software queue handle
3682 * @rl_type: min, max, or shared
3683 *
3684 * This function configures BW default limit of queue scheduling node.
3685 */
3686 enum ice_status
3687 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
3688 u16 q_handle, enum ice_rl_type rl_type)
3689 {
3690 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
3691 ICE_SCHED_DFLT_BW);
3692 }
3693
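/* Illustrative only: the counterpart of the sketch above, restoring the
 * default (unlimited) max-BW profile on the same hypothetical queue.
 */
static enum ice_status
ice_example_uncap_queue_bw(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_cfg_q_bw_dflt_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW);
}
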
3694 /**
3695 * ice_cfg_rl_burst_size - Set burst size value
3696 * @hw: pointer to the HW struct
3697 * @bytes: burst size in bytes
3698 *
3699 * This function configures/sets the burst size to the requested new value. The
3700 * new burst size value is used for future rate limit calls. It doesn't change
3701 * the existing or previously created RL profiles.
3702 */
3703 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
3704 {
3705 u16 burst_size_to_prog;
3706
3707 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
3708 bytes > ICE_MAX_BURST_SIZE_ALLOWED)
3709 return ICE_ERR_PARAM;
3710 if (ice_round_to_num(bytes, 64) <=
3711 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
3712 /* 64 byte granularity case */
3713 /* Disable MSB granularity bit */
3714 burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
3715 /* round number to nearest 64 byte granularity */
3716 bytes = ice_round_to_num(bytes, 64);
3717 /* The value is in 64 byte chunks */
3718 burst_size_to_prog |= (u16)(bytes / 64);
3719 } else {
3720 /* k bytes granularity case */
3721 /* Enable MSB granularity bit */
3722 burst_size_to_prog = ICE_KBYTE_GRANULARITY;
3723 /* round number to nearest 1024 granularity */
3724 bytes = ice_round_to_num(bytes, 1024);
3725 /* check rounding doesn't go beyond allowed */
3726 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
3727 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
3728 /* The value is in k bytes */
3729 burst_size_to_prog |= (u16)(bytes / 1024);
3730 }
3731 hw->max_burst_size = burst_size_to_prog;
3732 return 0;
3733 }
3734
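/* Illustrative only: a hedged sketch of programming the burst size before
 * issuing rate-limit calls. A 4096-byte request is already a multiple of 64,
 * so (assuming it fits within ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) it is
 * encoded as ICE_64_BYTE_GRANULARITY | (4096 / 64), i.e. 64 chunks of
 * 64 bytes with the MSB (KB granularity) bit cleared.
 */
static enum ice_status ice_example_program_burst_size(struct ice_hw *hw)
{
	return ice_cfg_rl_burst_size(hw, 4096);
}
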
3735 /**
3736 * ice_sched_replay_node_prio - re-configure node priority
3737 * @hw: pointer to the HW struct
3738 * @node: sched node to configure
3739 * @priority: priority value
3740 *
3741 * This function configures node element's priority value. It
3742 * needs to be called with scheduler lock held.
3743 */
3744 static enum ice_status
3745 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
3746 u8 priority)
3747 {
3748 struct ice_aqc_txsched_elem_data buf;
3749 struct ice_aqc_txsched_elem *data;
3750 enum ice_status status;
3751
3752 buf = node->info;
3753 data = &buf.data;
3754 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
3755 data->generic = priority;
3756
3757 /* Configure element */
3758 status = ice_sched_update_elem(hw, node, &buf);
3759 return status;
3760 }
3761
3762 /**
3763 * ice_sched_replay_node_bw - replay node(s) BW
3764 * @hw: pointer to the HW struct
3765 * @node: sched node to configure
3766 * @bw_t_info: BW type information
3767 *
3768 * This function restores node's BW from bw_t_info. The caller needs
3769 * to hold the scheduler lock.
3770 */
3771 static enum ice_status
3772 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
3773 struct ice_bw_type_info *bw_t_info)
3774 {
3775 struct ice_port_info *pi = hw->port_info;
3776 enum ice_status status = ICE_ERR_PARAM;
3777 u16 bw_alloc;
3778
3779 if (!node)
3780 return status;
3781 if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
3782 return 0;
3783 if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
3784 status = ice_sched_replay_node_prio(hw, node,
3785 bw_t_info->generic);
3786 if (status)
3787 return status;
3788 }
3789 if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
3790 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
3791 bw_t_info->cir_bw.bw);
3792 if (status)
3793 return status;
3794 }
3795 if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
3796 bw_alloc = bw_t_info->cir_bw.bw_alloc;
3797 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
3798 bw_alloc);
3799 if (status)
3800 return status;
3801 }
3802 if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
3803 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
3804 bw_t_info->eir_bw.bw);
3805 if (status)
3806 return status;
3807 }
3808 if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
3809 bw_alloc = bw_t_info->eir_bw.bw_alloc;
3810 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
3811 bw_alloc);
3812 if (status)
3813 return status;
3814 }
3815 if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
3816 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
3817 bw_t_info->shared_bw);
3818 return status;
3819 }
3820
3821 /**
3822 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
3823 * @pi: port info struct
3824 * @tc_bitmap: 8 bits TC bitmap to check
3825 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
3826 *
3827 * This function returns the enabled TC bitmap in ena_tc_bitmap. Some TCs may
3828 * be missing, so only the enabled TCs are returned. This function needs to be
3829 * called with the scheduler lock held.
3830 */
3831 static void
3832 ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi,
3833 unsigned long *tc_bitmap,
3834 unsigned long *ena_tc_bitmap)
3835 {
3836 u8 tc;
3837
3838 /* Some TC(s) may be missing after reset, adjust for replay */
3839 ice_for_each_traffic_class(tc)
3840 if (ice_is_tc_ena(*tc_bitmap, tc) &&
3841 (ice_sched_get_tc_node(pi, tc)))
3842 set_bit(tc, ena_tc_bitmap);
3843 }
3844
3845 /**
3846 * ice_sched_replay_agg - recreate aggregator node(s)
3847 * @hw: pointer to the HW struct
3848 *
3849 * This function recreates aggregator type nodes which were not replayed earlier.
3850 * It also replays aggregator BW information. These aggregator nodes are not
3851 * associated with a VSI type node yet.
3852 */
3853 void ice_sched_replay_agg(struct ice_hw *hw)
3854 {
3855 struct ice_port_info *pi = hw->port_info;
3856 struct ice_sched_agg_info *agg_info;
3857
3858 mutex_lock(&pi->sched_lock);
3859 list_for_each_entry(agg_info, &hw->agg_list, list_entry)
3860 /* replay aggregator (re-create aggregator node) */
3861 if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
3862 ICE_MAX_TRAFFIC_CLASS)) {
3863 DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
3864 enum ice_status status;
3865
3866 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
3867 ice_sched_get_ena_tc_bitmap(pi,
3868 agg_info->replay_tc_bitmap,
3869 replay_bitmap);
3870 status = ice_sched_cfg_agg(hw->port_info,
3871 agg_info->agg_id,
3872 ICE_AGG_TYPE_AGG,
3873 replay_bitmap);
3874 if (status) {
3875 dev_info(ice_hw_to_dev(hw),
3876 "Replay agg id[%d] failed\n",
3877 agg_info->agg_id);
3878 /* Move on to next one */
3879 continue;
3880 }
3881 }
3882 mutex_unlock(&pi->sched_lock);
3883 }
3884
3885 /**
3886 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
3887 * @hw: pointer to the HW struct
3888 *
3889 * This function initializes the aggregator(s) TC bitmap to zero, a required
3890 * preinit step for replaying aggregators.
3891 */
3892 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
3893 {
3894 struct ice_port_info *pi = hw->port_info;
3895 struct ice_sched_agg_info *agg_info;
3896
3897 mutex_lock(&pi->sched_lock);
3898 list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
3899 struct ice_sched_agg_vsi_info *agg_vsi_info;
3900
3901 agg_info->tc_bitmap[0] = 0;
3902 list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list,
3903 list_entry)
3904 agg_vsi_info->tc_bitmap[0] = 0;
3905 }
3906 mutex_unlock(&pi->sched_lock);
3907 }
3908
3909 /**
3910 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
3911 * @hw: pointer to the HW struct
3912 * @vsi_handle: software VSI handle
3913 *
3914 * This function replays aggregator node, VSI to aggregator type nodes, and
3915 * their node bandwidth information. This function needs to be called with
3916 * scheduler lock held.
3917 */
3918 static enum ice_status
3919 ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
3920 {
3921 DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
3922 struct ice_sched_agg_vsi_info *agg_vsi_info;
3923 struct ice_port_info *pi = hw->port_info;
3924 struct ice_sched_agg_info *agg_info;
3925 enum ice_status status;
3926
3927 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
3928 if (!ice_is_vsi_valid(hw, vsi_handle))
3929 return ICE_ERR_PARAM;
3930 agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
3931 if (!agg_info)
3932 return 0; /* Not present in list - default Agg case */
3933 agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
3934 if (!agg_vsi_info)
3935 return 0; /* Not present in list - default Agg case */
3936 ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
3937 replay_bitmap);
3938 /* Replay aggregator node associated to vsi_handle */
3939 status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
3940 ICE_AGG_TYPE_AGG, replay_bitmap);
3941 if (status)
3942 return status;
3943
3944 bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
3945 ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
3946 replay_bitmap);
3947 /* Move this VSI (vsi_handle) to above aggregator */
3948 return ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
3949 replay_bitmap);
3950 }
3951
3952 /**
3953 * ice_replay_vsi_agg - replay VSI to aggregator node
3954 * @hw: pointer to the HW struct
3955 * @vsi_handle: software VSI handle
3956 *
3957 * This function replays association of VSI to aggregator type nodes, and
3958 * node bandwidth information.
3959 */
3960 enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
3961 {
3962 struct ice_port_info *pi = hw->port_info;
3963 enum ice_status status;
3964
3965 mutex_lock(&pi->sched_lock);
3966 status = ice_sched_replay_vsi_agg(hw, vsi_handle);
3967 mutex_unlock(&pi->sched_lock);
3968 return status;
3969 }
3970
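/* Illustrative only: a hedged sketch (not part of the driver) of one plausible
 * post-reset replay sequence based on the kernel-doc above. The function name
 * and the single-VSI scope are hypothetical; only the three calls below are
 * defined in this file.
 */
static void ice_example_replay_aggregators(struct ice_hw *hw, u16 vsi_handle)
{
	/* zero all aggregator TC bitmaps before any replay */
	ice_sched_replay_agg_vsi_preinit(hw);
	/* re-associate this VSI with its (non-default) aggregator */
	if (ice_replay_vsi_agg(hw, vsi_handle))
		return;
	/* recreate any aggregator nodes not already replayed above */
	ice_sched_replay_agg(hw);
}
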
3971 /**
3972 * ice_sched_replay_q_bw - replay queue type node BW
3973 * @pi: port information structure
3974 * @q_ctx: queue context structure
3975 *
3976 * This function replays queue type node bandwidth. This function needs to be
3977 * called with scheduler lock held.
3978 */
3979 enum ice_status
3980 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
3981 {
3982 struct ice_sched_node *q_node;
3983
3984 /* Following also checks the presence of node in tree */
3985 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
3986 if (!q_node)
3987 return ICE_ERR_PARAM;
3988 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
3989 }
3990
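/* Illustrative only: a minimal sketch of restoring one queue's saved BW after
 * reset. ice_sched_replay_q_bw() must run under the scheduler lock, so the
 * hypothetical wrapper below takes and releases pi->sched_lock around it.
 */
static enum ice_status
ice_example_replay_one_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
	enum ice_status status;

	mutex_lock(&pi->sched_lock);
	status = ice_sched_replay_q_bw(pi, q_ctx);
	mutex_unlock(&pi->sched_lock);
	return status;
}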