1 // SPDX-License-Identifier: GPL-2.0 2 /* Marvell RVU Ethernet driver 3 * 4 * Copyright (C) 2023 Marvell. 5 * 6 */ 7 #include <linux/netdevice.h> 8 #include <linux/etherdevice.h> 9 #include <linux/inetdevice.h> 10 #include <linux/bitfield.h> 11 12 #include "otx2_common.h" 13 #include "cn10k.h" 14 #include "qos.h" 15 16 #define OTX2_QOS_QID_INNER 0xFFFFU 17 #define OTX2_QOS_QID_NONE 0xFFFEU 18 #define OTX2_QOS_ROOT_CLASSID 0xFFFFFFFF 19 #define OTX2_QOS_CLASS_NONE 0 20 #define OTX2_QOS_DEFAULT_PRIO 0xF 21 #define OTX2_QOS_INVALID_SQ 0xFFFF 22 23 static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf) 24 { 25 struct otx2_hw *hw = &pfvf->hw; 26 int tx_queues, qos_txqs, err; 27 28 qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap, 29 OTX2_QOS_MAX_LEAF_NODES); 30 31 tx_queues = hw->tx_queues + qos_txqs; 32 33 err = netif_set_real_num_tx_queues(pfvf->netdev, tx_queues); 34 if (err) { 35 netdev_err(pfvf->netdev, 36 "Failed to set no of Tx queues: %d\n", tx_queues); 37 return; 38 } 39 } 40 41 static void otx2_qos_get_regaddr(struct otx2_qos_node *node, 42 struct nix_txschq_config *cfg, 43 int index) 44 { 45 if (node->level == NIX_TXSCH_LVL_SMQ) { 46 cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq); 47 cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq); 48 cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq); 49 cfg->reg[index] = NIX_AF_MDQX_CIR(node->schq); 50 } else if (node->level == NIX_TXSCH_LVL_TL4) { 51 cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq); 52 cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq); 53 cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq); 54 cfg->reg[index] = NIX_AF_TL4X_CIR(node->schq); 55 } else if (node->level == NIX_TXSCH_LVL_TL3) { 56 cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq); 57 cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq); 58 cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq); 59 cfg->reg[index] = NIX_AF_TL3X_CIR(node->schq); 60 } else if (node->level == NIX_TXSCH_LVL_TL2) { 61 cfg->reg[index++] = 
NIX_AF_TL2X_PARENT(node->schq); 62 cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq); 63 cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq); 64 cfg->reg[index] = NIX_AF_TL2X_CIR(node->schq); 65 } 66 } 67 68 static void otx2_config_sched_shaping(struct otx2_nic *pfvf, 69 struct otx2_qos_node *node, 70 struct nix_txschq_config *cfg, 71 int *num_regs) 72 { 73 u64 maxrate; 74 75 otx2_qos_get_regaddr(node, cfg, *num_regs); 76 77 /* configure parent txschq */ 78 cfg->regval[*num_regs] = node->parent->schq << 16; 79 (*num_regs)++; 80 81 /* configure prio/quantum */ 82 if (node->qid == OTX2_QOS_QID_NONE) { 83 cfg->regval[*num_regs] = node->prio << 24 | 84 mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); 85 (*num_regs)++; 86 return; 87 } 88 89 /* configure priority */ 90 cfg->regval[*num_regs] = (node->schq - node->parent->prio_anchor) << 24; 91 (*num_regs)++; 92 93 /* configure PIR */ 94 maxrate = (node->rate > node->ceil) ? node->rate : node->ceil; 95 96 cfg->regval[*num_regs] = 97 otx2_get_txschq_rate_regval(pfvf, maxrate, 65536); 98 (*num_regs)++; 99 100 /* Don't configure CIR when both CIR+PIR not supported 101 * On 96xx, CIR + PIR + RED_ALGO=STALL causes deadlock 102 */ 103 if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag)) 104 return; 105 106 cfg->regval[*num_regs] = 107 otx2_get_txschq_rate_regval(pfvf, node->rate, 65536); 108 (*num_regs)++; 109 } 110 111 static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, 112 struct otx2_qos_node *node, 113 struct nix_txschq_config *cfg) 114 { 115 struct otx2_hw *hw = &pfvf->hw; 116 int num_regs = 0; 117 u8 level; 118 119 level = node->level; 120 121 /* program txschq registers */ 122 if (level == NIX_TXSCH_LVL_SMQ) { 123 cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq); 124 cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) | 125 OTX2_MIN_MTU; 126 cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) | 127 (0x2ULL << 36); 128 num_regs++; 129 130 otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); 131 132 } 
else if (level == NIX_TXSCH_LVL_TL4) { 133 otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); 134 } else if (level == NIX_TXSCH_LVL_TL3) { 135 /* configure link cfg */ 136 if (level == pfvf->qos.link_cfg_lvl) { 137 cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link); 138 cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12); 139 num_regs++; 140 } 141 142 otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); 143 } else if (level == NIX_TXSCH_LVL_TL2) { 144 /* configure link cfg */ 145 if (level == pfvf->qos.link_cfg_lvl) { 146 cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link); 147 cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12); 148 num_regs++; 149 } 150 151 /* check if node is root */ 152 if (node->qid == OTX2_QOS_QID_INNER && !node->parent) { 153 cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq); 154 cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 | 155 mtu_to_dwrr_weight(pfvf, 156 pfvf->tx_max_pktlen); 157 num_regs++; 158 goto txschq_cfg_out; 159 } 160 161 otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); 162 } 163 164 txschq_cfg_out: 165 cfg->num_regs = num_regs; 166 } 167 168 static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf, 169 struct otx2_qos_node *parent) 170 { 171 struct mbox *mbox = &pfvf->mbox; 172 struct nix_txschq_config *cfg; 173 int rc; 174 175 if (parent->level == NIX_TXSCH_LVL_MDQ) 176 return 0; 177 178 mutex_lock(&mbox->lock); 179 180 cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); 181 if (!cfg) { 182 mutex_unlock(&mbox->lock); 183 return -ENOMEM; 184 } 185 186 cfg->lvl = parent->level; 187 188 if (parent->level == NIX_TXSCH_LVL_TL4) 189 cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq); 190 else if (parent->level == NIX_TXSCH_LVL_TL3) 191 cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq); 192 else if (parent->level == NIX_TXSCH_LVL_TL2) 193 cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq); 194 else if (parent->level == NIX_TXSCH_LVL_TL1) 195 cfg->reg[0] = 
NIX_AF_TL1X_TOPOLOGY(parent->schq); 196 197 cfg->regval[0] = (u64)parent->prio_anchor << 32; 198 if (parent->level == NIX_TXSCH_LVL_TL1) 199 cfg->regval[0] |= (u64)TXSCH_TL1_DFLT_RR_PRIO << 1; 200 201 cfg->num_regs++; 202 203 rc = otx2_sync_mbox_msg(&pfvf->mbox); 204 205 mutex_unlock(&mbox->lock); 206 207 return rc; 208 } 209 210 static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf, 211 struct otx2_qos_node *parent) 212 { 213 struct otx2_qos_node *node; 214 215 list_for_each_entry_reverse(node, &parent->child_schq_list, list) 216 otx2_txschq_free_one(pfvf, node->level, node->schq); 217 } 218 219 static void otx2_qos_free_hw_node(struct otx2_nic *pfvf, 220 struct otx2_qos_node *parent) 221 { 222 struct otx2_qos_node *node, *tmp; 223 224 list_for_each_entry_safe(node, tmp, &parent->child_list, list) { 225 otx2_qos_free_hw_node(pfvf, node); 226 otx2_qos_free_hw_node_schq(pfvf, node); 227 otx2_txschq_free_one(pfvf, node->level, node->schq); 228 } 229 } 230 231 static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf, 232 struct otx2_qos_node *node) 233 { 234 mutex_lock(&pfvf->qos.qos_lock); 235 236 /* free child node hw mappings */ 237 otx2_qos_free_hw_node(pfvf, node); 238 otx2_qos_free_hw_node_schq(pfvf, node); 239 240 /* free node hw mappings */ 241 otx2_txschq_free_one(pfvf, node->level, node->schq); 242 243 mutex_unlock(&pfvf->qos.qos_lock); 244 } 245 246 static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf, 247 struct otx2_qos_node *node) 248 { 249 hash_del_rcu(&node->hlist); 250 251 if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) { 252 __clear_bit(node->qid, pfvf->qos.qos_sq_bmap); 253 otx2_qos_update_tx_netdev_queues(pfvf); 254 } 255 256 list_del(&node->list); 257 kfree(node); 258 } 259 260 static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf, 261 struct otx2_qos_node *parent) 262 { 263 struct otx2_qos_node *node, *tmp; 264 265 list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) { 266 
list_del(&node->list); 267 kfree(node); 268 } 269 } 270 271 static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf, 272 struct otx2_qos_node *parent) 273 { 274 struct otx2_qos_node *node, *tmp; 275 276 list_for_each_entry_safe(node, tmp, &parent->child_list, list) { 277 __otx2_qos_free_sw_node(pfvf, node); 278 otx2_qos_free_sw_node_schq(pfvf, node); 279 otx2_qos_sw_node_delete(pfvf, node); 280 } 281 } 282 283 static void otx2_qos_free_sw_node(struct otx2_nic *pfvf, 284 struct otx2_qos_node *node) 285 { 286 mutex_lock(&pfvf->qos.qos_lock); 287 288 __otx2_qos_free_sw_node(pfvf, node); 289 otx2_qos_free_sw_node_schq(pfvf, node); 290 otx2_qos_sw_node_delete(pfvf, node); 291 292 mutex_unlock(&pfvf->qos.qos_lock); 293 } 294 295 static void otx2_qos_destroy_node(struct otx2_nic *pfvf, 296 struct otx2_qos_node *node) 297 { 298 otx2_qos_free_hw_cfg(pfvf, node); 299 otx2_qos_free_sw_node(pfvf, node); 300 } 301 302 static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent, 303 struct otx2_qos_cfg *cfg) 304 { 305 struct otx2_qos_node *node; 306 307 list_for_each_entry(node, &parent->child_schq_list, list) 308 cfg->schq[node->level]++; 309 } 310 311 static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent, 312 struct otx2_qos_cfg *cfg) 313 { 314 struct otx2_qos_node *node; 315 316 list_for_each_entry(node, &parent->child_list, list) { 317 otx2_qos_fill_cfg_tl(node, cfg); 318 cfg->schq_contig[node->level]++; 319 otx2_qos_fill_cfg_schq(node, cfg); 320 } 321 } 322 323 static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf, 324 struct otx2_qos_node *parent, 325 struct otx2_qos_cfg *cfg) 326 { 327 mutex_lock(&pfvf->qos.qos_lock); 328 otx2_qos_fill_cfg_tl(parent, cfg); 329 mutex_unlock(&pfvf->qos.qos_lock); 330 } 331 332 static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent, 333 struct otx2_qos_cfg *cfg) 334 { 335 struct otx2_qos_node *node; 336 int cnt; 337 338 list_for_each_entry(node, &parent->child_schq_list, list) { 339 cnt = 
cfg->dwrr_node_pos[node->level]; 340 cfg->schq_list[node->level][cnt] = node->schq; 341 cfg->schq[node->level]++; 342 cfg->dwrr_node_pos[node->level]++; 343 } 344 } 345 346 static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent, 347 struct otx2_qos_cfg *cfg) 348 { 349 struct otx2_qos_node *node; 350 int cnt; 351 352 list_for_each_entry(node, &parent->child_list, list) { 353 otx2_qos_read_txschq_cfg_tl(node, cfg); 354 cnt = cfg->static_node_pos[node->level]; 355 cfg->schq_contig_list[node->level][cnt] = node->schq; 356 cfg->schq_contig[node->level]++; 357 cfg->static_node_pos[node->level]++; 358 otx2_qos_read_txschq_cfg_schq(node, cfg); 359 } 360 } 361 362 static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf, 363 struct otx2_qos_node *node, 364 struct otx2_qos_cfg *cfg) 365 { 366 mutex_lock(&pfvf->qos.qos_lock); 367 otx2_qos_read_txschq_cfg_tl(node, cfg); 368 mutex_unlock(&pfvf->qos.qos_lock); 369 } 370 371 static struct otx2_qos_node * 372 otx2_qos_alloc_root(struct otx2_nic *pfvf) 373 { 374 struct otx2_qos_node *node; 375 376 node = kzalloc(sizeof(*node), GFP_KERNEL); 377 if (!node) 378 return ERR_PTR(-ENOMEM); 379 380 node->parent = NULL; 381 if (!is_otx2_vf(pfvf->pcifunc)) 382 node->level = NIX_TXSCH_LVL_TL1; 383 else 384 node->level = NIX_TXSCH_LVL_TL2; 385 386 WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); 387 node->classid = OTX2_QOS_ROOT_CLASSID; 388 389 hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid); 390 list_add_tail(&node->list, &pfvf->qos.qos_tree); 391 INIT_LIST_HEAD(&node->child_list); 392 INIT_LIST_HEAD(&node->child_schq_list); 393 394 return node; 395 } 396 397 static int otx2_qos_add_child_node(struct otx2_qos_node *parent, 398 struct otx2_qos_node *node) 399 { 400 struct list_head *head = &parent->child_list; 401 struct otx2_qos_node *tmp_node; 402 struct list_head *tmp; 403 404 for (tmp = head->next; tmp != head; tmp = tmp->next) { 405 tmp_node = list_entry(tmp, struct otx2_qos_node, list); 406 if (tmp_node->prio == 
node->prio) 407 return -EEXIST; 408 if (tmp_node->prio > node->prio) { 409 list_add_tail(&node->list, tmp); 410 return 0; 411 } 412 } 413 414 list_add_tail(&node->list, head); 415 return 0; 416 } 417 418 static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf, 419 struct otx2_qos_node *node) 420 { 421 struct otx2_qos_node *txschq_node, *parent, *tmp; 422 int lvl; 423 424 parent = node; 425 for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) { 426 txschq_node = kzalloc(sizeof(*txschq_node), GFP_KERNEL); 427 if (!txschq_node) 428 goto err_out; 429 430 txschq_node->parent = parent; 431 txschq_node->level = lvl; 432 txschq_node->classid = OTX2_QOS_CLASS_NONE; 433 WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE); 434 txschq_node->rate = 0; 435 txschq_node->ceil = 0; 436 txschq_node->prio = 0; 437 438 mutex_lock(&pfvf->qos.qos_lock); 439 list_add_tail(&txschq_node->list, &node->child_schq_list); 440 mutex_unlock(&pfvf->qos.qos_lock); 441 442 INIT_LIST_HEAD(&txschq_node->child_list); 443 INIT_LIST_HEAD(&txschq_node->child_schq_list); 444 parent = txschq_node; 445 } 446 447 return 0; 448 449 err_out: 450 list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list, 451 list) { 452 list_del(&txschq_node->list); 453 kfree(txschq_node); 454 } 455 return -ENOMEM; 456 } 457 458 static struct otx2_qos_node * 459 otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf, 460 struct otx2_qos_node *parent, 461 u16 classid, u32 prio, u64 rate, u64 ceil, 462 u16 qid) 463 { 464 struct otx2_qos_node *node; 465 int err; 466 467 node = kzalloc(sizeof(*node), GFP_KERNEL); 468 if (!node) 469 return ERR_PTR(-ENOMEM); 470 471 node->parent = parent; 472 node->level = parent->level - 1; 473 node->classid = classid; 474 WRITE_ONCE(node->qid, qid); 475 476 node->rate = otx2_convert_rate(rate); 477 node->ceil = otx2_convert_rate(ceil); 478 node->prio = prio; 479 480 __set_bit(qid, pfvf->qos.qos_sq_bmap); 481 482 hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid); 483 484 
mutex_lock(&pfvf->qos.qos_lock); 485 err = otx2_qos_add_child_node(parent, node); 486 if (err) { 487 mutex_unlock(&pfvf->qos.qos_lock); 488 return ERR_PTR(err); 489 } 490 mutex_unlock(&pfvf->qos.qos_lock); 491 492 INIT_LIST_HEAD(&node->child_list); 493 INIT_LIST_HEAD(&node->child_schq_list); 494 495 err = otx2_qos_alloc_txschq_node(pfvf, node); 496 if (err) { 497 otx2_qos_sw_node_delete(pfvf, node); 498 return ERR_PTR(-ENOMEM); 499 } 500 501 return node; 502 } 503 504 static struct otx2_qos_node * 505 otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid) 506 { 507 struct otx2_qos_node *node = NULL; 508 509 hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) { 510 if (node->classid == classid) 511 break; 512 } 513 514 return node; 515 } 516 517 static struct otx2_qos_node * 518 otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid) 519 { 520 struct otx2_qos_node *node = NULL; 521 522 hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) { 523 if (node->classid == classid) 524 break; 525 } 526 527 return node; 528 } 529 530 int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid) 531 { 532 struct otx2_qos_node *node; 533 u16 qid; 534 int res; 535 536 node = otx2_sw_node_find_rcu(pfvf, classid); 537 if (!node) { 538 res = -ENOENT; 539 goto out; 540 } 541 qid = READ_ONCE(node->qid); 542 if (qid == OTX2_QOS_QID_INNER) { 543 res = -EINVAL; 544 goto out; 545 } 546 res = pfvf->hw.tx_queues + qid; 547 out: 548 return res; 549 } 550 551 static int 552 otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node) 553 { 554 struct mbox *mbox = &pfvf->mbox; 555 struct nix_txschq_config *req; 556 int rc; 557 558 mutex_lock(&mbox->lock); 559 560 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); 561 if (!req) { 562 mutex_unlock(&mbox->lock); 563 return -ENOMEM; 564 } 565 566 req->lvl = node->level; 567 __otx2_qos_txschq_cfg(pfvf, node, req); 568 569 rc = otx2_sync_mbox_msg(&pfvf->mbox); 570 571 mutex_unlock(&mbox->lock); 572 
573 return rc; 574 } 575 576 static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf, 577 struct otx2_qos_cfg *cfg) 578 { 579 struct nix_txsch_alloc_req *req; 580 struct nix_txsch_alloc_rsp *rsp; 581 struct mbox *mbox = &pfvf->mbox; 582 int lvl, rc, schq; 583 584 mutex_lock(&mbox->lock); 585 req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); 586 if (!req) { 587 mutex_unlock(&mbox->lock); 588 return -ENOMEM; 589 } 590 591 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 592 req->schq[lvl] = cfg->schq[lvl]; 593 req->schq_contig[lvl] = cfg->schq_contig[lvl]; 594 } 595 596 rc = otx2_sync_mbox_msg(&pfvf->mbox); 597 if (rc) { 598 mutex_unlock(&mbox->lock); 599 return rc; 600 } 601 602 rsp = (struct nix_txsch_alloc_rsp *) 603 otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); 604 605 if (IS_ERR(rsp)) { 606 rc = PTR_ERR(rsp); 607 goto out; 608 } 609 610 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 611 for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) { 612 cfg->schq_contig_list[lvl][schq] = 613 rsp->schq_contig_list[lvl][schq]; 614 } 615 } 616 617 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 618 for (schq = 0; schq < rsp->schq[lvl]; schq++) { 619 cfg->schq_list[lvl][schq] = 620 rsp->schq_list[lvl][schq]; 621 } 622 } 623 624 pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl; 625 626 out: 627 mutex_unlock(&mbox->lock); 628 return rc; 629 } 630 631 static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf, 632 struct otx2_qos_node *node, 633 struct otx2_qos_cfg *cfg) 634 { 635 struct otx2_qos_node *tmp; 636 int cnt; 637 638 list_for_each_entry(tmp, &node->child_schq_list, list) { 639 cnt = cfg->dwrr_node_pos[tmp->level]; 640 tmp->schq = cfg->schq_list[tmp->level][cnt]; 641 cfg->dwrr_node_pos[tmp->level]++; 642 } 643 } 644 645 static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf, 646 struct otx2_qos_node *node, 647 struct otx2_qos_cfg *cfg) 648 { 649 struct otx2_qos_node *tmp; 650 int cnt; 651 652 list_for_each_entry(tmp, &node->child_list, list) { 653 
otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg); 654 cnt = cfg->static_node_pos[tmp->level]; 655 tmp->schq = cfg->schq_contig_list[tmp->level][cnt]; 656 if (cnt == 0) 657 node->prio_anchor = tmp->schq; 658 cfg->static_node_pos[tmp->level]++; 659 otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg); 660 } 661 } 662 663 static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf, 664 struct otx2_qos_node *node, 665 struct otx2_qos_cfg *cfg) 666 { 667 mutex_lock(&pfvf->qos.qos_lock); 668 otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg); 669 otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg); 670 mutex_unlock(&pfvf->qos.qos_lock); 671 } 672 673 static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf, 674 struct otx2_qos_node *node, 675 struct otx2_qos_cfg *cfg) 676 { 677 struct otx2_qos_node *tmp; 678 int ret; 679 680 list_for_each_entry(tmp, &node->child_schq_list, list) { 681 ret = otx2_qos_txschq_config(pfvf, tmp); 682 if (ret) 683 return -EIO; 684 ret = otx2_qos_txschq_set_parent_topology(pfvf, tmp->parent); 685 if (ret) 686 return -EIO; 687 } 688 689 return 0; 690 } 691 692 static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf, 693 struct otx2_qos_node *node, 694 struct otx2_qos_cfg *cfg) 695 { 696 struct otx2_qos_node *tmp; 697 int ret; 698 699 list_for_each_entry(tmp, &node->child_list, list) { 700 ret = otx2_qos_txschq_push_cfg_tl(pfvf, tmp, cfg); 701 if (ret) 702 return -EIO; 703 ret = otx2_qos_txschq_config(pfvf, tmp); 704 if (ret) 705 return -EIO; 706 ret = otx2_qos_txschq_push_cfg_schq(pfvf, tmp, cfg); 707 if (ret) 708 return -EIO; 709 } 710 711 ret = otx2_qos_txschq_set_parent_topology(pfvf, node); 712 if (ret) 713 return -EIO; 714 715 return 0; 716 } 717 718 static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf, 719 struct otx2_qos_node *node, 720 struct otx2_qos_cfg *cfg) 721 { 722 int ret; 723 724 mutex_lock(&pfvf->qos.qos_lock); 725 ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg); 726 if (ret) 727 goto out; 728 ret = 
otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg); 729 out: 730 mutex_unlock(&pfvf->qos.qos_lock); 731 return ret; 732 } 733 734 static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf, 735 struct otx2_qos_node *node, 736 struct otx2_qos_cfg *cfg) 737 { 738 otx2_qos_txschq_fill_cfg(pfvf, node, cfg); 739 740 return otx2_qos_txschq_push_cfg(pfvf, node, cfg); 741 } 742 743 static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf, 744 struct otx2_qos_node *root, 745 struct otx2_qos_cfg *cfg) 746 { 747 root->schq = cfg->schq_list[root->level][0]; 748 return otx2_qos_txschq_config(pfvf, root); 749 } 750 751 static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg) 752 { 753 int lvl, idx, schq; 754 755 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 756 for (idx = 0; idx < cfg->schq[lvl]; idx++) { 757 schq = cfg->schq_list[lvl][idx]; 758 otx2_txschq_free_one(pfvf, lvl, schq); 759 } 760 } 761 762 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { 763 for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) { 764 schq = cfg->schq_contig_list[lvl][idx]; 765 otx2_txschq_free_one(pfvf, lvl, schq); 766 } 767 } 768 } 769 770 static void otx2_qos_enadis_sq(struct otx2_nic *pfvf, 771 struct otx2_qos_node *node, 772 u16 qid) 773 { 774 if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ) 775 otx2_qos_disable_sq(pfvf, qid); 776 777 pfvf->qos.qid_to_sqmap[qid] = node->schq; 778 otx2_qos_enable_sq(pfvf, qid); 779 } 780 781 static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf, 782 struct otx2_qos_node *node, 783 bool action) 784 { 785 struct otx2_qos_node *tmp; 786 787 if (node->qid == OTX2_QOS_QID_INNER) 788 return; 789 790 list_for_each_entry(tmp, &node->child_schq_list, list) { 791 if (tmp->level == NIX_TXSCH_LVL_MDQ) { 792 if (action == QOS_SMQ_FLUSH) 793 otx2_smq_flush(pfvf, tmp->schq); 794 else 795 otx2_qos_enadis_sq(pfvf, tmp, node->qid); 796 } 797 } 798 } 799 800 static void __otx2_qos_update_smq(struct otx2_nic *pfvf, 801 struct otx2_qos_node 
*node, 802 bool action) 803 { 804 struct otx2_qos_node *tmp; 805 806 list_for_each_entry(tmp, &node->child_list, list) { 807 __otx2_qos_update_smq(pfvf, tmp, action); 808 if (tmp->qid == OTX2_QOS_QID_INNER) 809 continue; 810 if (tmp->level == NIX_TXSCH_LVL_MDQ) { 811 if (action == QOS_SMQ_FLUSH) 812 otx2_smq_flush(pfvf, tmp->schq); 813 else 814 otx2_qos_enadis_sq(pfvf, tmp, tmp->qid); 815 } else { 816 otx2_qos_update_smq_schq(pfvf, tmp, action); 817 } 818 } 819 } 820 821 static void otx2_qos_update_smq(struct otx2_nic *pfvf, 822 struct otx2_qos_node *node, 823 bool action) 824 { 825 mutex_lock(&pfvf->qos.qos_lock); 826 __otx2_qos_update_smq(pfvf, node, action); 827 otx2_qos_update_smq_schq(pfvf, node, action); 828 mutex_unlock(&pfvf->qos.qos_lock); 829 } 830 831 static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf, 832 struct otx2_qos_node *node, 833 struct otx2_qos_cfg *cfg) 834 { 835 int ret; 836 837 ret = otx2_qos_txschq_alloc(pfvf, cfg); 838 if (ret) 839 return -ENOSPC; 840 841 if (!(pfvf->netdev->flags & IFF_UP)) { 842 otx2_qos_txschq_fill_cfg(pfvf, node, cfg); 843 return 0; 844 } 845 846 ret = otx2_qos_txschq_update_config(pfvf, node, cfg); 847 if (ret) { 848 otx2_qos_free_cfg(pfvf, cfg); 849 return -EIO; 850 } 851 852 otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ); 853 854 return 0; 855 } 856 857 static int otx2_qos_update_tree(struct otx2_nic *pfvf, 858 struct otx2_qos_node *node, 859 struct otx2_qos_cfg *cfg) 860 { 861 otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg); 862 return otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg); 863 } 864 865 static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls, 866 struct netlink_ext_ack *extack) 867 { 868 struct otx2_qos_cfg *new_cfg; 869 struct otx2_qos_node *root; 870 int err; 871 872 netdev_dbg(pfvf->netdev, 873 "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n", 874 htb_maj_id, htb_defcls); 875 876 root = otx2_qos_alloc_root(pfvf); 877 if (IS_ERR(root)) { 878 err = PTR_ERR(root); 879 
return err; 880 } 881 882 /* allocate txschq queue */ 883 new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL); 884 if (!new_cfg) { 885 NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); 886 err = -ENOMEM; 887 goto free_root_node; 888 } 889 /* allocate htb root node */ 890 new_cfg->schq[root->level] = 1; 891 err = otx2_qos_txschq_alloc(pfvf, new_cfg); 892 if (err) { 893 NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq"); 894 goto free_root_node; 895 } 896 897 if (!(pfvf->netdev->flags & IFF_UP) || 898 root->level == NIX_TXSCH_LVL_TL1) { 899 root->schq = new_cfg->schq_list[root->level][0]; 900 goto out; 901 } 902 903 /* update the txschq configuration in hw */ 904 err = otx2_qos_txschq_update_root_cfg(pfvf, root, new_cfg); 905 if (err) { 906 NL_SET_ERR_MSG_MOD(extack, 907 "Error updating txschq configuration"); 908 goto txschq_free; 909 } 910 911 out: 912 WRITE_ONCE(pfvf->qos.defcls, htb_defcls); 913 /* Pairs with smp_load_acquire() in ndo_select_queue */ 914 smp_store_release(&pfvf->qos.maj_id, htb_maj_id); 915 kfree(new_cfg); 916 return 0; 917 918 txschq_free: 919 otx2_qos_free_cfg(pfvf, new_cfg); 920 free_root_node: 921 kfree(new_cfg); 922 otx2_qos_sw_node_delete(pfvf, root); 923 return err; 924 } 925 926 static int otx2_qos_root_destroy(struct otx2_nic *pfvf) 927 { 928 struct otx2_qos_node *root; 929 930 netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n"); 931 932 /* find root node */ 933 root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID); 934 if (!root) 935 return -ENOENT; 936 937 /* free the hw mappings */ 938 otx2_qos_destroy_node(pfvf, root); 939 940 return 0; 941 } 942 943 static int otx2_qos_validate_configuration(struct otx2_qos_node *parent, 944 struct netlink_ext_ack *extack, 945 struct otx2_nic *pfvf, 946 u64 prio) 947 { 948 if (test_bit(prio, parent->prio_bmap)) { 949 NL_SET_ERR_MSG_MOD(extack, 950 "Static priority child with same priority exists"); 951 return -EEXIST; 952 } 953 954 if (prio == TXSCH_TL1_DFLT_RR_PRIO) { 955 NL_SET_ERR_MSG_MOD(extack, 
956 "Priority is reserved for Round Robin"); 957 return -EINVAL; 958 } 959 960 return 0; 961 } 962 963 static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid, 964 u32 parent_classid, u64 rate, u64 ceil, 965 u64 prio, struct netlink_ext_ack *extack) 966 { 967 struct otx2_qos_cfg *old_cfg, *new_cfg; 968 struct otx2_qos_node *node, *parent; 969 int qid, ret, err; 970 971 netdev_dbg(pfvf->netdev, 972 "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld\n", 973 classid, parent_classid, rate, ceil, prio); 974 975 if (prio > OTX2_QOS_MAX_PRIO) { 976 NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7"); 977 ret = -EOPNOTSUPP; 978 goto out; 979 } 980 981 /* get parent node */ 982 parent = otx2_sw_node_find(pfvf, parent_classid); 983 if (!parent) { 984 NL_SET_ERR_MSG_MOD(extack, "parent node not found"); 985 ret = -ENOENT; 986 goto out; 987 } 988 if (parent->level == NIX_TXSCH_LVL_MDQ) { 989 NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached"); 990 ret = -EOPNOTSUPP; 991 goto out; 992 } 993 994 ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio); 995 if (ret) 996 goto out; 997 998 set_bit(prio, parent->prio_bmap); 999 1000 /* read current txschq configuration */ 1001 old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL); 1002 if (!old_cfg) { 1003 NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); 1004 ret = -ENOMEM; 1005 goto reset_prio; 1006 } 1007 otx2_qos_read_txschq_cfg(pfvf, parent, old_cfg); 1008 1009 /* allocate a new sq */ 1010 qid = otx2_qos_get_qid(pfvf); 1011 if (qid < 0) { 1012 NL_SET_ERR_MSG_MOD(extack, "Reached max supported QOS SQ's"); 1013 ret = -ENOMEM; 1014 goto free_old_cfg; 1015 } 1016 1017 /* Actual SQ mapping will be updated after SMQ alloc */ 1018 pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; 1019 1020 /* allocate and initialize a new child node */ 1021 node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate, 1022 ceil, qid); 1023 if (IS_ERR(node)) { 1024 
NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node"); 1025 ret = PTR_ERR(node); 1026 goto free_old_cfg; 1027 } 1028 1029 /* push new txschq config to hw */ 1030 new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL); 1031 if (!new_cfg) { 1032 NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); 1033 ret = -ENOMEM; 1034 goto free_node; 1035 } 1036 ret = otx2_qos_update_tree(pfvf, node, new_cfg); 1037 if (ret) { 1038 NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error"); 1039 kfree(new_cfg); 1040 otx2_qos_sw_node_delete(pfvf, node); 1041 /* restore the old qos tree */ 1042 err = otx2_qos_txschq_update_config(pfvf, parent, old_cfg); 1043 if (err) { 1044 netdev_err(pfvf->netdev, 1045 "Failed to restore txcshq configuration"); 1046 goto free_old_cfg; 1047 } 1048 1049 otx2_qos_update_smq(pfvf, parent, QOS_CFG_SQ); 1050 goto free_old_cfg; 1051 } 1052 1053 /* update tx_real_queues */ 1054 otx2_qos_update_tx_netdev_queues(pfvf); 1055 1056 /* free new txschq config */ 1057 kfree(new_cfg); 1058 1059 /* free old txschq config */ 1060 otx2_qos_free_cfg(pfvf, old_cfg); 1061 kfree(old_cfg); 1062 1063 return pfvf->hw.tx_queues + qid; 1064 1065 free_node: 1066 otx2_qos_sw_node_delete(pfvf, node); 1067 free_old_cfg: 1068 kfree(old_cfg); 1069 reset_prio: 1070 clear_bit(prio, parent->prio_bmap); 1071 out: 1072 return ret; 1073 } 1074 1075 static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, 1076 u16 child_classid, u64 rate, u64 ceil, u64 prio, 1077 struct netlink_ext_ack *extack) 1078 { 1079 struct otx2_qos_cfg *old_cfg, *new_cfg; 1080 struct otx2_qos_node *node, *child; 1081 int ret, err; 1082 u16 qid; 1083 1084 netdev_dbg(pfvf->netdev, 1085 "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n", 1086 classid, child_classid, rate, ceil); 1087 1088 if (prio > OTX2_QOS_MAX_PRIO) { 1089 NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7"); 1090 ret = -EOPNOTSUPP; 1091 goto out; 1092 } 1093 1094 /* find node related to classid */ 1095 node = 
otx2_sw_node_find(pfvf, classid); 1096 if (!node) { 1097 NL_SET_ERR_MSG_MOD(extack, "HTB node not found"); 1098 ret = -ENOENT; 1099 goto out; 1100 } 1101 /* check max qos txschq level */ 1102 if (node->level == NIX_TXSCH_LVL_MDQ) { 1103 NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported"); 1104 ret = -EOPNOTSUPP; 1105 goto out; 1106 } 1107 1108 set_bit(prio, node->prio_bmap); 1109 1110 /* store the qid to assign to leaf node */ 1111 qid = node->qid; 1112 1113 /* read current txschq configuration */ 1114 old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL); 1115 if (!old_cfg) { 1116 NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); 1117 ret = -ENOMEM; 1118 goto reset_prio; 1119 } 1120 otx2_qos_read_txschq_cfg(pfvf, node, old_cfg); 1121 1122 /* delete the txschq nodes allocated for this node */ 1123 otx2_qos_free_sw_node_schq(pfvf, node); 1124 1125 /* mark this node as htb inner node */ 1126 WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); 1127 1128 /* allocate and initialize a new child node */ 1129 child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid, 1130 prio, rate, ceil, qid); 1131 if (IS_ERR(child)) { 1132 NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node"); 1133 ret = PTR_ERR(child); 1134 goto free_old_cfg; 1135 } 1136 1137 /* push new txschq config to hw */ 1138 new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL); 1139 if (!new_cfg) { 1140 NL_SET_ERR_MSG_MOD(extack, "Memory allocation error"); 1141 ret = -ENOMEM; 1142 goto free_node; 1143 } 1144 ret = otx2_qos_update_tree(pfvf, child, new_cfg); 1145 if (ret) { 1146 NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error"); 1147 kfree(new_cfg); 1148 otx2_qos_sw_node_delete(pfvf, child); 1149 /* restore the old qos tree */ 1150 WRITE_ONCE(node->qid, qid); 1151 err = otx2_qos_alloc_txschq_node(pfvf, node); 1152 if (err) { 1153 netdev_err(pfvf->netdev, 1154 "Failed to restore old leaf node"); 1155 goto free_old_cfg; 1156 } 1157 err = otx2_qos_txschq_update_config(pfvf, node, old_cfg); 1158 if (err) { 
1159 netdev_err(pfvf->netdev, 1160 "Failed to restore txcshq configuration"); 1161 goto free_old_cfg; 1162 } 1163 otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ); 1164 goto free_old_cfg; 1165 } 1166 1167 /* free new txschq config */ 1168 kfree(new_cfg); 1169 1170 /* free old txschq config */ 1171 otx2_qos_free_cfg(pfvf, old_cfg); 1172 kfree(old_cfg); 1173 1174 return 0; 1175 1176 free_node: 1177 otx2_qos_sw_node_delete(pfvf, child); 1178 free_old_cfg: 1179 kfree(old_cfg); 1180 reset_prio: 1181 clear_bit(prio, node->prio_bmap); 1182 out: 1183 return ret; 1184 } 1185 1186 static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, 1187 struct netlink_ext_ack *extack) 1188 { 1189 struct otx2_qos_node *node, *parent; 1190 u64 prio; 1191 u16 qid; 1192 1193 netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid); 1194 1195 /* find node related to classid */ 1196 node = otx2_sw_node_find(pfvf, *classid); 1197 if (!node) { 1198 NL_SET_ERR_MSG_MOD(extack, "HTB node not found"); 1199 return -ENOENT; 1200 } 1201 parent = node->parent; 1202 prio = node->prio; 1203 qid = node->qid; 1204 1205 otx2_qos_disable_sq(pfvf, node->qid); 1206 1207 otx2_qos_destroy_node(pfvf, node); 1208 pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; 1209 1210 clear_bit(prio, parent->prio_bmap); 1211 1212 return 0; 1213 } 1214 1215 static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force, 1216 struct netlink_ext_ack *extack) 1217 { 1218 struct otx2_qos_node *node, *parent; 1219 struct otx2_qos_cfg *new_cfg; 1220 u64 prio; 1221 int err; 1222 u16 qid; 1223 1224 netdev_dbg(pfvf->netdev, 1225 "TC_HTB_LEAF_DEL_LAST classid %04x\n", classid); 1226 1227 /* find node related to classid */ 1228 node = otx2_sw_node_find(pfvf, classid); 1229 if (!node) { 1230 NL_SET_ERR_MSG_MOD(extack, "HTB node not found"); 1231 return -ENOENT; 1232 } 1233 1234 /* save qid for use by parent */ 1235 qid = node->qid; 1236 prio = node->prio; 1237 1238 parent = otx2_sw_node_find(pfvf, 
/* NOTE(review): continuation of otx2_qos_leaf_del_last(); the enclosing
 * lookup "parent = otx2_sw_node_find(pfvf, ..." starts in the previous
 * chunk and completes on the first statement line below.
 */
				   node->parent->classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		return -ENOENT;
	}

	/* destroy the leaf node */
	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	clear_bit(prio, parent->prio_bmap);

	/* create downstream txschq entries to parent */
	err = otx2_qos_alloc_txschq_node(pfvf, parent);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration");
		return err;
	}
	/* parent becomes a leaf again: it inherits the deleted child's qid */
	WRITE_ONCE(parent->qid, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		return -ENOMEM;
	}
	/* fill txschq cfg and push txschq cfg to hw */
	otx2_qos_fill_cfg_schq(parent, new_cfg);
	err = otx2_qos_push_txschq_cfg(pfvf, parent, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		return err;
	}
	kfree(new_cfg);

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	return 0;
}

/* otx2_clean_qos_queues - flush all QoS send queues.
 *
 * Walks the QoS tree from the root (if one exists) and issues an SMQ
 * flush for every scheduler node; a no-op when no HTB root was created.
 */
void otx2_clean_qos_queues(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	otx2_qos_update_smq(pfvf, root, QOS_SMQ_FLUSH);
}

/* otx2_qos_config_txschq - replay the software QoS tree into hardware.
 *
 * Re-programs the transmit scheduler hierarchy from the stored software
 * state (presumably used after an AF/interface reset — confirm against
 * callers).  On any failure the whole HTB root is destroyed rather than
 * left half-configured.
 */
void otx2_qos_config_txschq(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;
	int err;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	err = otx2_qos_txschq_config(pfvf, root);
	if (err) {
		netdev_err(pfvf->netdev, "Error update txschq configuration\n");
		goto root_destroy;
	}

	err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
	if (err) {
		netdev_err(pfvf->netdev, "Error update txschq configuration\n");
		goto root_destroy;
	}

	otx2_qos_update_smq(pfvf, root, QOS_CFG_SQ);
	return;

root_destroy:
	netdev_err(pfvf->netdev, "Failed to update Scheduler/Shaping config in Hardware\n");
	/* Free resources allocated */
	otx2_qos_root_destroy(pfvf);
}

/* otx2_setup_tc_htb - entry point for tc HTB offload commands.
 *
 * Dispatches each tc_htb_qopt_offload command to the matching handler.
 * Returns 0 on success or a negative errno; unsupported commands
 * (including TC_HTB_NODE_MODIFY) get -EOPNOTSUPP.
 */
int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
{
	struct otx2_nic *pfvf = netdev_priv(ndev);
	int res;

	switch (htb->command) {
	case TC_HTB_CREATE:
		return otx2_qos_root_add(pfvf, htb->parent_classid,
					 htb->classid, htb->extack);
	case TC_HTB_DESTROY:
		return otx2_qos_root_destroy(pfvf);
	case TC_HTB_LEAF_ALLOC_QUEUE:
		res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
						htb->parent_classid,
						htb->rate, htb->ceil,
						htb->prio, htb->extack);
		if (res < 0)
			return res;
		/* on success res is the allocated queue id */
		htb->qid = res;
		return 0;
	case TC_HTB_LEAF_TO_INNER:
		return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
					      htb->classid, htb->rate,
					      htb->ceil, htb->prio,
					      htb->extack);
	case TC_HTB_LEAF_DEL:
		return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return otx2_qos_leaf_del_last(pfvf, htb->classid,
					      htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
					      htb->extack);
	case TC_HTB_LEAF_QUERY_QUEUE:
		/* NOTE(review): unlike TC_HTB_LEAF_ALLOC_QUEUE above, res is
		 * stored into htb->qid without a negative-error check; if
		 * otx2_get_txq_by_classid() can fail, a negative errno would
		 * be truncated into qid here — verify against its contract.
		 */
		res = otx2_get_txq_by_classid(pfvf, htb->classid);
		htb->qid = res;
		return 0;
	case TC_HTB_NODE_MODIFY:
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}