// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

/* CN10K silicon limits for the egress shaper burst configuration
 * (larger than the OcteonTx2 MAX_BURST_* limits used below).
 */
#define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
#define CN10K_MAX_BURST_SIZE		8453888ULL

/* Field layout of burst mantissa/exponent in CN10K NIX_AF_TLxX_PIR/CIR */
#define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)

/* MPLS LSE depths (bits 4-6 of used_lses) the NPC parser cannot match on */
#define OTX2_UNSUPP_LSE_DEPTH		GENMASK(6, 4)

/* Sentinel: flow has no NIX multicast/mirror group attached */
#define MCAST_INVALID_GRP		(-1U)

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

/* Per-offloaded-flow bookkeeping node, kept on flow_cfg->flow_list_tc
 * sorted by 'prio'. 'req' caches the NPC install request so the entry
 * can be re-installed when MCAM entries are re-shuffled.
 */
struct otx2_tc_flow {
	struct list_head list;
	unsigned long cookie;		/* tc flow cookie (lookup key) */
	struct rcu_head rcu;
	struct otx2_tc_flow_stats stats;
	spinlock_t lock; /* lock for stats */
	u16 rq;				/* RQ used by the police action */
	u16 entry;			/* MCAM entry index */
	u16 leaf_profile;		/* CN10K ipolicer leaf profile */
	bool is_act_police;
	u32 prio;
	struct npc_install_flow_req req;
	u32 mcast_grp_idx;		/* MCAST_INVALID_GRP when unused */
	u64 rate;
	u32 burst;
	bool is_pps;			/* packets-per-second policer */
};

/* Split a desired egress burst size (bytes) into the exponent/mantissa
 * encoding expected by the TLx shaping registers, clamping to the
 * per-silicon maximum.
 */
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
				      u32 *burst_exp, u32 *burst_mantissa)
{
	int max_burst, max_mantissa;
	unsigned int tmp;

	if (is_dev_otx2(nic->pdev)) {
		max_burst = MAX_BURST_SIZE;
		max_mantissa = MAX_BURST_MANTISSA;
	} else {
		max_burst = CN10K_MAX_BURST_SIZE;
		max_mantissa = CN10K_MAX_BURST_MANTISSA;
	}

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
	burst = min_t(u32, burst, max_burst);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < max_mantissa)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		/* burst == 0: program the maximum instead of disabling */
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = max_mantissa;
	}
}

/* Split a desired egress rate (Mbps) into the exponent/mantissa/divider
 * encoding expected by the TLx shaping registers.
 */
static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	u64 tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / ( 1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}

/* Build the full TLx PIR/CIR register value (rate + burst encoding,
 * enable bit 0 set) for the requested rate/burst, using the field
 * layout of whichever silicon (OcteonTx2 vs CN10K) is present.
 */
u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
				u64 maxrate, u32 burst)
{
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	u64 regval = 0;

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	if (is_dev_otx2(nic->pdev)) {
		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
			FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	} else {
		/* CN10K uses wider burst fields at different bit positions */
		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
			FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
			FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
	}

	return regval;
}

/* Program the shared TL4 scheduler's PIR register with the given
 * burst/rate via an AF mailbox request. Returns 0 or a -errno from
 * the mailbox exchange.
 */
static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
					 u32 burst, u64 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

/* Sanity-check a matchall offload request: interface must be up and the
 * rule must carry exactly one action.
 */
static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}
	return 0;
}

/* Reject police action configurations the hardware cannot honour:
 * only drop-on-exceed, pipe/ok-on-conform (ok only as the last action),
 * and no peakrate/avrate/overhead. NOTE: token rates themselves are not
 * validated here.
 */
static int otx2_policer_validate(const struct flow_action *action,
				 const struct flow_action_entry *act,
				 struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Install an egress matchall byte-rate limiter. Only a single police
 * action, bytes-per-second only; at most one such rule per interface
 * (tracked via OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED).
 */
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		err = otx2_policer_validate(&cls->rule->action, entry, extack);
		if (err)
			return err;

		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
			return -EOPNOTSUPP;
		}
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
						    otx2_convert_rate(entry->police.rate_bytes_ps));
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Remove the egress matchall rate limiter: reprogram TL4 with rate 0
 * (which otx2_get_egress_rate_cfg() maps to the max values) and clear
 * the enable flag.
 */
static int otx2_tc_egress_matchall_delete(struct
					  otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

/* Allocate a CN10K ingress-policer leaf profile, program its
 * rate/burst from 'node', and map the node's RQ to it. On any failure
 * after allocation the profile is freed again. Caller must NOT hold
 * the mbox lock; it is taken here.
 */
static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
				     struct otx2_tc_flow *node)
{
	int rc;

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
				     node->burst, node->rate, node->is_pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);
	return rc;
}

/* Attach an ingress police action to a flow: pick a free RQ from
 * rq_bmap (one policer per RQ), steer the flow to it via
 * NIX_RX_ACTIONOP_UCAST, record the policer parameters on 'node' and
 * program the hardware. The RQ bit is claimed only on success.
 */
static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	/* Carry the tc mark (lower 16 bits) through to the RX action */
	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;

	node->is_act_police = true;
	node->rq = rq_idx;
	node->burst = burst;
	node->rate = rate;
	node->is_pps = pps;

	rc = otx2_tc_act_set_hw_police(nic, node);
	if (!rc)
		set_bit(rq_idx, &nic->rq_bmap);

	return rc;
}

/* Create a NIX ingress multicast group for a mirror action and add the
 * local interface plus all mirror targets (already filled into 'ureq'
 * by the caller) to it, then point the flow's RX action at the group.
 * Returns 0 and records the group index on 'node' on success.
 */
static int otx2_tc_update_mcast(struct otx2_nic *nic,
				struct npc_install_flow_req *req,
				struct netlink_ext_ack *extack,
				struct otx2_tc_flow *node,
				struct nix_mcast_grp_update_req *ureq,
				u8 num_intf)
{
	struct nix_mcast_grp_update_req *grp_update_req;
	struct nix_mcast_grp_create_req *creq;
	struct nix_mcast_grp_create_rsp *crsp;
	u32 grp_index;
	int rc;

	mutex_lock(&nic->mbox.lock);
	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
	if (!creq) {
		rc = -ENOMEM;
		goto error;
	}

	creq->dir = NIX_MCAST_INGRESS;
	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
		goto error;
	}

	crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								    0,
								    &creq->hdr);
	if (IS_ERR(crsp)) {
		rc = PTR_ERR(crsp);
		goto error;
	}

	grp_index = crsp->mcast_grp_idx;
	grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
	if (!grp_update_req) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		rc = -ENOMEM;
		goto error;
	}

	/* Entry 0 is the local interface; mirror targets follow */
	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
	ureq->mcast_grp_idx = grp_index;
	ureq->num_mce_entry = num_intf;
	ureq->pcifunc[0] = nic->pcifunc;
	ureq->channel[0] = nic->hw.tx_chan_base;

	ureq->dest_type[0] = NIX_RX_RSS;
	ureq->rq_rss_index[0] = 0;
	/* Preserve the freshly allocated mbox header while copying the
	 * caller-prepared request body over the mbox message.
	 */
	memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
	memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		goto error;
	}

	mutex_unlock(&nic->mbox.lock);
	req->op = NIX_RX_ACTIONOP_MCAST;
req->index = grp_index; 426 node->mcast_grp_idx = grp_index; 427 return 0; 428 429 error: 430 mutex_unlock(&nic->mbox.lock); 431 return rc; 432 } 433 434 static int otx2_tc_parse_actions(struct otx2_nic *nic, 435 struct flow_action *flow_action, 436 struct npc_install_flow_req *req, 437 struct flow_cls_offload *f, 438 struct otx2_tc_flow *node) 439 { 440 struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 }; 441 struct netlink_ext_ack *extack = f->common.extack; 442 bool pps = false, mcast = false; 443 struct flow_action_entry *act; 444 struct net_device *target; 445 struct otx2_nic *priv; 446 u32 burst, mark = 0; 447 u8 nr_police = 0; 448 u8 num_intf = 1; 449 int err, i; 450 u64 rate; 451 452 if (!flow_action_has_entries(flow_action)) { 453 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); 454 return -EINVAL; 455 } 456 457 flow_action_for_each(i, act, flow_action) { 458 switch (act->id) { 459 case FLOW_ACTION_DROP: 460 req->op = NIX_RX_ACTIONOP_DROP; 461 return 0; 462 case FLOW_ACTION_ACCEPT: 463 req->op = NIX_RX_ACTION_DEFAULT; 464 return 0; 465 case FLOW_ACTION_REDIRECT_INGRESS: 466 target = act->dev; 467 priv = netdev_priv(target); 468 /* npc_install_flow_req doesn't support passing a target pcifunc */ 469 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { 470 NL_SET_ERR_MSG_MOD(extack, 471 "can't redirect to other pf/vf"); 472 return -EOPNOTSUPP; 473 } 474 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; 475 476 /* if op is already set; avoid overwriting the same */ 477 if (!req->op) 478 req->op = NIX_RX_ACTION_DEFAULT; 479 break; 480 481 case FLOW_ACTION_VLAN_POP: 482 req->vtag0_valid = true; 483 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */ 484 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; 485 break; 486 case FLOW_ACTION_POLICE: 487 /* Ingress ratelimiting is not supported on OcteonTx2 */ 488 if (is_dev_otx2(nic->pdev)) { 489 NL_SET_ERR_MSG_MOD(extack, 490 "Ingress policing not supported on this platform"); 491 return 
-EOPNOTSUPP; 492 } 493 494 err = otx2_policer_validate(flow_action, act, extack); 495 if (err) 496 return err; 497 498 if (act->police.rate_bytes_ps > 0) { 499 rate = act->police.rate_bytes_ps * 8; 500 burst = act->police.burst; 501 } else if (act->police.rate_pkt_ps > 0) { 502 /* The algorithm used to calculate rate 503 * mantissa, exponent values for a given token 504 * rate (token can be byte or packet) requires 505 * token rate to be mutiplied by 8. 506 */ 507 rate = act->police.rate_pkt_ps * 8; 508 burst = act->police.burst_pkt; 509 pps = true; 510 } 511 nr_police++; 512 break; 513 case FLOW_ACTION_MARK: 514 mark = act->mark; 515 break; 516 517 case FLOW_ACTION_RX_QUEUE_MAPPING: 518 req->op = NIX_RX_ACTIONOP_UCAST; 519 req->index = act->rx_queue; 520 break; 521 522 case FLOW_ACTION_MIRRED_INGRESS: 523 target = act->dev; 524 priv = netdev_priv(target); 525 dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc; 526 dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base; 527 dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS; 528 dummy_grp_update_req.rq_rss_index[num_intf] = 0; 529 mcast = true; 530 num_intf++; 531 break; 532 533 default: 534 return -EOPNOTSUPP; 535 } 536 } 537 538 if (mcast) { 539 err = otx2_tc_update_mcast(nic, req, extack, node, 540 &dummy_grp_update_req, 541 num_intf); 542 if (err) 543 return err; 544 } 545 546 if (nr_police > 1) { 547 NL_SET_ERR_MSG_MOD(extack, 548 "rate limit police offload requires a single action"); 549 return -EOPNOTSUPP; 550 } 551 552 if (nr_police) 553 return otx2_tc_act_set_police(nic, node, f, rate, burst, 554 mark, req, pps); 555 556 return 0; 557 } 558 559 static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec, 560 struct flow_msg *flow_mask, struct flow_rule *rule, 561 struct npc_install_flow_req *req, bool is_inner) 562 { 563 struct flow_match_vlan match; 564 u16 vlan_tci, vlan_tci_mask; 565 566 if (is_inner) 567 flow_rule_match_cvlan(rule, &match); 568 else 569 
		flow_rule_match_vlan(rule, &match);

	if (!eth_type_vlan(match.key->vlan_tpid)) {
		netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
			   ntohs(match.key->vlan_tpid));
		return -EOPNOTSUPP;
	}

	/* A wildcard vlan_id combined with a DROP action cannot be
	 * expressed in the MCAM; reject that combination.
	 */
	if (!match.mask->vlan_id) {
		struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			if (act->id == FLOW_ACTION_DROP) {
				netdev_err(nic->netdev,
					   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
					   ntohs(match.key->vlan_tpid), match.key->vlan_id);
				return -EOPNOTSUPP;
			}
		}
	}

	if (match.mask->vlan_id ||
	    match.mask->vlan_dei ||
	    match.mask->vlan_priority) {
		/* Re-assemble the 16-bit TCI: PCP[15:13] DEI[12] VID[11:0] */
		vlan_tci = match.key->vlan_id |
			   match.key->vlan_dei << 12 |
			   match.key->vlan_priority << 13;

		vlan_tci_mask = match.mask->vlan_id |
				match.mask->vlan_dei << 12 |
				match.mask->vlan_priority << 13;
		if (is_inner) {
			flow_spec->vlan_itci = htons(vlan_tci);
			flow_mask->vlan_itci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_INNER_VID);
		} else {
			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}

	return 0;
}

/* Convert a flower classifier rule into an NPC MCAM install request:
 * validate the used dissector keys, fill req->packet/req->mask and
 * req->features for each supported match, then hand off to
 * otx2_tc_parse_actions() for the action side.
 */
static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	/* Reject rules that use any dissector key outside this set */
	if ((dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IPSEC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ESP &&
		     match.key->ip_proto != IPPROTO_AH &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
		else if (ip_proto == IPPROTO_ESP)
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		else if (ip_proto == IPPROTO_AH)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
			return -EOPNOTSUPP;
		}

		/* Fragment matching depends on the EtherType matched above */
		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			if (ntohs(flow_spec->etype) == ETH_P_IP) {
				flow_spec->ip_flag = IPV4_FLAG_MORE;
				flow_mask->ip_flag = IPV4_FLAG_MORE;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
				flow_spec->next_header = IPPROTO_FRAGMENT;
				flow_mask->next_header = 0xff;
				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
			} else {
				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
		struct flow_match_ipsec match;

		flow_rule_match_ipsec(rule, &match);
		if (!match.mask->spi) {
			NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
			return -EOPNOTSUPP;
		}
		if (ip_proto != IPPROTO_ESP &&
		    ip_proto != IPPROTO_AH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "SPI index is valid only for ESP/AH proto");
			return -EOPNOTSUPP;
		}

		flow_spec->spi = match.key->spi;
		flow_mask->spi = match.mask->spi;
		req->features |= BIT_ULL(NPC_IPSEC_SPI);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		/* tos match is only supported for IPv4 flows */
		if
		   ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		int ret;

		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
		if (ret)
			return ret;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		/* Port features are L4-protocol specific; ip_proto was
		 * captured from the BASIC key above.
		 */
		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u8 bit;

		flow_rule_match_mpls(rule, &match);

		if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported LSE depth for MPLS match offload");
			return -EOPNOTSUPP;
		}

		for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
				 FLOW_DIS_MPLS_MAX) {
			/* check if any of the fields LABEL,TC,BOS are set */
			if (*((u32 *)&match.mask->ls[bit]) &
			    OTX2_FLOWER_MASK_MPLS_NON_TTL) {
				/* Hardware will capture 4 byte MPLS header into
				 * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
				 * Derive the associated NPC key based on header
				 * index and offset.
				 */

				req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
							 2 * bit);
				flow_spec->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.key->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.key->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.key->ls[bit].mpls_bos);

				flow_mask->mpls_lse[bit] =
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
						   match.mask->ls[bit].mpls_label) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
						   match.mask->ls[bit].mpls_tc) |
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
						   match.mask->ls[bit].mpls_bos);
			}

			if (match.mask->ls[bit].mpls_ttl) {
				req->features |= BIT_ULL(NPC_MPLS1_TTL +
							 2 * bit);
				flow_spec->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.key->ls[bit].mpls_ttl);
				flow_mask->mpls_lse[bit] |=
					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
						   match.mask->ls[bit].mpls_ttl);
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(rule, &match);

		flow_spec->icmp_type = match.key->type;
		flow_mask->icmp_type = match.mask->type;
		req->features |= BIT_ULL(NPC_TYPE_ICMP);

		flow_spec->icmp_code = match.key->code;
		flow_mask->icmp_code = match.mask->code;
		req->features |= BIT_ULL(NPC_CODE_ICMP);
	}
	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

/* Free every node on the tc flow list (driver teardown path; no
 * hardware cleanup is done here).
 */
static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_tc_flow *iter, *tmp;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
}

/* Linear search of the tc flow list by tc cookie; NULL if not found */
static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
							unsigned long cookie)
{
	struct otx2_tc_flow
			    *tmp;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (tmp->cookie == cookie)
			return tmp;
	}

	return NULL;
}

/* Return the index-th node of the tc flow list, or NULL past the end */
static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
						       int index)
{
	struct otx2_tc_flow *tmp;
	int i = 0;

	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
		if (i == index)
			return tmp;
		i++;
	}

	return NULL;
}

/* Unlink 'node' from the tc flow list if it is present */
static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
				       struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&node->list);
			return;
		}
	}
}

/* Insert 'node' into the tc flow list keeping it sorted by ascending
 * 'prio'; returns the list position the node was inserted at.
 */
static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
				    struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int index = 0;

	/* If the flow list is empty then add the new node */
	if (list_empty(&flow_cfg->flow_list_tc)) {
		list_add(&node->list, &flow_cfg->flow_list_tc);
		return index;
	}

	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node->prio < tmp->prio)
			break;
		index++;
	}

	/* Insert before the first entry with a higher prio */
	list_add(&node->list, pos->prev);
	return index;
}

/* Install an MCAM flow entry in the AF from a prepared request.
 * Returns 0, -ENOMEM, or -EFAULT on mailbox failure.
 */
static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
	struct npc_install_flow_req *tmp_req;
	int err;

	mutex_lock(&nic->mbox.lock);
	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!tmp_req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

/* Delete an MCAM flow entry in the AF. When 'cntr_val' is non-NULL the
 * entry's final counter value is read back from the delete response
 * (used to preserve stats across re-installation).
 */
static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
	struct npc_delete_flow_rsp *rsp;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	if (cntr_val) {
		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								      0, &req->hdr);
		if (IS_ERR(rsp)) {
			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
				   entry);
			mutex_unlock(&nic->mbox.lock);
			return -EFAULT;
		}

		*cntr_val = rsp->cntr_val;
	}

	mutex_unlock(&nic->mbox.lock);
	return 0;
}

/* Handle MCAM compaction when 'node' is removed: every flow ahead of
 * it in the priority-sorted list is shifted to the next-higher MCAM
 * index (carrying its counter value along) so the table stays dense.
 */
static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	struct list_head *pos, *n;
	struct otx2_tc_flow *tmp;
	int i = 0, index = 0;
	u16 cntr_val = 0;

	/* Find and delete the entry from the list and re-install
	 * all the entries from beginning to the index of the
	 * deleted entry to higher mcam indexes.
	 */
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		tmp = list_entry(pos, struct otx2_tc_flow, list);
		if (node == tmp) {
			list_del(&tmp->list);
			break;
		}

		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
		tmp->entry++;
		tmp->req.entry = tmp->entry;
		tmp->req.cntr_val = cntr_val;
		index++;
	}

	/* Second pass: re-install the shifted entries at their new slots */
	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
		if (i == index)
			break;

		tmp = list_entry(pos, struct otx2_tc_flow, list);
		otx2_add_mcam_flow_entry(nic, &tmp->req);
		i++;
	}

	return 0;
}

/* Make room in the MCAM for a new flow: insert 'node' into the sorted
 * list, shift all lower-priority predecessors to higher MCAM indexes,
 * and return the MCAM index the new flow should occupy (or -ENOMEM).
 */
static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
					     struct otx2_flow_config *flow_cfg,
					     struct otx2_tc_flow *node)
{
	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
	struct otx2_tc_flow *tmp;
	int list_idx, i;
	u16 cntr_val = 0;

	/* Find the index of the entry(list_idx) whose priority
	 * is greater than the new entry and re-install all
	 * the entries from beginning to list_idx to higher
	 * mcam indexes.
1132 */ 1133 list_idx = otx2_tc_add_to_flow_list(flow_cfg, node); 1134 for (i = 0; i < list_idx; i++) { 1135 tmp = otx2_tc_get_entry_by_index(flow_cfg, i); 1136 if (!tmp) 1137 return -ENOMEM; 1138 1139 otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val); 1140 tmp->entry = flow_cfg->flow_ent[mcam_idx]; 1141 tmp->req.entry = tmp->entry; 1142 tmp->req.cntr_val = cntr_val; 1143 otx2_add_mcam_flow_entry(nic, &tmp->req); 1144 mcam_idx++; 1145 } 1146 1147 return mcam_idx; 1148 } 1149 1150 static int otx2_tc_update_mcam_table(struct otx2_nic *nic, 1151 struct otx2_flow_config *flow_cfg, 1152 struct otx2_tc_flow *node, 1153 bool add_req) 1154 { 1155 if (add_req) 1156 return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node); 1157 1158 return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node); 1159 } 1160 1161 static int otx2_tc_del_flow(struct otx2_nic *nic, 1162 struct flow_cls_offload *tc_flow_cmd) 1163 { 1164 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 1165 struct nix_mcast_grp_destroy_req *grp_destroy_req; 1166 struct otx2_tc_flow *flow_node; 1167 int err; 1168 1169 flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 1170 if (!flow_node) { 1171 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n", 1172 tc_flow_cmd->cookie); 1173 return -EINVAL; 1174 } 1175 1176 if (flow_node->is_act_police) { 1177 __clear_bit(flow_node->rq, &nic->rq_bmap); 1178 1179 if (nic->flags & OTX2_FLAG_INTF_DOWN) 1180 goto free_mcam_flow; 1181 1182 mutex_lock(&nic->mbox.lock); 1183 1184 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq, 1185 flow_node->leaf_profile, false); 1186 if (err) 1187 netdev_err(nic->netdev, 1188 "Unmapping RQ %d & profile %d failed\n", 1189 flow_node->rq, flow_node->leaf_profile); 1190 1191 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile); 1192 if (err) 1193 netdev_err(nic->netdev, 1194 "Unable to free leaf bandwidth profile(%d)\n", 1195 flow_node->leaf_profile); 1196 1197 mutex_unlock(&nic->mbox.lock); 1198 } 
1199 /* Remove the multicast/mirror related nodes */ 1200 if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) { 1201 mutex_lock(&nic->mbox.lock); 1202 grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox); 1203 grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx; 1204 otx2_sync_mbox_msg(&nic->mbox); 1205 mutex_unlock(&nic->mbox.lock); 1206 } 1207 1208 1209 free_mcam_flow: 1210 otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); 1211 otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false); 1212 kfree_rcu(flow_node, rcu); 1213 flow_cfg->nr_flows--; 1214 return 0; 1215 } 1216 1217 static int otx2_tc_add_flow(struct otx2_nic *nic, 1218 struct flow_cls_offload *tc_flow_cmd) 1219 { 1220 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; 1221 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 1222 struct otx2_tc_flow *new_node, *old_node; 1223 struct npc_install_flow_req *req, dummy; 1224 int rc, err, mcam_idx; 1225 1226 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) 1227 return -ENOMEM; 1228 1229 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1230 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1231 return -EINVAL; 1232 } 1233 1234 if (flow_cfg->nr_flows == flow_cfg->max_flows) { 1235 NL_SET_ERR_MSG_MOD(extack, 1236 "Free MCAM entry not available to add the flow"); 1237 return -ENOMEM; 1238 } 1239 1240 /* allocate memory for the new flow and it's node */ 1241 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); 1242 if (!new_node) 1243 return -ENOMEM; 1244 spin_lock_init(&new_node->lock); 1245 new_node->cookie = tc_flow_cmd->cookie; 1246 new_node->prio = tc_flow_cmd->common.prio; 1247 new_node->mcast_grp_idx = MCAST_INVALID_GRP; 1248 1249 memset(&dummy, 0, sizeof(struct npc_install_flow_req)); 1250 1251 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy); 1252 if (rc) { 1253 kfree_rcu(new_node, rcu); 1254 return rc; 1255 } 1256 1257 /* If a flow exists with the same cookie, delete it */ 1258 old_node = 
otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie); 1259 if (old_node) 1260 otx2_tc_del_flow(nic, tc_flow_cmd); 1261 1262 mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true); 1263 mutex_lock(&nic->mbox.lock); 1264 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1265 if (!req) { 1266 mutex_unlock(&nic->mbox.lock); 1267 rc = -ENOMEM; 1268 goto free_leaf; 1269 } 1270 1271 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr)); 1272 memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); 1273 req->channel = nic->hw.rx_chan_base; 1274 req->entry = flow_cfg->flow_ent[mcam_idx]; 1275 req->intf = NIX_INTF_RX; 1276 req->set_cntr = 1; 1277 new_node->entry = req->entry; 1278 1279 /* Send message to AF */ 1280 rc = otx2_sync_mbox_msg(&nic->mbox); 1281 if (rc) { 1282 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry"); 1283 mutex_unlock(&nic->mbox.lock); 1284 goto free_leaf; 1285 } 1286 1287 mutex_unlock(&nic->mbox.lock); 1288 memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req)); 1289 1290 flow_cfg->nr_flows++; 1291 return 0; 1292 1293 free_leaf: 1294 otx2_tc_del_from_flow_list(flow_cfg, new_node); 1295 kfree_rcu(new_node, rcu); 1296 if (new_node->is_act_police) { 1297 mutex_lock(&nic->mbox.lock); 1298 1299 err = cn10k_map_unmap_rq_policer(nic, new_node->rq, 1300 new_node->leaf_profile, false); 1301 if (err) 1302 netdev_err(nic->netdev, 1303 "Unmapping RQ %d & profile %d failed\n", 1304 new_node->rq, new_node->leaf_profile); 1305 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile); 1306 if (err) 1307 netdev_err(nic->netdev, 1308 "Unable to free leaf bandwidth profile(%d)\n", 1309 new_node->leaf_profile); 1310 1311 __clear_bit(new_node->rq, &nic->rq_bmap); 1312 1313 mutex_unlock(&nic->mbox.lock); 1314 } 1315 1316 return rc; 1317 } 1318 1319 static int otx2_tc_get_flow_stats(struct otx2_nic *nic, 1320 struct flow_cls_offload *tc_flow_cmd) 1321 { 1322 struct npc_mcam_get_stats_req *req; 1323 struct 
npc_mcam_get_stats_rsp *rsp; 1324 struct otx2_tc_flow_stats *stats; 1325 struct otx2_tc_flow *flow_node; 1326 int err; 1327 1328 flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie); 1329 if (!flow_node) { 1330 netdev_info(nic->netdev, "tc flow not found for cookie %lx", 1331 tc_flow_cmd->cookie); 1332 return -EINVAL; 1333 } 1334 1335 mutex_lock(&nic->mbox.lock); 1336 1337 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox); 1338 if (!req) { 1339 mutex_unlock(&nic->mbox.lock); 1340 return -ENOMEM; 1341 } 1342 1343 req->entry = flow_node->entry; 1344 1345 err = otx2_sync_mbox_msg(&nic->mbox); 1346 if (err) { 1347 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n", 1348 req->entry); 1349 mutex_unlock(&nic->mbox.lock); 1350 return -EFAULT; 1351 } 1352 1353 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp 1354 (&nic->mbox.mbox, 0, &req->hdr); 1355 if (IS_ERR(rsp)) { 1356 mutex_unlock(&nic->mbox.lock); 1357 return PTR_ERR(rsp); 1358 } 1359 1360 mutex_unlock(&nic->mbox.lock); 1361 1362 if (!rsp->stat_ena) 1363 return -EINVAL; 1364 1365 stats = &flow_node->stats; 1366 1367 spin_lock(&flow_node->lock); 1368 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0, 1369 FLOW_ACTION_HW_STATS_IMMEDIATE); 1370 stats->pkts = rsp->stat; 1371 spin_unlock(&flow_node->lock); 1372 1373 return 0; 1374 } 1375 1376 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, 1377 struct flow_cls_offload *cls_flower) 1378 { 1379 switch (cls_flower->command) { 1380 case FLOW_CLS_REPLACE: 1381 return otx2_tc_add_flow(nic, cls_flower); 1382 case FLOW_CLS_DESTROY: 1383 return otx2_tc_del_flow(nic, cls_flower); 1384 case FLOW_CLS_STATS: 1385 return otx2_tc_get_flow_stats(nic, cls_flower); 1386 default: 1387 return -EOPNOTSUPP; 1388 } 1389 } 1390 1391 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, 1392 struct tc_cls_matchall_offload *cls) 1393 { 1394 struct netlink_ext_ack *extack = 
cls->common.extack; 1395 struct flow_action *actions = &cls->rule->action; 1396 struct flow_action_entry *entry; 1397 u64 rate; 1398 int err; 1399 1400 err = otx2_tc_validate_flow(nic, actions, extack); 1401 if (err) 1402 return err; 1403 1404 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) { 1405 NL_SET_ERR_MSG_MOD(extack, 1406 "Only one ingress MATCHALL ratelimitter can be offloaded"); 1407 return -ENOMEM; 1408 } 1409 1410 entry = &cls->rule->action.entries[0]; 1411 switch (entry->id) { 1412 case FLOW_ACTION_POLICE: 1413 /* Ingress ratelimiting is not supported on OcteonTx2 */ 1414 if (is_dev_otx2(nic->pdev)) { 1415 NL_SET_ERR_MSG_MOD(extack, 1416 "Ingress policing not supported on this platform"); 1417 return -EOPNOTSUPP; 1418 } 1419 1420 err = cn10k_alloc_matchall_ipolicer(nic); 1421 if (err) 1422 return err; 1423 1424 /* Convert to bits per second */ 1425 rate = entry->police.rate_bytes_ps * 8; 1426 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate); 1427 if (err) 1428 return err; 1429 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1430 break; 1431 default: 1432 NL_SET_ERR_MSG_MOD(extack, 1433 "Only police action supported with Ingress MATCHALL offload"); 1434 return -EOPNOTSUPP; 1435 } 1436 1437 return 0; 1438 } 1439 1440 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic, 1441 struct tc_cls_matchall_offload *cls) 1442 { 1443 struct netlink_ext_ack *extack = cls->common.extack; 1444 int err; 1445 1446 if (nic->flags & OTX2_FLAG_INTF_DOWN) { 1447 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized"); 1448 return -EINVAL; 1449 } 1450 1451 err = cn10k_free_matchall_ipolicer(nic); 1452 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED; 1453 return err; 1454 } 1455 1456 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic, 1457 struct tc_cls_matchall_offload *cls_matchall) 1458 { 1459 switch (cls_matchall->command) { 1460 case TC_CLSMATCHALL_REPLACE: 1461 return otx2_tc_ingress_matchall_install(nic, 
cls_matchall); 1462 case TC_CLSMATCHALL_DESTROY: 1463 return otx2_tc_ingress_matchall_delete(nic, cls_matchall); 1464 case TC_CLSMATCHALL_STATS: 1465 default: 1466 break; 1467 } 1468 1469 return -EOPNOTSUPP; 1470 } 1471 1472 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type, 1473 void *type_data, void *cb_priv) 1474 { 1475 struct otx2_nic *nic = cb_priv; 1476 bool ntuple; 1477 1478 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1479 return -EOPNOTSUPP; 1480 1481 ntuple = nic->netdev->features & NETIF_F_NTUPLE; 1482 switch (type) { 1483 case TC_SETUP_CLSFLOWER: 1484 if (ntuple) { 1485 netdev_warn(nic->netdev, 1486 "Can't install TC flower offload rule when NTUPLE is active"); 1487 return -EOPNOTSUPP; 1488 } 1489 1490 return otx2_setup_tc_cls_flower(nic, type_data); 1491 case TC_SETUP_CLSMATCHALL: 1492 return otx2_setup_tc_ingress_matchall(nic, type_data); 1493 default: 1494 break; 1495 } 1496 1497 return -EOPNOTSUPP; 1498 } 1499 1500 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic, 1501 struct tc_cls_matchall_offload *cls_matchall) 1502 { 1503 switch (cls_matchall->command) { 1504 case TC_CLSMATCHALL_REPLACE: 1505 return otx2_tc_egress_matchall_install(nic, cls_matchall); 1506 case TC_CLSMATCHALL_DESTROY: 1507 return otx2_tc_egress_matchall_delete(nic, cls_matchall); 1508 case TC_CLSMATCHALL_STATS: 1509 default: 1510 break; 1511 } 1512 1513 return -EOPNOTSUPP; 1514 } 1515 1516 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type, 1517 void *type_data, void *cb_priv) 1518 { 1519 struct otx2_nic *nic = cb_priv; 1520 1521 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data)) 1522 return -EOPNOTSUPP; 1523 1524 switch (type) { 1525 case TC_SETUP_CLSMATCHALL: 1526 return otx2_setup_tc_egress_matchall(nic, type_data); 1527 default: 1528 break; 1529 } 1530 1531 return -EOPNOTSUPP; 1532 } 1533 1534 static LIST_HEAD(otx2_block_cb_list); 1535 1536 static int otx2_setup_tc_block(struct net_device *netdev, 1537 struct 
flow_block_offload *f) 1538 { 1539 struct otx2_nic *nic = netdev_priv(netdev); 1540 flow_setup_cb_t *cb; 1541 bool ingress; 1542 1543 if (f->block_shared) 1544 return -EOPNOTSUPP; 1545 1546 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { 1547 cb = otx2_setup_tc_block_ingress_cb; 1548 ingress = true; 1549 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { 1550 cb = otx2_setup_tc_block_egress_cb; 1551 ingress = false; 1552 } else { 1553 return -EOPNOTSUPP; 1554 } 1555 1556 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb, 1557 nic, nic, ingress); 1558 } 1559 1560 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, 1561 void *type_data) 1562 { 1563 switch (type) { 1564 case TC_SETUP_BLOCK: 1565 return otx2_setup_tc_block(netdev, type_data); 1566 case TC_SETUP_QDISC_HTB: 1567 return otx2_setup_tc_htb(netdev, type_data); 1568 default: 1569 return -EOPNOTSUPP; 1570 } 1571 } 1572 EXPORT_SYMBOL(otx2_setup_tc); 1573 1574 int otx2_init_tc(struct otx2_nic *nic) 1575 { 1576 /* Exclude receive queue 0 being used for police action */ 1577 set_bit(0, &nic->rq_bmap); 1578 1579 if (!nic->flow_cfg) { 1580 netdev_err(nic->netdev, 1581 "Can't init TC, nic->flow_cfg is not setup\n"); 1582 return -EINVAL; 1583 } 1584 1585 return 0; 1586 } 1587 EXPORT_SYMBOL(otx2_init_tc); 1588 1589 void otx2_shutdown_tc(struct otx2_nic *nic) 1590 { 1591 otx2_destroy_tc_flow_list(nic); 1592 } 1593 EXPORT_SYMBOL(otx2_shutdown_tc); 1594 1595 static void otx2_tc_config_ingress_rule(struct otx2_nic *nic, 1596 struct otx2_tc_flow *node) 1597 { 1598 struct npc_install_flow_req *req; 1599 1600 if (otx2_tc_act_set_hw_police(nic, node)) 1601 return; 1602 1603 mutex_lock(&nic->mbox.lock); 1604 1605 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox); 1606 if (!req) 1607 goto err; 1608 1609 memcpy(req, &node->req, sizeof(struct npc_install_flow_req)); 1610 1611 if (otx2_sync_mbox_msg(&nic->mbox)) 1612 netdev_err(nic->netdev, 1613 "Failed to install 
MCAM flow entry for ingress rule"); 1614 err: 1615 mutex_unlock(&nic->mbox.lock); 1616 } 1617 1618 void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic) 1619 { 1620 struct otx2_flow_config *flow_cfg = nic->flow_cfg; 1621 struct otx2_tc_flow *node; 1622 1623 /* If any ingress policer rules exist for the interface then 1624 * apply those rules. Ingress policer rules depend on bandwidth 1625 * profiles linked to the receive queues. Since no receive queues 1626 * exist when interface is down, ingress policer rules are stored 1627 * and configured in hardware after all receive queues are allocated 1628 * in otx2_open. 1629 */ 1630 list_for_each_entry(node, &flow_cfg->flow_list_tc, list) { 1631 if (node->is_act_police) 1632 otx2_tc_config_ingress_rule(nic, node); 1633 } 1634 } 1635 EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules); 1636