// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_macvlan.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include "tc.h"
#include "neigh.h"
#include "en_rep.h"
#include "eswitch.h"
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mapping.h"
#include "en/tc_tun.h"
#include "lib/port_tun.h"
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"
#include "en/tc/act/act.h"

struct mlx5e_rep_indr_block_priv {
        struct net_device *netdev;
        struct mlx5e_rep_priv *rpriv;
        enum flow_block_binder_type binder_type;

        struct list_head list;
};

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e,
                                 struct mlx5e_neigh *m_neigh,
                                 struct net_device *neigh_dev)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
        if (err)
                return err;

        mutex_lock(&rpriv->neigh_update.encap_lock);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe);
                if (err) {
                        mutex_unlock(&rpriv->neigh_update.encap_lock);
                        mlx5_tun_entropy_refcount_dec(tun_entropy,
                                                      e->reformat_type);
                        return err;
                }
        }

        e->nhe = nhe;
        spin_lock(&nhe->encap_list_lock);
        list_add_rcu(&e->encap_list, &nhe->encap_list);
        spin_unlock(&nhe->encap_list_lock);

        mutex_unlock(&rpriv->neigh_update.encap_lock);

        return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

        if (!e->nhe)
                return;

        spin_lock(&e->nhe->encap_list_lock);
        list_del_rcu(&e->encap_list);
        spin_unlock(&e->nhe->encap_list_lock);

        mlx5e_rep_neigh_entry_release(e->nhe);
        e->nhe = NULL;
        mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

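/* Called under RTNL on neighbour updates: if the neighbour's state or MAC
 * no longer matches the encap entry, tear down the offloaded flows that use
 * the stale header and re-offload them once the neighbour is connected with
 * the new destination MAC.
 */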
void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                            struct mlx5e_encap_entry *e,
                            bool neigh_connected,
                            unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        bool encap_connected;
        LIST_HEAD(flow_list);

        ASSERT_RTNL();

        mutex_lock(&esw->offloads.encap_tbl_lock);
        encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
        if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
                goto unlock;

        mlx5e_take_all_encap_flows(e, &flow_list);

        if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
            (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
                mlx5e_tc_encap_flows_del(priv, e, &flow_list);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                struct net_device *route_dev;

                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);
                /* Update the encap source mac, in case we delete the flows
                 * when the encap source mac changes.
                 */
                route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
                if (route_dev)
                        ether_addr_copy(eth->h_source, route_dev->dev_addr);

                mlx5e_tc_encap_flows_add(priv, e, &flow_list);
        }
unlock:
        mutex_unlock(&esw->offloads.encap_tbl_lock);
        mlx5e_put_flow_list(priv, &flow_list);
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
                              struct flow_cls_offload *cls_flower, int flags)
{
        switch (cls_flower->command) {
        case FLOW_CLS_REPLACE:
                return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
                                              flags);
        case FLOW_CLS_DESTROY:
                return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
                                           flags);
        case FLOW_CLS_STATS:
                return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
                                          flags);
        default:
                return -EOPNOTSUPP;
        }
}

static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
                                    struct tc_cls_matchall_offload *ma)
{
        switch (ma->command) {
        case TC_CLSMATCHALL_REPLACE:
                return mlx5e_tc_configure_matchall(priv, ma);
        case TC_CLSMATCHALL_DESTROY:
                return mlx5e_tc_delete_matchall(priv, ma);
        case TC_CLSMATCHALL_STATS:
                mlx5e_tc_stats_matchall(priv, ma);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
        struct mlx5e_priv *priv = cb_priv;

        if (!priv->netdev || !netif_device_present(priv->netdev))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
        case TC_SETUP_CLSMATCHALL:
                return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

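/* TC_SETUP_FT block callback: reuses the flower offload path above by
 * remapping the rule onto the reserved ft chain (see the comment in the
 * function body for the prio/chain normalization).
 */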
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct flow_cls_offload tmp, *f = type_data;
        struct mlx5e_priv *priv = cb_priv;
        struct mlx5_eswitch *esw;
        unsigned long flags;
        int err;

        flags = MLX5_TC_FLAG(INGRESS) |
                MLX5_TC_FLAG(ESW_OFFLOAD) |
                MLX5_TC_FLAG(FT_OFFLOAD);
        esw = priv->mdev->priv.eswitch;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                memcpy(&tmp, f, sizeof(*f));

                if (!mlx5_chains_prios_supported(esw_chains(esw)))
                        return -EOPNOTSUPP;

                /* Re-use tc offload path by moving the ft flow to the
                 * reserved ft chain.
                 *
                 * FT offload can use prio range [0, INT_MAX], so we normalize
                 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
                 * as with tc, where prio 0 isn't supported.
                 *
                 * We only support chain 0 of FT offload.
                 */
                if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
                        return -EOPNOTSUPP;
                if (tmp.common.chain_index != 0)
                        return -EOPNOTSUPP;

                tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
                tmp.common.prio++;
                err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
                memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
                return err;
        default:
                return -EOPNOTSUPP;
        }
}

static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                       void *type_data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct flow_block_offload *f = type_data;

        f->unlocked_driver_cb = true;

        switch (type) {
        case TC_SETUP_BLOCK:
                return flow_block_cb_setup_simple(type_data,
                                                  &mlx5e_rep_block_tc_cb_list,
                                                  mlx5e_rep_setup_tc_cb,
                                                  priv, priv, true);
        case TC_SETUP_FT:
                return flow_block_cb_setup_simple(type_data,
                                                  &mlx5e_rep_block_ft_cb_list,
                                                  mlx5e_rep_setup_ft_cb,
                                                  priv, priv, true);
        default:
                return -EOPNOTSUPP;
        }
}

int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        int err;

        mutex_init(&uplink_priv->unready_flows_lock);
        INIT_LIST_HEAD(&uplink_priv->unready_flows);

        /* init shared tc flow table */
        err = mlx5e_tc_esw_init(uplink_priv);
        return err;
}

void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
        /* delete shared tc flow table */
        mlx5e_tc_esw_cleanup(&rpriv->uplink_priv);
        mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}

void mlx5e_rep_tc_enable(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
                  mlx5e_tc_reoffload_flows_work);
}

void mlx5e_rep_tc_disable(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
}

int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

        return NOTIFY_OK;
}

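/* Indirect block offload lets the uplink representor offload tc rules
 * installed on netdevs it does not own: tunnel devices, VLAN and macvlan
 * uppers of the uplink, and OVS internal ports. One
 * mlx5e_rep_indr_block_priv is tracked per (netdev, binder_type) pair on
 * the uplink's tc_indr_block_priv_list.
 */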
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
                                 struct net_device *netdev,
                                 enum flow_block_binder_type binder_type)
{
        struct mlx5e_rep_indr_block_priv *cb_priv;

        list_for_each_entry(cb_priv,
                            &rpriv->uplink_priv.tc_indr_block_priv_list,
                            list)
                if (cb_priv->netdev == netdev &&
                    cb_priv->binder_type == binder_type)
                        return cb_priv;

        return NULL;
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
                       struct flow_cls_offload *flower,
                       struct mlx5e_rep_indr_block_priv *indr_priv,
                       unsigned long flags)
{
        struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
        int err = 0;

        if (!netif_device_present(indr_priv->rpriv->netdev))
                return -EOPNOTSUPP;

        switch (flower->command) {
        case FLOW_CLS_REPLACE:
                err = mlx5e_configure_flower(netdev, priv, flower, flags);
                break;
        case FLOW_CLS_DESTROY:
                err = mlx5e_delete_flower(netdev, priv, flower, flags);
                break;
        case FLOW_CLS_STATS:
                err = mlx5e_stats_flower(netdev, priv, flower, flags);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
                                      void *type_data, void *indr_priv)
{
        unsigned long flags = MLX5_TC_FLAG(ESW_OFFLOAD);
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;

        flags |= (priv->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) ?
                MLX5_TC_FLAG(EGRESS) :
                MLX5_TC_FLAG(INGRESS);

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
                                              flags);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
                                      void *type_data, void *indr_priv)
{
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;
        struct flow_cls_offload *f = type_data;
        struct flow_cls_offload tmp;
        struct mlx5e_priv *mpriv;
        struct mlx5_eswitch *esw;
        unsigned long flags;
        int err;

        mpriv = netdev_priv(priv->rpriv->netdev);
        esw = mpriv->mdev->priv.eswitch;

        flags = MLX5_TC_FLAG(EGRESS) |
                MLX5_TC_FLAG(ESW_OFFLOAD) |
                MLX5_TC_FLAG(FT_OFFLOAD);

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                memcpy(&tmp, f, sizeof(*f));

                /* Re-use tc offload path by moving the ft flow to the
                 * reserved ft chain.
                 *
                 * FT offload can use prio range [0, INT_MAX], so we normalize
                 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
                 * as with tc, where prio 0 isn't supported.
                 *
                 * We only support chain 0 of FT offload.
                 */
                if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
                    tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
                    tmp.common.chain_index)
                        return -EOPNOTSUPP;

                tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
                tmp.common.prio++;
                err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
                memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
                return err;
        default:
                return -EOPNOTSUPP;
        }
}

static void mlx5e_rep_indr_block_unbind(void *cb_priv)
{
        struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

        list_del(&indr_priv->list);
        kfree(indr_priv);
}

static LIST_HEAD(mlx5e_block_cb_list);

static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
{
        struct macvlan_dev *macvlan = netdev_priv(dev);

        return macvlan->mode == MACVLAN_MODE_PASSTHRU;
}

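/* Bind or unbind an indirect flow block on a foreign netdev. Only tunnel
 * devices, VLAN/macvlan uppers of this uplink and OVS internal ports are
 * accepted, and egress binding is allowed only for OVS internal ports.
 */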
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
                           struct mlx5e_rep_priv *rpriv,
                           struct flow_block_offload *f,
                           flow_setup_cb_t *setup_cb,
                           void *data,
                           void (*cleanup)(struct flow_block_cb *block_cb))
{
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        bool is_ovs_int_port = netif_is_ovs_master(netdev);
        struct mlx5e_rep_indr_block_priv *indr_priv;
        struct flow_block_cb *block_cb;

        if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
            !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev) &&
            !is_ovs_int_port) {
                if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
                        return -EOPNOTSUPP;
                if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
                        netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode\n");
                        return -EOPNOTSUPP;
                }
        }

        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
            f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
                return -EOPNOTSUPP;

        if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && !is_ovs_int_port)
                return -EOPNOTSUPP;

        if (is_ovs_int_port && !mlx5e_tc_int_port_supported(esw))
                return -EOPNOTSUPP;

        f->unlocked_driver_cb = true;
        f->driver_block_list = &mlx5e_block_cb_list;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
                if (indr_priv)
                        return -EEXIST;

                indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
                if (!indr_priv)
                        return -ENOMEM;

                indr_priv->netdev = netdev;
                indr_priv->rpriv = rpriv;
                indr_priv->binder_type = f->binder_type;
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);

                block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
                                                    mlx5e_rep_indr_block_unbind,
                                                    f, netdev, sch, data, rpriv,
                                                    cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
                        return PTR_ERR(block_cb);
                }
                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

                return 0;
        case FLOW_BLOCK_UNBIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
                if (!indr_priv)
                        return -ENOENT;

                block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
                if (!block_cb)
                        return -ENOENT;

                flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

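/* tc action offload (flow_offload_action): offload, destroy and query
 * stats for actions that exist independently of any flower rule. The
 * namespace is FDB when the eswitch is in switchdev mode and the kernel
 * (NIC) namespace otherwise.
 */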
static int
mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv,
                           struct flow_offload_action *fl_act)
{
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        enum mlx5_flow_namespace_type ns_type;
        struct flow_action_entry *action;
        struct mlx5e_tc_act *act;
        bool add = false;
        int i;

        /* There is currently no use case for more than one action (e.g. pedit).
         * When there is one, cleaning up multiple actions on error will need
         * to be handled.
         */
        if (!flow_offload_has_one_action(&fl_act->action))
                return -EOPNOTSUPP;

        if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
                ns_type = MLX5_FLOW_NAMESPACE_FDB;
        else
                ns_type = MLX5_FLOW_NAMESPACE_KERNEL;

        flow_action_for_each(i, action, &fl_act->action) {
                act = mlx5e_tc_act_get(action->id, ns_type);
                if (!act)
                        continue;

                if (!act->offload_action)
                        continue;

                if (!act->offload_action(priv, fl_act, action))
                        add = true;
        }

        return add ? 0 : -EOPNOTSUPP;
}

static int
mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv,
                           struct flow_offload_action *fl_act)
{
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        enum mlx5_flow_namespace_type ns_type;
        struct mlx5e_tc_act *act;

        if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
                ns_type = MLX5_FLOW_NAMESPACE_FDB;
        else
                ns_type = MLX5_FLOW_NAMESPACE_KERNEL;

        act = mlx5e_tc_act_get(fl_act->id, ns_type);
        if (!act || !act->destroy_action)
                return -EOPNOTSUPP;

        return act->destroy_action(priv, fl_act);
}

static int
mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
                         struct flow_offload_action *fl_act)
{
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        enum mlx5_flow_namespace_type ns_type;
        struct mlx5e_tc_act *act;

        if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
                ns_type = MLX5_FLOW_NAMESPACE_FDB;
        else
                ns_type = MLX5_FLOW_NAMESPACE_KERNEL;

        act = mlx5e_tc_act_get(fl_act->id, ns_type);
        if (!act || !act->stats_action)
                return mlx5e_tc_fill_action_stats(priv, fl_act);

        return act->stats_action(priv, fl_act);
}

static int
mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv,
                         struct flow_offload_action *fl_act)
{
        switch (fl_act->command) {
        case FLOW_ACT_REPLACE:
                return mlx5e_rep_indr_replace_act(rpriv, fl_act);
        case FLOW_ACT_DESTROY:
                return mlx5e_rep_indr_destroy_act(rpriv, fl_act);
        case FLOW_ACT_STATS:
                return mlx5e_rep_indr_stats_act(rpriv, fl_act);
        default:
                return -EOPNOTSUPP;
        }
}

static int
mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv,
                            enum tc_setup_type type,
                            void *data)
{
        if (!data)
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_ACT:
                return mlx5e_rep_indr_setup_act(rpriv, data);
        default:
                return -EOPNOTSUPP;
        }
}

static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
                            enum tc_setup_type type, void *type_data,
                            void *data,
                            void (*cleanup)(struct flow_block_cb *block_cb))
{
        if (!netdev)
                return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data);

        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
                                                  mlx5e_rep_indr_setup_tc_cb,
                                                  data, cleanup);
        case TC_SETUP_FT:
                return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
                                                  mlx5e_rep_indr_setup_ft_cb,
                                                  data, cleanup);
        default:
                return -EOPNOTSUPP;
        }
}

int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;

        /* init indirect block notifications */
        INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);

        return flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv);
}

void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
{
        flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
                                 mlx5e_rep_indr_block_unbind);
}

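/* Restore skb state from CQE metadata on the representor receive path:
 * reg_c0 carries the mapped object (flow) id and reg_c1 the CT
 * zone-restore and tunnel ids. The skb is freed if restoration fails,
 * otherwise it is forwarded (dev_queue_xmit) or passed up the stack via
 * GRO.
 */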
void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
                          struct sk_buff *skb)
{
        u32 reg_c0, reg_c1, zone_restore_id, tunnel_id;
        struct mlx5e_tc_update_priv tc_priv = {};
        struct mlx5_rep_uplink_priv *uplink_priv;
        struct mlx5e_rep_priv *uplink_rpriv;
        struct mlx5_tc_ct_priv *ct_priv;
        struct mapping_ctx *mapping_ctx;
        struct mlx5_eswitch *esw;
        struct mlx5e_priv *priv;

        reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
        if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
                goto forward;

        /* If mapped_obj_id is not equal to the default flow tag then skb->mark
         * is not supported and must be reset back to 0.
         */
        skb->mark = 0;

        priv = netdev_priv(skb->dev);
        esw = priv->mdev->priv.eswitch;
        mapping_ctx = esw->offloads.reg_c0_obj_pool;
        reg_c1 = be32_to_cpu(cqe->ft_metadata);
        zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
        tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;

        uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
        uplink_priv = &uplink_rpriv->uplink_priv;
        ct_priv = uplink_priv->ct_priv;

        if (!mlx5_ipsec_is_rx_flow(cqe) &&
            !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id,
                                 &tc_priv))
                goto free_skb;

forward:
        if (tc_priv.skb_done)
                goto free_skb;

        if (tc_priv.forward_tx)
                dev_queue_xmit(skb);
        else
                napi_gro_receive(rq->cq.napi, skb);

        dev_put(tc_priv.fwd_dev);

        return;

free_skb:
        dev_kfree_skb_any(skb);
}