/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "ipoib.h"

#define IB_DEFAULT_Q_KEY   0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9

static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);

static const struct net_device_ops mlx5i_netdev_ops = {
        .ndo_open                = mlx5i_open,
        .ndo_stop                = mlx5i_close,
        .ndo_get_stats64         = mlx5i_get_stats,
        .ndo_init                = mlx5i_dev_init,
        .ndo_uninit              = mlx5i_dev_cleanup,
        .ndo_change_mtu          = mlx5i_change_mtu,
        .ndo_do_ioctl            = mlx5i_ioctl,
};

/* IPoIB mlx5 netdev profile */
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
{
        /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
        mlx5e_set_rq_type(mdev, params);
        mlx5e_init_rq_type_params(mdev, params);

        /* RQ size in ipoib by default is 512 */
        params->log_rq_mtu_frames = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

        params->lro_en = false;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
        params->tunneled_offload_en = false;
}
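
/* A note on hard_mtu above: it accounts for the IB transport headers
 * (the 40-byte GRH plus the IPoIB encapsulation header) that are not
 * counted in the netdev MTU, so the device-level MTU is, roughly,
 * sw_mtu + hard_mtu.
 */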

/* Called directly after IPoIB netdevice was created to initialize SW structs */
int mlx5i_init(struct mlx5_core_dev *mdev,
               struct net_device *netdev,
               const struct mlx5e_profile *profile,
               void *ppriv)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        int err;

        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;

        mlx5e_set_netdev_mtu_boundaries(priv);
        netdev->mtu = netdev->max_mtu;

        mlx5e_build_nic_params(priv, NULL, &priv->rss_params,
                               &priv->channels.params, netdev->mtu);
        mlx5i_build_nic_params(mdev, &priv->channels.params);

        mlx5e_timestamp_init(priv);

        /* netdev init */
        netdev->hw_features |= NETIF_F_SG;
        netdev->hw_features |= NETIF_F_IP_CSUM;
        netdev->hw_features |= NETIF_F_IPV6_CSUM;
        netdev->hw_features |= NETIF_F_GRO;
        netdev->hw_features |= NETIF_F_TSO;
        netdev->hw_features |= NETIF_F_TSO6;
        netdev->hw_features |= NETIF_F_RXCSUM;
        netdev->hw_features |= NETIF_F_RXHASH;

        netdev->netdev_ops = &mlx5i_netdev_ops;
        netdev->ethtool_ops = &mlx5i_ethtool_ops;

        return 0;
}

/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
void mlx5i_cleanup(struct mlx5e_priv *priv)
{
        mlx5e_netdev_cleanup(priv->netdev, priv);
}

static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats s = { 0 };
        int i, j;

        for (i = 0; i < priv->max_nch; i++) {
                struct mlx5e_channel_stats *channel_stats;
                struct mlx5e_rq_stats *rq_stats;

                channel_stats = &priv->channel_stats[i];
                rq_stats = &channel_stats->rq;

                s.rx_packets += rq_stats->packets;
                s.rx_bytes   += rq_stats->bytes;

                for (j = 0; j < priv->max_opened_tc; j++) {
                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

                        s.tx_packets       += sq_stats->packets;
                        s.tx_bytes         += sq_stats->bytes;
                        s.tx_queue_dropped += sq_stats->dropped;
                }
        }

        memcpy(&priv->stats.sw, &s, sizeof(s));
}

void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);
        struct mlx5e_sw_stats *sstats = &priv->stats.sw;

        mlx5i_grp_sw_update_stats(priv);

        stats->rx_packets = sstats->rx_packets;
        stats->rx_bytes   = sstats->rx_bytes;
        stats->tx_packets = sstats->tx_packets;
        stats->tx_bytes   = sstats->tx_bytes;
        stats->tx_dropped = sstats->tx_queue_dropped;
}
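
/* The underlay QP is driven through the standard IB state machine,
 * RST -> INIT -> RTR -> RTS, below. Only the RST2INIT transition
 * carries attributes (port, pkey index, qkey); the context is zeroed
 * again before the remaining transitions. On any failure the QP is
 * moved to the ERR state before unwinding.
 */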
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        struct mlx5_core_qp *qp = &ipriv->qp;
        struct mlx5_qp_context *context;
        int ret;

        /* QP states */
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
        context->pri_path.port = 1;
        context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
        context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);

        ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
        if (ret) {
                mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
                goto err_qp_modify_to_err;
        }
        memset(context, 0, sizeof(*context));

        ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
        if (ret) {
                mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
                goto err_qp_modify_to_err;
        }

        ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
        if (ret) {
                mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
                goto err_qp_modify_to_err;
        }

        kfree(context);
        return 0;

err_qp_modify_to_err:
        mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, context, qp);
        kfree(context);
        return ret;
}

void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_qp_context context;
        int err;

        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
                                  &ipriv->qp);
        if (err)
                mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
}

#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
        u32 *in = NULL;
        void *addr_path;
        int ret = 0;
        int inlen;
        void *qpc;

        inlen = MLX5_ST_SZ_BYTES(create_qp_in);
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
                 MLX5_QP_ENHANCED_ULP_STATELESS_MODE);

        addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
        MLX5_SET(ads, addr_path, vhca_port_num, 1);
        MLX5_SET(ads, addr_path, grh, 1);

        ret = mlx5_core_create_qp(mdev, qp, in, inlen);
        if (ret)
                mlx5_core_err(mdev, "Failed creating IPoIB QP, err: %d\n", ret);

        kvfree(in);
        return ret;
}

void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
        mlx5_core_destroy_qp(mdev, qp);
}

int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
{
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
        void *tisc;

        tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);

        return mlx5e_create_tis(mdev, in, tisn);
}
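
/* TX objects, in short: all SQs opened by the generic mlx5e channel
 * code hang off a single TIS, and mlx5i_create_tis() above points that
 * TIS at the underlay UD QP, so egress traffic is associated with the
 * IPoIB QP.
 */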
static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        int err;

        err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
        if (err) {
                mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
                return err;
        }

        err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
                goto err_destroy_underlay_qp;
        }

        return 0;

err_destroy_underlay_qp:
        mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
        return err;
}

static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;

        mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
        mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
}

static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
        struct ttc_params ttc_params = {};
        int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        if (!priv->fs.ns)
                return -EINVAL;

        /* aRFS is best-effort: on failure just drop NETIF_F_NTUPLE */
        err = mlx5e_arfs_create_tables(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
                           err);
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

        mlx5e_set_ttc_basic_params(priv, &ttc_params);
        mlx5e_set_inner_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

        err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

        mlx5e_set_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

        err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                goto err_destroy_inner_ttc_table;
        }

        return 0;

err_destroy_inner_ttc_table:
        mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

        return err;
}

static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
        mlx5e_arfs_destroy_tables(priv);
}

static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        mlx5e_create_q_counters(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_destroy_q_counters;
        }

        err = mlx5e_create_indirect_rqt(priv);
        if (err)
                goto err_close_drop_rq;

        err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
        if (err)
                goto err_destroy_indirect_rqts;

        err = mlx5e_create_indirect_tirs(priv, true);
        if (err)
                goto err_destroy_direct_rqts;

        err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
        if (err)
                goto err_destroy_indirect_tirs;

        err = mlx5i_create_flow_steering(priv);
        if (err)
                goto err_destroy_direct_tirs;

        return 0;

err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
        mlx5e_destroy_q_counters(priv);
        return err;
}

static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
        mlx5i_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
        mlx5e_destroy_indirect_tirs(priv, true);
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
        mlx5e_destroy_q_counters(priv);
}

/* The stats groups' order here is opposite to the order of the
 * update_stats() calls.
 */
static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
        &MLX5E_STATS_GRP(sw),
        &MLX5E_STATS_GRP(qcnt),
        &MLX5E_STATS_GRP(vnic_env),
        &MLX5E_STATS_GRP(vport),
        &MLX5E_STATS_GRP(802_3),
        &MLX5E_STATS_GRP(2863),
        &MLX5E_STATS_GRP(2819),
        &MLX5E_STATS_GRP(phy),
        &MLX5E_STATS_GRP(pcie),
        &MLX5E_STATS_GRP(per_prio),
        &MLX5E_STATS_GRP(pme),
        &MLX5E_STATS_GRP(channels),
        &MLX5E_STATS_GRP(per_port_buff_congest),
};

static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
{
        return ARRAY_SIZE(mlx5i_stats_grps);
}
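
/* The profile below is what plugs IPoIB into the generic mlx5e netdev
 * machinery; callbacks left NULL are simply not invoked for this
 * profile (e.g. no carrier updates, since the IB link state is owned
 * by the IB core rather than by this driver).
 */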
static const struct mlx5e_profile mlx5i_nic_profile = {
        .init              = mlx5i_init,
        .cleanup           = mlx5i_cleanup,
        .init_tx           = mlx5i_init_tx,
        .cleanup_tx        = mlx5i_cleanup_tx,
        .init_rx           = mlx5i_init_rx,
        .cleanup_rx        = mlx5i_cleanup_rx,
        .enable            = NULL, /* mlx5i_enable */
        .disable           = NULL, /* mlx5i_disable */
        .update_rx         = mlx5e_update_nic_rx,
        .update_stats      = NULL, /* mlx5i_update_stats */
        .update_carrier    = NULL, /* no HW update in IB link */
        .rx_handlers.handle_rx_cqe       = mlx5i_handle_rx_cqe,
        .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
        .max_tc            = MLX5I_MAX_NUM_TC,
        .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps        = mlx5i_stats_grps,
        .stats_grps_num    = mlx5i_stats_grps_num,
};

/* mlx5i netdev NDOs */

static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        struct mlx5e_channels new_channels = {};
        struct mlx5e_params *params;
        int err = 0;

        mutex_lock(&priv->state_lock);

        params = &priv->channels.params;

        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                params->sw_mtu = new_mtu;
                netdev->mtu = params->sw_mtu;
                goto out;
        }

        new_channels.params = *params;
        new_channels.params.sw_mtu = new_mtu;

        err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
        if (err)
                goto out;

        netdev->mtu = new_channels.params.sw_mtu;

out:
        mutex_unlock(&priv->state_lock);
        return err;
}

int mlx5i_dev_init(struct net_device *dev)
{
        struct mlx5e_priv *priv   = mlx5i_epriv(dev);
        struct mlx5i_priv *ipriv  = priv->ppriv;

        /* Set dev address using underlay QP: bytes 1..3 of the IPoIB
         * hardware address carry the QPN; the flags byte and the port
         * GID that follow are owned by the IPoIB ULP.
         */
        dev->dev_addr[1] = (ipriv->qp.qpn >> 16) & 0xff;
        dev->dev_addr[2] = (ipriv->qp.qpn >>  8) & 0xff;
        dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;

        /* Add QPN to net-device mapping to HT */
        mlx5i_pkey_add_qpn(dev, ipriv->qp.qpn);

        return 0;
}

int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return mlx5e_hwstamp_set(priv, ifr);
        case SIOCGHWTSTAMP:
                return mlx5e_hwstamp_get(priv, ifr);
        default:
                return -EOPNOTSUPP;
        }
}

void mlx5i_dev_cleanup(struct net_device *dev)
{
        struct mlx5e_priv *priv   = mlx5i_epriv(dev);
        struct mlx5i_priv *ipriv  = priv->ppriv;

        mlx5i_uninit_underlay_qp(priv);

        /* Delete QPN to net-device mapping from HT */
        mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
}
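
/* Bring-up order in mlx5i_open() below matters: the underlay QP must
 * reach RTS and be registered as an RX underlay QPN in the steering
 * tables before the channels are opened; mlx5i_close() tears things
 * down in reverse.
 */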
static int mlx5i_open(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;
        int err;

        mutex_lock(&epriv->state_lock);

        set_bit(MLX5E_STATE_OPENED, &epriv->state);

        err = mlx5i_init_underlay_qp(epriv);
        if (err) {
                mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
                goto err_clear_state_opened_flag;
        }

        err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
        if (err) {
                mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
                goto err_reset_qp;
        }

        err = mlx5e_open_channels(epriv, &epriv->channels);
        if (err)
                goto err_remove_fs_underlay_qp;

        epriv->profile->update_rx(epriv);
        mlx5e_activate_priv_channels(epriv);

        mutex_unlock(&epriv->state_lock);
        return 0;

err_remove_fs_underlay_qp:
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
err_reset_qp:
        mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
        clear_bit(MLX5E_STATE_OPENED, &epriv->state);
        mutex_unlock(&epriv->state_lock);
        return err;
}

static int mlx5i_close(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;

        /* The state may already be CLOSED in case a previous
         * configuration operation (e.g. an RX/TX queue size change)
         * that involves close & open failed.
         */
        mutex_lock(&epriv->state_lock);

        if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
                goto unlock;

        clear_bit(MLX5E_STATE_OPENED, &epriv->state);

        netif_carrier_off(epriv->netdev);
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
        mlx5e_deactivate_priv_channels(epriv);
        mlx5e_close_channels(&epriv->channels);
        mlx5i_uninit_underlay_qp(epriv);
unlock:
        mutex_unlock(&epriv->state_lock);
        return 0;
}

/* IPoIB RDMA netdev callbacks */
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid, int set_qkey,
                              u32 qkey)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev = epriv->mdev;
        struct mlx5i_priv *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn,
                      gid->raw);
        err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
        if (err)
                mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
                               ipriv->qp.qpn, gid->raw);

        if (set_qkey) {
                mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
                              netdev->name, qkey);
                ipriv->qkey = qkey;
        }

        return err;
}

static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev = epriv->mdev;
        struct mlx5i_priv *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn,
                      gid->raw);

        err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
        if (err)
                mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
                              ipriv->qp.qpn, gid->raw);

        return err;
}

static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
                      struct ib_ah *address, u32 dqpn)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(dev);
        struct mlx5e_txqsq *sq   = epriv->txq2sq[skb_get_queue_mapping(skb)];
        struct mlx5_ib_ah *mah   = to_mah(address);
        struct mlx5i_priv *ipriv = epriv->ppriv;

        return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey,
                             netdev_xmit_more());
}

static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
{
        struct mlx5i_priv *ipriv = netdev_priv(netdev);

        ipriv->pkey_index = (u16)id;
}
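
/* The pkey index stored by mlx5i_set_pkey_index() is only consumed in
 * mlx5i_init_underlay_qp() on the RST2INIT transition, so an index set
 * via ->set_id takes effect the next time the interface is opened.
 */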
static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
                return -EOPNOTSUPP;

        if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
                mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
                return -EOPNOTSUPP;
        }

        return 0;
}

static void mlx5_rdma_netdev_free(struct net_device *netdev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = priv->ppriv;
        const struct mlx5e_profile *profile = priv->profile;

        mlx5e_detach_netdev(priv);
        profile->cleanup(priv);

        if (!ipriv->sub_interface) {
                mlx5i_pkey_qpn_ht_cleanup(netdev);
                mlx5e_destroy_mdev_resources(priv->mdev);
        }
}

static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
{
        return mdev->mlx5e_res.pdn != 0;
}

static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
{
        if (mlx5_is_sub_interface(mdev))
                return mlx5i_pkey_get_profile();
        return &mlx5i_nic_profile;
}

static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
                              struct net_device *netdev, void *param)
{
        struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
        const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
        struct mlx5i_priv *ipriv;
        struct mlx5e_priv *epriv;
        struct rdma_netdev *rn;
        int err;

        ipriv = netdev_priv(netdev);
        epriv = mlx5i_epriv(netdev);

        ipriv->sub_interface = mlx5_is_sub_interface(mdev);
        if (!ipriv->sub_interface) {
                err = mlx5i_pkey_qpn_ht_init(netdev);
                if (err) {
                        mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
                        return err;
                }

                /* This should only be called once per mdev */
                err = mlx5e_create_mdev_resources(mdev);
                if (err)
                        goto destroy_ht;
        }

        err = prof->init(mdev, netdev, prof, ipriv);
        if (err) {
                mlx5_core_warn(mdev, "profile init failed, %d\n", err);
                goto destroy_mdev_resources;
        }

        err = mlx5e_attach_netdev(epriv);
        if (err)
                goto detach;
        netif_carrier_off(netdev);

        /* set rdma_netdev func pointers */
        rn = &ipriv->rn;
        rn->hca = ibdev;
        rn->send = mlx5i_xmit;
        rn->attach_mcast = mlx5i_attach_mcast;
        rn->detach_mcast = mlx5i_detach_mcast;
        rn->set_id = mlx5i_set_pkey_index;

        netdev->priv_destructor = mlx5_rdma_netdev_free;
        netdev->needs_free_netdev = 1;

        return 0;

detach:
        prof->cleanup(epriv);
destroy_mdev_resources:
        if (ipriv->sub_interface)
                return err;
        mlx5e_destroy_mdev_resources(mdev);
destroy_ht:
        mlx5i_pkey_qpn_ht_cleanup(netdev);
        return err;
}

int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
                            struct ib_device *device,
                            struct rdma_netdev_alloc_params *params)
{
        int nch;
        int rc;

        rc = mlx5i_check_required_hca_cap(mdev);
        if (rc)
                return rc;

        nch = mlx5e_get_max_num_channels(mdev);

        *params = (struct rdma_netdev_alloc_params){
                .sizeof_priv = sizeof(struct mlx5i_priv) +
                               sizeof(struct mlx5e_priv),
                .txqs = nch * MLX5E_MAX_NUM_TC,
                .rxqs = nch,
                .param = mdev,
                .initialize_rdma_netdev = mlx5_rdma_setup_rn,
        };

        return 0;
}
EXPORT_SYMBOL(mlx5_rdma_rn_get_params);
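
/*
 * Rough usage sketch (simplified): the IB device driver exposes these
 * params through its rdma_netdev_get_params() op; the IPoIB ULP then
 * asks the RDMA core to allocate a net_device with the advertised
 * txqs/rxqs and priv size, and the core calls initialize_rdma_netdev
 * (mlx5_rdma_setup_rn() above) before the netdev is registered.
 */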