/*-
 * Copyright (c) 2013-2021, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/srq.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/vport.h>
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_lib/mlx5.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>
#include <dev/mlx5/mlx5_core/fs_core.h>
#ifdef PCI_IOV
#include <sys/nv.h>
#include <dev/pci/pci_iov.h>
#include <sys/iov_schema.h>
#endif

static const char mlx5_version[] = "Mellanox Core driver "
	DRIVER_VERSION " (" DRIVER_RELDATE ")";
MODULE_DESCRIPTION("Mellanox ConnectX-4 and onwards core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
MODULE_VERSION(mlx5, 1);

SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "mlx5 hardware controls");

int mlx5_core_debug_mask;
SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN,
    &mlx5_core_debug_mask, 0,
    "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int mlx5_prof_sel = MLX5_DEFAULT_PROF;
SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN,
    &mlx5_prof_sel, 0,
    "profile selector. Valid range 0 - 2");

static int mlx5_fast_unload_enabled = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN,
    &mlx5_fast_unload_enabled, 0,
    "Set to enable fast unload. Clear to disable.");

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head list;
	struct mlx5_interface *intf;
	void *context;
};

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

static struct mlx5_profile profiles[] = {
	[0] = {
		.mask = 0,
	},
	[1] = {
		.mask = MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp = 12,
	},
	[2] = {
		.mask = MLX5_PROF_MASK_QP_SIZE |
			MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp = 17,
		.mr_cache[0] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[1] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[2] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[3] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[4] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[5] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[6] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[7] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[8] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[9] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[10] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[11] = {
			.size = 500,
			.limit = 250
		},
		.mr_cache[12] = {
			.size = 64,
			.limit = 32
		},
		.mr_cache[13] = {
			.size = 32,
			.limit = 16
		},
		.mr_cache[14] = {
			.size = 16,
			.limit = 8
		},
	},
	[3] = {
		.mask = MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp = 17,
	},
};

static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
	const size_t driver_ver_sz =
	    MLX5_FLD_SZ_BYTES(set_driver_version_in, driver_version);
	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
	u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {};
	char *string;

	if (!MLX5_CAP_GEN(dev, driver_version))
		return;

	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

	snprintf(string, driver_ver_sz, "FreeBSD,mlx5_core,%u.%u.%u," DRIVER_VERSION,
	    __FreeBSD_version / 100000, (__FreeBSD_version / 1000) % 100,
	    __FreeBSD_version % 1000);

	/* Send the command */
	MLX5_SET(set_driver_version_in, in, opcode,
	    MLX5_CMD_OP_SET_DRIVER_VERSION);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

#ifdef PCI_IOV
static const char iov_mac_addr_name[] = "mac-addr";
static const char iov_node_guid_name[] = "node-guid";
static const char iov_port_guid_name[] = "port-guid";
#endif

static int set_dma_caps(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

int mlx5_pci_read_power_status(struct mlx5_core_dev *dev,
    u16 *p_power, u8 *p_status)
{
	u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {};
	int err;

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
	    MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0);

	*p_status = MLX5_GET(mpein_reg, out, pwr_status);
	*p_power = MLX5_GET(mpein_reg, out, pci_power);
	return err;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}

static int request_bar(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		mlx5_core_err(dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int limit = dev->msix_eqvec;
	int nvec = MLX5_EQ_VEC_COMP_BASE;
	int i;

	if (limit > 0)
		nvec += limit;
	else
		nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();

	if (nvec > num_eqs)
		nvec = num_eqs;
	if (nvec > 256)
		nvec = 256;	/* limit of firmware API */
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
	    MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8 he;
	u8 rsvd[15];
};


#define	CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT |
				MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
};

static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}

static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
    enum mlx5_cap_type cap_type,
    enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
		    "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
		    cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		    MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		    MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
		    "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
		    cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);

	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}

static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
	    capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	    MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
	    mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
	    128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
	    to_fw_pkey_sz(dev, 128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
		    prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4K.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	/* enable drain sigerr */
	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(set_ctx);
	return err;
}

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
	    MLX5_CAP_ATOMIC(dev,
	    supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
	    MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
	    MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz);

	kfree(set_ctx);
	return err;
}

static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
	    !MLX5_CAP_GEN(dev, roce))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
	    &he_out, sizeof(he_out),
	    MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
	int ret = 0;

	/* Disable local_lb by default */
	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		ret = mlx5_nic_vport_update_local_lb(dev, false);

	return ret;
}

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
			mlx5_core_dbg(dev, "Only ISSI 0 is supported\n");
			return 0;
		}

		mlx5_core_err(dev, "failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));
		if (err) {
			mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0)) {
		return 0;
	}

	return -ENOTSUPP;
}


int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
			    eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}

static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);

		err = mlx5_create_map_eq(dev, eq,
		    i + MLX5_EQ_VEC_COMP_BASE, nent, 0);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}

static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
    u32 warn_time_mili)
{
	int warn = jiffies + msecs_to_jiffies(warn_time_mili);
	int end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	MPASS(max_wait_mili > warn_time_mili);

	while (fw_initializing(dev) == 1) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		if (warn_time_mili && time_after(jiffies, warn)) {
			mlx5_core_warn(dev,
			    "Waiting for FW initialization, timeout abort in %u s\n",
			    (unsigned)(jiffies_to_msecs(end - warn) / 1000));
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(FW_INIT_WAIT_MS);
	}

	if (err != 0)
		mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n",
		    ioread32be(&dev->iseg->initializing));

	return err;
}

/*
 * Interface plumbing: one mlx5_device_context is created per
 * (interface, core device) pair by calling intf->add() and linking the
 * result onto priv->ctx_list, so protocol drivers can later be looked up
 * via mlx5_get_protocol_dev() and notified through intf->event().
 */
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kzalloc_node(sizeof(*dev_ctx), GFP_KERNEL, priv->numa_node);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	CURVNET_SET_QUIET(vnet0);
	dev_ctx->context = intf->add(dev);
	CURVNET_RESTORE();

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

int
mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

void
mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

static int mlx5_auto_fw_update;
SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &mlx5_auto_fw_update, 0,
    "Allow automatic firmware update on driver start");
static int
mlx5_firmware_update(struct mlx5_core_dev *dev)
{
	const struct firmware *fw;
	int err;

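	/*
	 * Note: the tunable is fetched explicitly here; the sysctl above is
	 * registered with CTLFLAG_NOFETCH, which (as far as I can tell)
	 * prevents the kernel environment value from being pulled in
	 * automatically at registration time.
	 */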
	TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update);
	if (!mlx5_auto_fw_update)
		return (0);
	fw = firmware_get("mlx5fw_mfa");
	if (fw) {
		err = mlx5_firmware_flash(dev, fw);
		firmware_put(fw, FIRMWARE_UNLOAD);
	}
	else
		return (-ENOENT);

	return err;
}

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	device_t bsddev;
	int err;

	pdev = dev->pdev;
	bsddev = pdev->dev.bsddev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);
err_dbg:
	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
#ifdef PCI_IOV
	if (MLX5_CAP_GEN(dev, eswitch_flow_table))
		pci_iov_detach(dev->pdev->dev.bsddev);
#endif
	iounmap(dev->iseg);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}

static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	int err;

	err = mlx5_vsc_find_cap(dev);
	if (err)
		mlx5_core_warn(dev, "Unable to find vendor specific capabilities\n");

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto out;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto out;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto out;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	err = mlx5_init_cq_table(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize cq table\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	mlx5_init_reserved_gids(dev);
	mlx5_fpga_init(dev);

#ifdef RATELIMIT
	err = mlx5_init_rl_table(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}
#endif
	return 0;

#ifdef RATELIMIT
err_tables_cleanup:
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
#endif

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

out:
	return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
#ifdef RATELIMIT
	mlx5_cleanup_rl_table(dev);
#endif
	mlx5_fpga_cleanup(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
}

static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
    bool boot)
{
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}

	mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n",
	    fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));

	/*
	 * On load removing any previous indication of internal error,
	 * device is up
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	/* wait for firmware to accept initialization segments configurations
	 */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI,
	    FW_INIT_WARN_MESSAGE_INTERVAL);
	if (err) {
		dev_err(&dev->pdev->dev,
		    "Firmware over %d MS in pre-initializing state, aborting\n",
		    FW_PRE_INIT_TIMEOUT_MILI);
		goto out_err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev,
		    "Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
	if (err) {
		mlx5_core_err(dev,
		    "Firmware over %d MS in initializing state, aborting\n",
		    FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_pagealloc_start failed\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_pagealloc_stop;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);

	mlx5_start_health_poll(dev);

	if (boot && (err = mlx5_init_once(dev, priv))) {
		mlx5_core_err(dev, "sw objs init failed\n");
		goto err_stop_poll;
	}

	dev->priv.uar = mlx5_get_uars_page(dev);
	if (IS_ERR(dev->priv.uar)) {
		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
		err = PTR_ERR(dev->priv.uar);
		goto err_cleanup_once;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		mlx5_core_err(dev, "enable msix failed\n");
		goto err_cleanup_uar;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to start pages and async EQs\n");
		goto err_disable_msix;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "flow steering init %d\n", err);
		goto err_free_comp_eqs;
	}

	err = mlx5_core_set_hca_defaults(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to set HCA defaults %d\n", err);
		goto err_free_comp_eqs;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "mpfs init failed %d\n", err);
		goto err_fs;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		mlx5_core_err(dev, "fpga device start failed %d\n", err);
		goto err_mpfs;
	}

	err = mlx5_register_device(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_register_device failed %d\n", err);
		goto err_fpga;
	}

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

out:
	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_fpga:
	mlx5_fpga_device_stop(dev);

err_mpfs:
	mlx5_mpfs_destroy(dev);

err_fs:
	mlx5_cleanup_fs(dev);

err_free_comp_eqs:
	free_comp_eqs(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_disable_msix:
	mlx5_disable_msix(dev);

err_cleanup_uar:
	mlx5_put_uars_page(dev, dev->priv.uar);

err_cleanup_once:
	if (boot)
		mlx5_cleanup_once(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev, boot);
	if (mlx5_cmd_teardown_hca(dev)) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev);

err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}

static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
    bool cleanup)
{
	int err = 0;

	if (cleanup)
		mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	mlx5_unregister_device(dev);

	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_fpga_device_stop(dev);
	mlx5_mpfs_destroy(dev);
	mlx5_cleanup_fs(dev);
	mlx5_wait_for_reclaim_vfs_pages(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_disable_msix(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev, cleanup);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_cmd_cleanup(dev);

out:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}

void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
    unsigned long param)
{
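	/*
	 * Fan the event out to every registered interface context; the
	 * callbacks below run with the ctx_lock spinlock held and local
	 * interrupts disabled.
	 */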
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
	    enum mlx5_dev_event event,
	    void *data);
};

#define	MLX5_STATS_DESC(a, b, c, d, e, ...) d, e,

#define	MLX5_PORT_MODULE_ERROR_STATS(m)				\
m(+1, u64, power_budget_exceeded, "power_budget", "Module Power Budget Exceeded") \
m(+1, u64, long_range, "long_range", "Module Long Range for non MLNX cable/module") \
m(+1, u64, bus_stuck, "bus_stuck", "Module Bus stuck(I2C or data shorted)") \
m(+1, u64, no_eeprom, "no_eeprom", "No EEPROM/retry timeout") \
m(+1, u64, enforce_part_number, "enforce_part_number", "Module Enforce part number list") \
m(+1, u64, unknown_id, "unknown_id", "Module Unknown identifier") \
m(+1, u64, high_temp, "high_temp", "Module High Temperature") \
m(+1, u64, cable_shorted, "cable_shorted", "Module Cable is shorted") \
m(+1, u64, pmd_type_not_enabled, "pmd_type_not_enabled", "PMD type is not enabled") \
m(+1, u64, laster_tec_failure, "laster_tec_failure", "Laster TEC failure") \
m(+1, u64, high_current, "high_current", "High current") \
m(+1, u64, high_voltage, "high_voltage", "High voltage") \
m(+1, u64, pcie_sys_power_slot_exceeded, "pcie_sys_power_slot_exceeded", "PCIe system power slot Exceeded") \
m(+1, u64, high_power, "high_power", "High power") \
m(+1, u64, module_state_machine_fault, "module_state_machine_fault", "Module State Machine fault")

static const char *mlx5_pme_err_desc[] = {
	MLX5_PORT_MODULE_ERROR_STATS(MLX5_STATS_DESC)
};

static int init_one(struct pci_dev *pdev,
    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	device_t bsddev = pdev->dev.bsddev;
#ifdef PCI_IOV
	nvlist_t *pf_schema, *vf_schema;
	int num_vfs, sriov_pos;
#endif
	int i,err;
	int numa_node;
	struct sysctl_oid *pme_sysctl_node;
	struct sysctl_oid *pme_err_sysctl_node;
	struct sysctl_oid *cap_sysctl_node;
	struct sysctl_oid *current_cap_sysctl_node;
	struct sysctl_oid *max_cap_sysctl_node;

	printk_once("mlx5: %s", mlx5_version);

	numa_node = dev_to_node(&pdev->dev);

	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, numa_node);

	priv = &dev->priv;
	priv->numa_node = numa_node;

	if (id)
		priv->pci_dev_data = id->driver_data;

	if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) {
		device_printf(bsddev,
		    "WARN: selected profile out of range, selecting default (%d)\n",
		    MLX5_DEFAULT_PROF);
		mlx5_prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profiles[mlx5_prof_sel];
	dev->pdev = pdev;
	dev->event = mlx5_core_event;

	/* Set desc */
	device_set_desc(bsddev, mlx5_version);

	sysctl_ctx_init(&dev->sysctl_ctx);
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
	    "Maximum number of MSIX event queue vectors, if set");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0,
	    "0:Invalid 1:Sufficient 2:Insufficient");
	SYSCTL_ADD_INT(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0,
	    "Current power value in Watts");

	pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "pme_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "Port module event statistics");
	if (pme_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node),
	    OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "Port module event error statistics");
	if (pme_err_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_plug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED],
	    0, "Number of time module plugged");
	SYSCTL_ADD_U64(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
	    "module_unplug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED],
	    0, "Number of time module unplugged");
	for (i = 0 ; i < MLX5_MODULE_EVENT_ERROR_NUM; i++) {
		SYSCTL_ADD_U64(&dev->sysctl_ctx,
		    SYSCTL_CHILDREN(pme_err_sysctl_node), OID_AUTO,
		    mlx5_pme_err_desc[2 * i], CTLFLAG_RD | CTLFLAG_MPSAFE,
		    &dev->priv.pme_stats.error_counters[i],
		    0, mlx5_pme_err_desc[2 * i + 1]);
	}

	cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
	    OID_AUTO, "caps", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "hardware capabilities raw bitstrings");
	if (cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	current_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "current", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "");
	if (current_cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	max_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "max", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "");
	if (max_cap_sysctl_node == NULL) {
		err = -ENOMEM;
		goto clean_sysctl_ctx;
	}
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_GENERAL],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_GENERAL],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ODP],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ODP],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ATOMIC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ATOMIC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ROCE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ROCE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_IPOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_IPOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_ESWITCH],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_ESWITCH],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_SNAPSHOT],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_SNAPSHOT],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_VECTOR_CALC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_VECTOR_CALC],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_QOS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_QOS],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(current_cap_sysctl_node),
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_cur[MLX5_CAP_DEBUG],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(max_cap_sysctl_node),
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->hca_caps_max[MLX5_CAP_DEBUG],
	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "pcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.pcam, sizeof(dev->caps.pcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "mcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.mcam, sizeof(dev->caps.mcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "qcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.qcam, sizeof(dev->caps.qcam), "IU", "");
	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
	    SYSCTL_CHILDREN(cap_sysctl_node),
	    OID_AUTO, "fpga", CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &dev->caps.fpga, sizeof(dev->caps.fpga), "IU", "");

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_health_init failed %d\n", err);
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);
		goto clean_health;
	}

	mlx5_fwdump_prep(dev);

	mlx5_firmware_update(dev);

#ifdef PCI_IOV
	if (MLX5_CAP_GEN(dev, vport_group_manager)) {
		if (pci_find_extcap(bsddev, PCIZ_SRIOV, &sriov_pos) == 0) {
			num_vfs = pci_read_config(bsddev, sriov_pos +
			    PCIR_SRIOV_TOTAL_VFS, 2);
		} else {
			mlx5_core_info(dev, "cannot find SR-IOV PCIe cap\n");
			num_vfs = 0;
		}
		err = mlx5_eswitch_init(dev, 1 + num_vfs);
		if (err == 0) {
			pf_schema = pci_iov_schema_alloc_node();
			vf_schema = pci_iov_schema_alloc_node();
			pci_iov_schema_add_unicast_mac(vf_schema,
			    iov_mac_addr_name, 0, NULL);
			pci_iov_schema_add_uint64(vf_schema, iov_node_guid_name,
			    0, 0);
			pci_iov_schema_add_uint64(vf_schema, iov_port_guid_name,
			    0, 0);
			err = pci_iov_attach(bsddev, pf_schema, vf_schema);
			if (err != 0) {
				device_printf(bsddev,
				    "Failed to initialize SR-IOV support, error %d\n",
				    err);
			}
		} else {
			mlx5_core_err(dev, "eswitch init failed, error %d\n",
			    err);
		}
	}
#endif

	pci_save_state(pdev);
	return 0;

clean_health:
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	mtx_destroy(&dev->dump_lock);
clean_sysctl_ctx:
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

#ifdef PCI_IOV
	pci_iov_detach(pdev->dev.bsddev);
	mlx5_eswitch_disable_sriov(priv->eswitch);
#endif

	if (mlx5_unload_one(dev, priv, true)) {
		mlx5_core_err(dev, "mlx5_unload_one() failed, leaked %lld bytes\n",
		    (long long)(dev->priv.fw_pages * MLX5_ADAPTER_PAGE_SIZE));
	}

	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_fwdump_clean(dev);
	mlx5_pci_close(dev, priv);
	mtx_destroy(&dev->dump_lock);
	pci_set_drvdata(pdev, NULL);
	sysctl_ctx_free(&dev->sysctl_ctx);
	kfree(dev);
}

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
    pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	mlx5_core_info(dev, "%s was called\n", __func__);
	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, priv, false);

	if (state) {
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
	    PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err = 0;

	mlx5_core_info(dev,"%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_enable_device failed with error code: %d\n"
		    ,err);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/* wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non zero value which is different than 0xffffffff
 */
static void wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 count;
	u16 did;
	int i;

	/* Wait for firmware to be ready after reset */
	msleep(1000);
	for (i = 0; i < niter; i++) {
		if (pci_read_config_word(pdev, 2, &did)) {
			mlx5_core_warn(dev, "failed reading config word\n");
			break;
		}
		if (did == pdev->device) {
			mlx5_core_info(dev,
			    "device ID correctly read after %d iterations\n", i);
			break;
		}
		msleep(50);
	}
	if (i == niter)
		mlx5_core_warn(dev, "could not read device ID\n");

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			mlx5_core_info(dev,
			    "Counter value 0x%x after %d iterations\n", count, i);
			break;
		}
		msleep(50);
	}

	if (i == niter)
		mlx5_core_warn(dev, "could not read device ID\n");
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	mlx5_core_info(dev,"%s was called\n", __func__);

	wait_vital(pdev);

	err = mlx5_load_one(dev, priv, false);
	if (err)
		mlx5_core_err(dev,
		    "mlx5_load_one failed with error code: %d\n" ,err);
	else
		mlx5_core_info(dev,"device recovered\n");
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset = mlx5_pci_slot_reset,
	.resume = mlx5_pci_resume
};

#ifdef PCI_IOV
static int
mlx5_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;
	int err;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	if (priv->eswitch == NULL)
		return (ENXIO);
	if (priv->eswitch->total_vports < num_vfs + 1)
		num_vfs = priv->eswitch->total_vports - 1;
	err = mlx5_eswitch_enable_sriov(priv->eswitch, num_vfs);
	return (-err);
}

static void
mlx5_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	mlx5_eswitch_disable_sriov(priv->eswitch);
}

static int
mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	struct mlx5_core_dev *core_dev;
	struct mlx5_priv *priv;
	const void *mac;
	size_t mac_size;
	uint64_t node_guid, port_guid;
	int error;

	pdev = device_get_softc(dev);
	core_dev = pci_get_drvdata(pdev);
	priv = &core_dev->priv;

	if (vfnum + 1 >= priv->eswitch->total_vports)
		return (ENXIO);

	if (nvlist_exists_binary(vf_config, iov_mac_addr_name)) {
		mac = nvlist_get_binary(vf_config, iov_mac_addr_name,
		    &mac_size);
		error = -mlx5_eswitch_set_vport_mac(priv->eswitch,
		    vfnum + 1, __DECONST(u8 *, mac));
		if (error != 0) {
			mlx5_core_err(core_dev,
			    "setting MAC for VF %d failed, error %d\n",
			    vfnum + 1, error);
		}
	}

	if (nvlist_exists_number(vf_config, iov_node_guid_name)) {
		node_guid = nvlist_get_number(vf_config, iov_node_guid_name);
		error = -mlx5_modify_nic_vport_node_guid(core_dev, vfnum + 1,
		    node_guid);
		if (error != 0) {
			mlx5_core_err(core_dev,
			    "modifying node GUID for VF %d failed, error %d\n",
			    vfnum + 1, error);
		}
	}

	if (nvlist_exists_number(vf_config, iov_port_guid_name)) {
		port_guid = nvlist_get_number(vf_config, iov_port_guid_name);
		error = -mlx5_modify_nic_vport_port_guid(core_dev, vfnum + 1,
		    port_guid);
		if (error != 0) {
			mlx5_core_err(core_dev,
			    "modifying port GUID for VF %d failed, error %d\n",
			    vfnum + 1, error);
		}
	}

	error = -mlx5_eswitch_set_vport_state(priv->eswitch, vfnum + 1,
	    VPORT_STATE_FOLLOW);
	if (error != 0) {
		mlx5_core_err(core_dev,
		    "upping vport for VF %d failed, error %d\n",
		    vfnum + 1, error);
	}
	error = -mlx5_core_enable_hca(core_dev, vfnum + 1);
	if (error != 0) {
		mlx5_core_err(core_dev, "enabling VF %d failed, error %d\n",
		    vfnum + 1, error);
	}
	return (error);
}
#endif

static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown, force_teardown;
	int err;

	if (!mlx5_fast_unload_enabled) {
		mlx5_core_dbg(dev, "fast unload is disabled by user\n");
		return -EOPNOTSUPP;
	}

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	err = mlx5_cmd_fast_teardown_hca(dev);
	if (!err)
		goto done;

	err = mlx5_cmd_force_teardown_hca(dev);
	if (!err)
		goto done;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err);
	mlx5_start_health_poll(dev);
	return err;
done:
	mlx5_enter_error_state(dev, true);
	return 0;
}

static void mlx5_shutdown_disable_interrupts(struct mlx5_core_dev *mdev)
{
	int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
	int x;

	mdev->priv.disable_irqs = 1;

	/* wait for all IRQ handlers to finish processing */
	for (x = 0; x != nvec; x++)
		synchronize_irq(mdev->priv.msix_arr[x].vector);
}

static void shutdown_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	/* enter polling mode */
	mlx5_cmd_use_polling(dev);

	set_bit(MLX5_INTERFACE_STATE_TEARDOWN, &dev->intf_state);

	/* disable all interrupts */
	mlx5_shutdown_disable_interrupts(dev);

	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev, priv, false);
	mlx5_pci_disable_device(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 4121) }, /* ConnectX-5 Ex */
	{ PCI_VDEVICE(MELLANOX, 4122) }, /* ConnectX-5 Ex VF */
	{ PCI_VDEVICE(MELLANOX, 4123) }, /* ConnectX-6 */
	{ PCI_VDEVICE(MELLANOX, 4124) }, /* ConnectX-6 VF */
	{ PCI_VDEVICE(MELLANOX, 4125) }, /* ConnectX-6 Dx */
	{ PCI_VDEVICE(MELLANOX, 4126) }, /* ConnectX Family mlx5Gen Virtual Function */
	{ PCI_VDEVICE(MELLANOX, 4127) }, /* ConnectX-6 LX */
	{ PCI_VDEVICE(MELLANOX, 4128) },
	{ PCI_VDEVICE(MELLANOX, 4129) }, /* ConnectX-7 */
	{ PCI_VDEVICE(MELLANOX, 4130) },
	{ PCI_VDEVICE(MELLANOX, 4131) }, /* ConnectX-8 */
	{ PCI_VDEVICE(MELLANOX, 4132) },
	{ PCI_VDEVICE(MELLANOX, 4133) },
	{ PCI_VDEVICE(MELLANOX, 4134) },
	{ PCI_VDEVICE(MELLANOX, 4135) },
	{ PCI_VDEVICE(MELLANOX, 4136) },
	{ PCI_VDEVICE(MELLANOX, 4137) },
	{ PCI_VDEVICE(MELLANOX, 4138) },
	{ PCI_VDEVICE(MELLANOX, 4139) },
	{ PCI_VDEVICE(MELLANOX, 4140) },
	{ PCI_VDEVICE(MELLANOX, 4141) },
	{ PCI_VDEVICE(MELLANOX, 4142) },
	{ PCI_VDEVICE(MELLANOX, 4143) },
	{ PCI_VDEVICE(MELLANOX, 4144) },
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2d3) }, /* BlueField integrated ConnectX-5 network controller VF */
	{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */
	{ }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);
}

struct pci_driver mlx5_core_driver = {
	.name = DRIVER_NAME,
	.id_table = mlx5_core_pci_table,
	.shutdown = shutdown_one,
	.probe = init_one,
	.remove = remove_one,
	.err_handler = &mlx5_err_handler,
#ifdef PCI_IOV
	.bsd_iov_init = mlx5_iov_init,
	.bsd_iov_uninit = mlx5_iov_uninit,
	.bsd_iov_add_vf = mlx5_iov_add_vf,
#endif
};

static int __init init(void)
{
	int err;

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_ctl_init();
	if (err)
		goto err_ctl;

	return 0;

err_ctl:
	pci_unregister_driver(&mlx5_core_driver);

err_debug:
	return err;
}

static void __exit cleanup(void)
{
	mlx5_ctl_fini();
	pci_unregister_driver(&mlx5_core_driver);
}

module_init_order(init, SI_ORDER_FIRST);
module_exit_order(cleanup, SI_ORDER_FIRST);