1 /*- 2 * Copyright (c) 2013-2021, Mellanox Technologies, Ltd. All rights reserved. 3 * Copyright (c) 2022 NVIDIA corporation & affiliates. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 #include "opt_rss.h" 30 #include "opt_ratelimit.h" 31 32 #include <linux/kmod.h> 33 #include <linux/module.h> 34 #include <linux/errno.h> 35 #include <linux/pci.h> 36 #include <linux/dma-mapping.h> 37 #include <linux/slab.h> 38 #include <linux/io-mapping.h> 39 #include <linux/interrupt.h> 40 #include <linux/hardirq.h> 41 #include <dev/mlx5/driver.h> 42 #include <dev/mlx5/cq.h> 43 #include <dev/mlx5/qp.h> 44 #include <dev/mlx5/srq.h> 45 #include <dev/mlx5/mpfs.h> 46 #include <dev/mlx5/vport.h> 47 #include <linux/delay.h> 48 #include <dev/mlx5/mlx5_ifc.h> 49 #include <dev/mlx5/mlx5_fpga/core.h> 50 #include <dev/mlx5/mlx5_lib/mlx5.h> 51 #include <dev/mlx5/mlx5_core/mlx5_core.h> 52 #include <dev/mlx5/mlx5_core/eswitch.h> 53 #include <dev/mlx5/mlx5_core/fs_core.h> 54 #include <dev/mlx5/mlx5_core/diag_cnt.h> 55 #ifdef PCI_IOV 56 #include <sys/nv.h> 57 #include <dev/pci/pci_iov.h> 58 #include <sys/iov_schema.h> 59 #endif 60 61 static const char mlx5_version[] = "Mellanox Core driver " 62 DRIVER_VERSION " (" DRIVER_RELDATE ")"; 63 MODULE_DESCRIPTION("Mellanox ConnectX-4 and onwards core driver"); 64 MODULE_LICENSE("Dual BSD/GPL"); 65 MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1); 66 MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1); 67 MODULE_DEPEND(mlx5, firmware, 1, 1, 1); 68 MODULE_VERSION(mlx5, 1); 69 70 SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 71 "mlx5 hardware controls"); 72 73 int mlx5_core_debug_mask; 74 SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN, 75 &mlx5_core_debug_mask, 0, 76 "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); 77 78 #define MLX5_DEFAULT_PROF 2 79 static int mlx5_prof_sel = MLX5_DEFAULT_PROF; 80 SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN, 81 &mlx5_prof_sel, 0, 82 "profile selector. Valid range 0 - 2"); 83 84 static int mlx5_fast_unload_enabled = 1; 85 SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN, 86 &mlx5_fast_unload_enabled, 0, 87 "Set to enable fast unload. Clear to disable."); 88 89 static int mlx5_core_comp_eq_size = 1024; 90 SYSCTL_INT(_hw_mlx5, OID_AUTO, comp_eq_size, CTLFLAG_RDTUN | CTLFLAG_MPSAFE, 91 &mlx5_core_comp_eq_size, 0, 92 "Set default completion EQ size between 1024 and 16384 inclusivly. Value should be power of two."); 93 94 static LIST_HEAD(intf_list); 95 static LIST_HEAD(dev_list); 96 static DEFINE_MUTEX(intf_mutex); 97 98 struct mlx5_device_context { 99 struct list_head list; 100 struct mlx5_interface *intf; 101 void *context; 102 }; 103 104 enum { 105 MLX5_ATOMIC_REQ_MODE_BE = 0x0, 106 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1, 107 }; 108 109 static struct mlx5_profile profiles[] = { 110 [0] = { 111 .mask = 0, 112 }, 113 [1] = { 114 .mask = MLX5_PROF_MASK_QP_SIZE, 115 .log_max_qp = 12, 116 }, 117 [2] = { 118 .mask = MLX5_PROF_MASK_QP_SIZE | 119 MLX5_PROF_MASK_MR_CACHE, 120 .log_max_qp = 17, 121 .mr_cache[0] = { 122 .size = 500, 123 .limit = 250 124 }, 125 .mr_cache[1] = { 126 .size = 500, 127 .limit = 250 128 }, 129 .mr_cache[2] = { 130 .size = 500, 131 .limit = 250 132 }, 133 .mr_cache[3] = { 134 .size = 500, 135 .limit = 250 136 }, 137 .mr_cache[4] = { 138 .size = 500, 139 .limit = 250 140 }, 141 .mr_cache[5] = { 142 .size = 500, 143 .limit = 250 144 }, 145 .mr_cache[6] = { 146 .size = 500, 147 .limit = 250 148 }, 149 .mr_cache[7] = { 150 .size = 500, 151 .limit = 250 152 }, 153 .mr_cache[8] = { 154 .size = 500, 155 .limit = 250 156 }, 157 .mr_cache[9] = { 158 .size = 500, 159 .limit = 250 160 }, 161 .mr_cache[10] = { 162 .size = 500, 163 .limit = 250 164 }, 165 .mr_cache[11] = { 166 .size = 500, 167 .limit = 250 168 }, 169 .mr_cache[12] = { 170 .size = 64, 171 .limit = 32 172 }, 173 .mr_cache[13] = { 174 .size = 32, 175 .limit = 16 176 }, 177 .mr_cache[14] = { 178 .size = 16, 179 .limit = 8 180 }, 181 }, 182 [3] = { 183 .mask = MLX5_PROF_MASK_QP_SIZE, 184 .log_max_qp = 17, 185 }, 186 }; 187 188 static int 189 mlx5_core_get_comp_eq_size(void) 190 { 191 int value = mlx5_core_comp_eq_size; 192 193 if (value < 1024) 194 value = 1024; 195 else if (value > 16384) 196 value = 16384; 197 198 /* make value power of two, rounded down */ 199 while (value & (value - 1)) 200 value &= (value - 1); 201 return (value); 202 } 203 204 static void mlx5_set_driver_version(struct mlx5_core_dev *dev) 205 { 206 const size_t driver_ver_sz = 207 MLX5_FLD_SZ_BYTES(set_driver_version_in, driver_version); 208 u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {}; 209 u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {}; 210 char *string; 211 212 if (!MLX5_CAP_GEN(dev, driver_version)) 213 return; 214 215 string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version); 216 217 snprintf(string, driver_ver_sz, "FreeBSD,mlx5_core,%u.%u.%u," DRIVER_VERSION, 218 __FreeBSD_version / 100000, (__FreeBSD_version / 1000) % 100, 219 __FreeBSD_version % 1000); 220 221 /* Send the command */ 222 MLX5_SET(set_driver_version_in, in, opcode, 223 MLX5_CMD_OP_SET_DRIVER_VERSION); 224 225 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 226 } 227 228 #ifdef PCI_IOV 229 static const char iov_mac_addr_name[] = "mac-addr"; 230 static const char iov_node_guid_name[] = "node-guid"; 231 static const char iov_port_guid_name[] = "port-guid"; 232 #endif 233 234 static int set_dma_caps(struct pci_dev *pdev) 235 { 236 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 237 int err; 238 239 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 240 if (err) { 241 mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n"); 242 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 243 if (err) { 244 mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n"); 245 return err; 246 } 247 } 248 249 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 250 if (err) { 251 mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n"); 252 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 253 if (err) { 254 mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n"); 255 return err; 256 } 257 } 258 259 dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); 260 return err; 261 } 262 263 int mlx5_pci_read_power_status(struct mlx5_core_dev *dev, 264 u16 *p_power, u8 *p_status) 265 { 266 u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {}; 267 u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {}; 268 int err; 269 270 err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), 271 MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0); 272 273 *p_status = MLX5_GET(mpein_reg, out, pwr_status); 274 *p_power = MLX5_GET(mpein_reg, out, pci_power); 275 return err; 276 } 277 278 static int mlx5_pci_enable_device(struct mlx5_core_dev *dev) 279 { 280 struct pci_dev *pdev = dev->pdev; 281 int err = 0; 282 283 mutex_lock(&dev->pci_status_mutex); 284 if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) { 285 err = pci_enable_device(pdev); 286 if (!err) 287 dev->pci_status = MLX5_PCI_STATUS_ENABLED; 288 } 289 mutex_unlock(&dev->pci_status_mutex); 290 291 return err; 292 } 293 294 static void mlx5_pci_disable_device(struct mlx5_core_dev *dev) 295 { 296 struct pci_dev *pdev = dev->pdev; 297 298 mutex_lock(&dev->pci_status_mutex); 299 if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) { 300 pci_disable_device(pdev); 301 dev->pci_status = MLX5_PCI_STATUS_DISABLED; 302 } 303 mutex_unlock(&dev->pci_status_mutex); 304 } 305 306 static int request_bar(struct pci_dev *pdev) 307 { 308 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 309 int err = 0; 310 311 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 312 mlx5_core_err(dev, "Missing registers BAR, aborting\n"); 313 return -ENODEV; 314 } 315 316 err = pci_request_regions(pdev, DRIVER_NAME); 317 if (err) 318 mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n"); 319 320 return err; 321 } 322 323 static void release_bar(struct pci_dev *pdev) 324 { 325 pci_release_regions(pdev); 326 } 327 328 static int mlx5_enable_msix(struct mlx5_core_dev *dev) 329 { 330 struct mlx5_priv *priv = &dev->priv; 331 struct mlx5_eq_table *table = &priv->eq_table; 332 int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); 333 int limit = dev->msix_eqvec; 334 int nvec = MLX5_EQ_VEC_COMP_BASE; 335 int i; 336 337 if (limit > 0) 338 nvec += limit; 339 else 340 nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus(); 341 342 if (nvec > num_eqs) 343 nvec = num_eqs; 344 if (nvec > 256) 345 nvec = 256; /* limit of firmware API */ 346 if (nvec <= MLX5_EQ_VEC_COMP_BASE) 347 return -ENOMEM; 348 349 priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL); 350 351 for (i = 0; i < nvec; i++) 352 priv->msix_arr[i].entry = i; 353 354 nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr, 355 MLX5_EQ_VEC_COMP_BASE + 1, nvec); 356 if (nvec < 0) 357 return nvec; 358 359 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; 360 return 0; 361 } 362 363 static void mlx5_disable_msix(struct mlx5_core_dev *dev) 364 { 365 struct mlx5_priv *priv = &dev->priv; 366 367 pci_disable_msix(dev->pdev); 368 kfree(priv->msix_arr); 369 } 370 371 struct mlx5_reg_host_endianess { 372 u8 he; 373 u8 rsvd[15]; 374 }; 375 376 377 #define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos)) 378 379 enum { 380 MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) | 381 MLX5_DEV_CAP_FLAG_DCT | 382 MLX5_DEV_CAP_FLAG_DRAIN_SIGERR, 383 }; 384 385 static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size) 386 { 387 switch (size) { 388 case 128: 389 return 0; 390 case 256: 391 return 1; 392 case 512: 393 return 2; 394 case 1024: 395 return 3; 396 case 2048: 397 return 4; 398 case 4096: 399 return 5; 400 default: 401 mlx5_core_warn(dev, "invalid pkey table size %d\n", size); 402 return 0; 403 } 404 } 405 406 static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, 407 enum mlx5_cap_type cap_type, 408 enum mlx5_cap_mode cap_mode) 409 { 410 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; 411 int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); 412 void *out, *hca_caps; 413 u16 opmod = (cap_type << 1) | (cap_mode & 0x01); 414 int err; 415 416 memset(in, 0, sizeof(in)); 417 out = kzalloc(out_sz, GFP_KERNEL); 418 419 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); 420 MLX5_SET(query_hca_cap_in, in, op_mod, opmod); 421 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); 422 if (err) { 423 mlx5_core_warn(dev, 424 "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", 425 cap_type, cap_mode, err); 426 goto query_ex; 427 } 428 429 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability); 430 431 switch (cap_mode) { 432 case HCA_CAP_OPMOD_GET_MAX: 433 memcpy(dev->hca_caps_max[cap_type], hca_caps, 434 MLX5_UN_SZ_BYTES(hca_cap_union)); 435 break; 436 case HCA_CAP_OPMOD_GET_CUR: 437 memcpy(dev->hca_caps_cur[cap_type], hca_caps, 438 MLX5_UN_SZ_BYTES(hca_cap_union)); 439 break; 440 default: 441 mlx5_core_warn(dev, 442 "Tried to query dev cap type(%x) with wrong opmode(%x)\n", 443 cap_type, cap_mode); 444 err = -EINVAL; 445 break; 446 } 447 query_ex: 448 kfree(out); 449 return err; 450 } 451 452 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type) 453 { 454 int ret; 455 456 ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR); 457 if (ret) 458 return ret; 459 460 return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX); 461 } 462 463 static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) 464 { 465 u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0}; 466 467 MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); 468 469 return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); 470 } 471 472 static int handle_hca_cap(struct mlx5_core_dev *dev) 473 { 474 void *set_ctx = NULL; 475 struct mlx5_profile *prof = dev->profile; 476 int err = -ENOMEM; 477 int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); 478 void *set_hca_cap; 479 480 set_ctx = kzalloc(set_sz, GFP_KERNEL); 481 482 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); 483 if (err) 484 goto query_ex; 485 486 set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, 487 capability); 488 memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], 489 MLX5_ST_SZ_BYTES(cmd_hca_cap)); 490 491 mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", 492 mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), 493 128); 494 /* we limit the size of the pkey table to 128 entries for now */ 495 MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, 496 to_fw_pkey_sz(dev, 128)); 497 498 if (prof->mask & MLX5_PROF_MASK_QP_SIZE) 499 MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, 500 prof->log_max_qp); 501 502 /* disable cmdif checksum */ 503 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); 504 505 /* Enable 4K UAR only when HCA supports it and page size is bigger 506 * than 4K. 507 */ 508 if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096) 509 MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); 510 511 /* enable drain sigerr */ 512 MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1); 513 514 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); 515 516 err = set_caps(dev, set_ctx, set_sz); 517 518 query_ex: 519 kfree(set_ctx); 520 return err; 521 } 522 523 static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) 524 { 525 void *set_ctx; 526 void *set_hca_cap; 527 int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); 528 int req_endianness; 529 int err; 530 531 if (MLX5_CAP_GEN(dev, atomic)) { 532 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); 533 if (err) 534 return err; 535 } else { 536 return 0; 537 } 538 539 req_endianness = 540 MLX5_CAP_ATOMIC(dev, 541 supported_atomic_req_8B_endianess_mode_1); 542 543 if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS) 544 return 0; 545 546 set_ctx = kzalloc(set_sz, GFP_KERNEL); 547 if (!set_ctx) 548 return -ENOMEM; 549 550 MLX5_SET(set_hca_cap_in, set_ctx, op_mod, 551 MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1); 552 set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); 553 554 /* Set requestor to host endianness */ 555 MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode, 556 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS); 557 558 err = set_caps(dev, set_ctx, set_sz); 559 560 kfree(set_ctx); 561 return err; 562 } 563 564 static int set_hca_ctrl(struct mlx5_core_dev *dev) 565 { 566 struct mlx5_reg_host_endianess he_in; 567 struct mlx5_reg_host_endianess he_out; 568 int err; 569 570 if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && 571 !MLX5_CAP_GEN(dev, roce)) 572 return 0; 573 574 memset(&he_in, 0, sizeof(he_in)); 575 he_in.he = MLX5_SET_HOST_ENDIANNESS; 576 err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), 577 &he_out, sizeof(he_out), 578 MLX5_REG_HOST_ENDIANNESS, 0, 1); 579 return err; 580 } 581 582 static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev) 583 { 584 int ret = 0; 585 586 /* Disable local_lb by default */ 587 if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) 588 ret = mlx5_nic_vport_update_local_lb(dev, false); 589 590 return ret; 591 } 592 593 static int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) 594 { 595 u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0}; 596 u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0}; 597 598 MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); 599 MLX5_SET(enable_hca_in, in, function_id, func_id); 600 return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 601 } 602 603 static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) 604 { 605 u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0}; 606 u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0}; 607 608 MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); 609 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 610 } 611 612 static int mlx5_core_set_issi(struct mlx5_core_dev *dev) 613 { 614 u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0}; 615 u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0}; 616 u32 sup_issi; 617 int err; 618 619 MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); 620 621 err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out)); 622 if (err) { 623 u32 syndrome; 624 u8 status; 625 626 mlx5_cmd_mbox_status(query_out, &status, &syndrome); 627 if (status == MLX5_CMD_STAT_BAD_OP_ERR) { 628 mlx5_core_dbg(dev, "Only ISSI 0 is supported\n"); 629 return 0; 630 } 631 632 mlx5_core_err(dev, "failed to query ISSI\n"); 633 return err; 634 } 635 636 sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0); 637 638 if (sup_issi & (1 << 1)) { 639 u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0}; 640 u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0}; 641 642 MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); 643 MLX5_SET(set_issi_in, set_in, current_issi, 1); 644 645 err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out)); 646 if (err) { 647 mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err); 648 return err; 649 } 650 651 dev->issi = 1; 652 653 return 0; 654 } else if (sup_issi & (1 << 0)) { 655 return 0; 656 } 657 658 return -ENOTSUPP; 659 } 660 661 662 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) 663 { 664 struct mlx5_eq_table *table = &dev->priv.eq_table; 665 struct mlx5_eq *eq; 666 int err = -ENOENT; 667 668 spin_lock(&table->lock); 669 list_for_each_entry(eq, &table->comp_eqs_list, list) { 670 if (eq->index == vector) { 671 *eqn = eq->eqn; 672 *irqn = eq->irqn; 673 err = 0; 674 break; 675 } 676 } 677 spin_unlock(&table->lock); 678 679 return err; 680 } 681 EXPORT_SYMBOL(mlx5_vector2eqn); 682 683 static void free_comp_eqs(struct mlx5_core_dev *dev) 684 { 685 struct mlx5_eq_table *table = &dev->priv.eq_table; 686 struct mlx5_eq *eq, *n; 687 688 spin_lock(&table->lock); 689 list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { 690 list_del(&eq->list); 691 spin_unlock(&table->lock); 692 if (mlx5_destroy_unmap_eq(dev, eq)) 693 mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n", 694 eq->eqn); 695 kfree(eq); 696 spin_lock(&table->lock); 697 } 698 spin_unlock(&table->lock); 699 } 700 701 static int alloc_comp_eqs(struct mlx5_core_dev *dev) 702 { 703 struct mlx5_eq_table *table = &dev->priv.eq_table; 704 struct mlx5_eq *eq; 705 int ncomp_vec; 706 int nent; 707 int err; 708 int i; 709 710 INIT_LIST_HEAD(&table->comp_eqs_list); 711 ncomp_vec = table->num_comp_vectors; 712 nent = mlx5_core_get_comp_eq_size(); 713 for (i = 0; i < ncomp_vec; i++) { 714 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node); 715 716 err = mlx5_create_map_eq(dev, eq, 717 i + MLX5_EQ_VEC_COMP_BASE, nent, 0); 718 if (err) { 719 kfree(eq); 720 goto clean; 721 } 722 mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn); 723 eq->index = i; 724 spin_lock(&table->lock); 725 list_add_tail(&eq->list, &table->comp_eqs_list); 726 spin_unlock(&table->lock); 727 } 728 729 return 0; 730 731 clean: 732 free_comp_eqs(dev); 733 return err; 734 } 735 736 static inline int fw_initializing(struct mlx5_core_dev *dev) 737 { 738 return ioread32be(&dev->iseg->initializing) >> 31; 739 } 740 741 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, 742 u32 warn_time_mili) 743 { 744 int warn = jiffies + msecs_to_jiffies(warn_time_mili); 745 int end = jiffies + msecs_to_jiffies(max_wait_mili); 746 int err = 0; 747 748 MPASS(max_wait_mili > warn_time_mili); 749 750 while (fw_initializing(dev) == 1) { 751 if (time_after(jiffies, end)) { 752 err = -EBUSY; 753 break; 754 } 755 if (warn_time_mili && time_after(jiffies, warn)) { 756 mlx5_core_warn(dev, 757 "Waiting for FW initialization, timeout abort in %u s\n", 758 (unsigned)(jiffies_to_msecs(end - warn) / 1000)); 759 warn = jiffies + msecs_to_jiffies(warn_time_mili); 760 } 761 msleep(FW_INIT_WAIT_MS); 762 } 763 764 if (err != 0) 765 mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n", 766 ioread32be(&dev->iseg->initializing)); 767 768 return err; 769 } 770 771 static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) 772 { 773 struct mlx5_device_context *dev_ctx; 774 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); 775 776 dev_ctx = kzalloc_node(sizeof(*dev_ctx), GFP_KERNEL, priv->numa_node); 777 if (!dev_ctx) 778 return; 779 780 dev_ctx->intf = intf; 781 CURVNET_SET_QUIET(vnet0); 782 dev_ctx->context = intf->add(dev); 783 CURVNET_RESTORE(); 784 785 if (dev_ctx->context) { 786 spin_lock_irq(&priv->ctx_lock); 787 list_add_tail(&dev_ctx->list, &priv->ctx_list); 788 spin_unlock_irq(&priv->ctx_lock); 789 } else { 790 kfree(dev_ctx); 791 } 792 } 793 794 static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) 795 { 796 struct mlx5_device_context *dev_ctx; 797 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); 798 799 list_for_each_entry(dev_ctx, &priv->ctx_list, list) 800 if (dev_ctx->intf == intf) { 801 spin_lock_irq(&priv->ctx_lock); 802 list_del(&dev_ctx->list); 803 spin_unlock_irq(&priv->ctx_lock); 804 805 intf->remove(dev, dev_ctx->context); 806 kfree(dev_ctx); 807 return; 808 } 809 } 810 811 int 812 mlx5_register_device(struct mlx5_core_dev *dev) 813 { 814 struct mlx5_priv *priv = &dev->priv; 815 struct mlx5_interface *intf; 816 817 mutex_lock(&intf_mutex); 818 list_add_tail(&priv->dev_list, &dev_list); 819 list_for_each_entry(intf, &intf_list, list) 820 mlx5_add_device(intf, priv); 821 mutex_unlock(&intf_mutex); 822 823 return 0; 824 } 825 826 void 827 mlx5_unregister_device(struct mlx5_core_dev *dev) 828 { 829 struct mlx5_priv *priv = &dev->priv; 830 struct mlx5_interface *intf; 831 832 mutex_lock(&intf_mutex); 833 list_for_each_entry(intf, &intf_list, list) 834 mlx5_remove_device(intf, priv); 835 list_del(&priv->dev_list); 836 mutex_unlock(&intf_mutex); 837 } 838 839 int mlx5_register_interface(struct mlx5_interface *intf) 840 { 841 struct mlx5_priv *priv; 842 843 if (!intf->add || !intf->remove) 844 return -EINVAL; 845 846 mutex_lock(&intf_mutex); 847 list_add_tail(&intf->list, &intf_list); 848 list_for_each_entry(priv, &dev_list, dev_list) 849 mlx5_add_device(intf, priv); 850 mutex_unlock(&intf_mutex); 851 852 return 0; 853 } 854 EXPORT_SYMBOL(mlx5_register_interface); 855 856 void mlx5_unregister_interface(struct mlx5_interface *intf) 857 { 858 struct mlx5_priv *priv; 859 860 mutex_lock(&intf_mutex); 861 list_for_each_entry(priv, &dev_list, dev_list) 862 mlx5_remove_device(intf, priv); 863 list_del(&intf->list); 864 mutex_unlock(&intf_mutex); 865 } 866 EXPORT_SYMBOL(mlx5_unregister_interface); 867 868 void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) 869 { 870 struct mlx5_priv *priv = &mdev->priv; 871 struct mlx5_device_context *dev_ctx; 872 unsigned long flags; 873 void *result = NULL; 874 875 spin_lock_irqsave(&priv->ctx_lock, flags); 876 877 list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list) 878 if ((dev_ctx->intf->protocol == protocol) && 879 dev_ctx->intf->get_dev) { 880 result = dev_ctx->intf->get_dev(dev_ctx->context); 881 break; 882 } 883 884 spin_unlock_irqrestore(&priv->ctx_lock, flags); 885 886 return result; 887 } 888 EXPORT_SYMBOL(mlx5_get_protocol_dev); 889 890 static int mlx5_auto_fw_update; 891 SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 892 &mlx5_auto_fw_update, 0, 893 "Allow automatic firmware update on driver start"); 894 static int 895 mlx5_firmware_update(struct mlx5_core_dev *dev) 896 { 897 const struct firmware *fw; 898 int err; 899 900 TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update); 901 if (!mlx5_auto_fw_update) 902 return (0); 903 fw = firmware_get("mlx5fw_mfa"); 904 if (fw) { 905 err = mlx5_firmware_flash(dev, fw); 906 firmware_put(fw, FIRMWARE_UNLOAD); 907 } 908 else 909 return (-ENOENT); 910 911 return err; 912 } 913 914 static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) 915 { 916 struct pci_dev *pdev = dev->pdev; 917 int err; 918 919 pdev = dev->pdev; 920 pci_set_drvdata(dev->pdev, dev); 921 strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); 922 priv->name[MLX5_MAX_NAME_LEN - 1] = 0; 923 924 mutex_init(&priv->pgdir_mutex); 925 INIT_LIST_HEAD(&priv->pgdir_list); 926 spin_lock_init(&priv->mkey_lock); 927 928 err = mlx5_pci_enable_device(dev); 929 if (err) { 930 mlx5_core_err(dev, "Cannot enable PCI device, aborting\n"); 931 goto err_dbg; 932 } 933 934 err = request_bar(pdev); 935 if (err) { 936 mlx5_core_err(dev, "error requesting BARs, aborting\n"); 937 goto err_disable; 938 } 939 940 pci_set_master(pdev); 941 942 err = set_dma_caps(pdev); 943 if (err) { 944 mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n"); 945 goto err_clr_master; 946 } 947 948 dev->iseg_base = pci_resource_start(dev->pdev, 0); 949 dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); 950 if (!dev->iseg) { 951 err = -ENOMEM; 952 mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n"); 953 goto err_clr_master; 954 } 955 956 return 0; 957 958 err_clr_master: 959 release_bar(dev->pdev); 960 err_disable: 961 mlx5_pci_disable_device(dev); 962 err_dbg: 963 return err; 964 } 965 966 static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv) 967 { 968 #ifdef PCI_IOV 969 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) 970 pci_iov_detach(dev->pdev->dev.bsddev); 971 #endif 972 iounmap(dev->iseg); 973 release_bar(dev->pdev); 974 mlx5_pci_disable_device(dev); 975 } 976 977 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) 978 { 979 int err; 980 981 err = mlx5_vsc_find_cap(dev); 982 if (err) 983 mlx5_core_warn(dev, "Unable to find vendor specific capabilities\n"); 984 985 err = mlx5_query_hca_caps(dev); 986 if (err) { 987 mlx5_core_err(dev, "query hca failed\n"); 988 goto out; 989 } 990 991 err = mlx5_query_board_id(dev); 992 if (err) { 993 mlx5_core_err(dev, "query board id failed\n"); 994 goto out; 995 } 996 997 err = mlx5_eq_init(dev); 998 if (err) { 999 mlx5_core_err(dev, "failed to initialize eq\n"); 1000 goto out; 1001 } 1002 1003 MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); 1004 1005 err = mlx5_init_cq_table(dev); 1006 if (err) { 1007 mlx5_core_err(dev, "failed to initialize cq table\n"); 1008 goto err_eq_cleanup; 1009 } 1010 1011 mlx5_init_qp_table(dev); 1012 mlx5_init_srq_table(dev); 1013 mlx5_init_mr_table(dev); 1014 1015 mlx5_init_reserved_gids(dev); 1016 mlx5_fpga_init(dev); 1017 1018 #ifdef RATELIMIT 1019 err = mlx5_init_rl_table(dev); 1020 if (err) { 1021 mlx5_core_err(dev, "Failed to init rate limiting\n"); 1022 goto err_tables_cleanup; 1023 } 1024 #endif 1025 return 0; 1026 1027 #ifdef RATELIMIT 1028 err_tables_cleanup: 1029 mlx5_cleanup_mr_table(dev); 1030 mlx5_cleanup_srq_table(dev); 1031 mlx5_cleanup_qp_table(dev); 1032 mlx5_cleanup_cq_table(dev); 1033 #endif 1034 1035 err_eq_cleanup: 1036 mlx5_eq_cleanup(dev); 1037 1038 out: 1039 return err; 1040 } 1041 1042 static void mlx5_cleanup_once(struct mlx5_core_dev *dev) 1043 { 1044 #ifdef RATELIMIT 1045 mlx5_cleanup_rl_table(dev); 1046 #endif 1047 mlx5_fpga_cleanup(dev); 1048 mlx5_cleanup_reserved_gids(dev); 1049 mlx5_cleanup_mr_table(dev); 1050 mlx5_cleanup_srq_table(dev); 1051 mlx5_cleanup_qp_table(dev); 1052 mlx5_cleanup_cq_table(dev); 1053 mlx5_eq_cleanup(dev); 1054 } 1055 1056 static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, 1057 bool boot) 1058 { 1059 int err; 1060 1061 mutex_lock(&dev->intf_state_mutex); 1062 if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { 1063 mlx5_core_warn(dev, "interface is up, NOP\n"); 1064 goto out; 1065 } 1066 1067 mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n", 1068 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); 1069 1070 /* 1071 * On load removing any previous indication of internal error, 1072 * device is up 1073 */ 1074 dev->state = MLX5_DEVICE_STATE_UP; 1075 1076 /* wait for firmware to accept initialization segments configurations 1077 */ 1078 err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, 1079 FW_INIT_WARN_MESSAGE_INTERVAL); 1080 if (err) { 1081 dev_err(&dev->pdev->dev, 1082 "Firmware over %d MS in pre-initializing state, aborting\n", 1083 FW_PRE_INIT_TIMEOUT_MILI); 1084 goto out_err; 1085 } 1086 1087 err = mlx5_cmd_init(dev); 1088 if (err) { 1089 mlx5_core_err(dev, 1090 "Failed initializing command interface, aborting\n"); 1091 goto out_err; 1092 } 1093 1094 err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0); 1095 if (err) { 1096 mlx5_core_err(dev, 1097 "Firmware over %d MS in initializing state, aborting\n", 1098 FW_INIT_TIMEOUT_MILI); 1099 goto err_cmd_cleanup; 1100 } 1101 1102 err = mlx5_core_enable_hca(dev, 0); 1103 if (err) { 1104 mlx5_core_err(dev, "enable hca failed\n"); 1105 goto err_cmd_cleanup; 1106 } 1107 1108 err = mlx5_core_set_issi(dev); 1109 if (err) { 1110 mlx5_core_err(dev, "failed to set issi\n"); 1111 goto err_disable_hca; 1112 } 1113 1114 err = mlx5_pagealloc_start(dev); 1115 if (err) { 1116 mlx5_core_err(dev, "mlx5_pagealloc_start failed\n"); 1117 goto err_disable_hca; 1118 } 1119 1120 err = mlx5_satisfy_startup_pages(dev, 1); 1121 if (err) { 1122 mlx5_core_err(dev, "failed to allocate boot pages\n"); 1123 goto err_pagealloc_stop; 1124 } 1125 1126 err = set_hca_ctrl(dev); 1127 if (err) { 1128 mlx5_core_err(dev, "set_hca_ctrl failed\n"); 1129 goto reclaim_boot_pages; 1130 } 1131 1132 err = handle_hca_cap(dev); 1133 if (err) { 1134 mlx5_core_err(dev, "handle_hca_cap failed\n"); 1135 goto reclaim_boot_pages; 1136 } 1137 1138 err = handle_hca_cap_atomic(dev); 1139 if (err) { 1140 mlx5_core_err(dev, "handle_hca_cap_atomic failed\n"); 1141 goto reclaim_boot_pages; 1142 } 1143 1144 err = mlx5_satisfy_startup_pages(dev, 0); 1145 if (err) { 1146 mlx5_core_err(dev, "failed to allocate init pages\n"); 1147 goto reclaim_boot_pages; 1148 } 1149 1150 err = mlx5_cmd_init_hca(dev); 1151 if (err) { 1152 mlx5_core_err(dev, "init hca failed\n"); 1153 goto reclaim_boot_pages; 1154 } 1155 1156 mlx5_set_driver_version(dev); 1157 1158 mlx5_start_health_poll(dev); 1159 1160 if (boot && (err = mlx5_init_once(dev, priv))) { 1161 mlx5_core_err(dev, "sw objs init failed\n"); 1162 goto err_stop_poll; 1163 } 1164 1165 dev->priv.uar = mlx5_get_uars_page(dev); 1166 if (IS_ERR(dev->priv.uar)) { 1167 mlx5_core_err(dev, "Failed allocating uar, aborting\n"); 1168 err = PTR_ERR(dev->priv.uar); 1169 goto err_cleanup_once; 1170 } 1171 1172 err = mlx5_enable_msix(dev); 1173 if (err) { 1174 mlx5_core_err(dev, "enable msix failed\n"); 1175 goto err_cleanup_uar; 1176 } 1177 1178 err = mlx5_start_eqs(dev); 1179 if (err) { 1180 mlx5_core_err(dev, "Failed to start pages and async EQs\n"); 1181 goto err_disable_msix; 1182 } 1183 1184 err = alloc_comp_eqs(dev); 1185 if (err) { 1186 mlx5_core_err(dev, "Failed to alloc completion EQs\n"); 1187 goto err_stop_eqs; 1188 } 1189 1190 err = mlx5_init_fs(dev); 1191 if (err) { 1192 mlx5_core_err(dev, "flow steering init %d\n", err); 1193 goto err_free_comp_eqs; 1194 } 1195 1196 err = mlx5_core_set_hca_defaults(dev); 1197 if (err) { 1198 mlx5_core_err(dev, "Failed to set HCA defaults %d\n", err); 1199 goto err_free_comp_eqs; 1200 } 1201 1202 err = mlx5_mpfs_init(dev); 1203 if (err) { 1204 mlx5_core_err(dev, "mpfs init failed %d\n", err); 1205 goto err_fs; 1206 } 1207 1208 err = mlx5_fpga_device_start(dev); 1209 if (err) { 1210 mlx5_core_err(dev, "fpga device start failed %d\n", err); 1211 goto err_mpfs; 1212 } 1213 1214 err = mlx5_diag_cnt_init(dev); 1215 if (err) { 1216 mlx5_core_err(dev, "diag cnt init failed %d\n", err); 1217 goto err_fpga; 1218 } 1219 1220 err = mlx5_register_device(dev); 1221 if (err) { 1222 mlx5_core_err(dev, "mlx5_register_device failed %d\n", err); 1223 goto err_diag_cnt; 1224 } 1225 1226 set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1227 1228 out: 1229 mutex_unlock(&dev->intf_state_mutex); 1230 return 0; 1231 1232 err_diag_cnt: 1233 mlx5_diag_cnt_cleanup(dev); 1234 1235 err_fpga: 1236 mlx5_fpga_device_stop(dev); 1237 1238 err_mpfs: 1239 mlx5_mpfs_destroy(dev); 1240 1241 err_fs: 1242 mlx5_cleanup_fs(dev); 1243 1244 err_free_comp_eqs: 1245 free_comp_eqs(dev); 1246 1247 err_stop_eqs: 1248 mlx5_stop_eqs(dev); 1249 1250 err_disable_msix: 1251 mlx5_disable_msix(dev); 1252 1253 err_cleanup_uar: 1254 mlx5_put_uars_page(dev, dev->priv.uar); 1255 1256 err_cleanup_once: 1257 if (boot) 1258 mlx5_cleanup_once(dev); 1259 1260 err_stop_poll: 1261 mlx5_stop_health_poll(dev, boot); 1262 if (mlx5_cmd_teardown_hca(dev)) { 1263 mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n"); 1264 goto out_err; 1265 } 1266 1267 reclaim_boot_pages: 1268 mlx5_reclaim_startup_pages(dev); 1269 1270 err_pagealloc_stop: 1271 mlx5_pagealloc_stop(dev); 1272 1273 err_disable_hca: 1274 mlx5_core_disable_hca(dev); 1275 1276 err_cmd_cleanup: 1277 mlx5_cmd_cleanup(dev); 1278 1279 out_err: 1280 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 1281 mutex_unlock(&dev->intf_state_mutex); 1282 1283 return err; 1284 } 1285 1286 static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, 1287 bool cleanup) 1288 { 1289 int err = 0; 1290 1291 if (cleanup) 1292 mlx5_drain_health_recovery(dev); 1293 1294 mutex_lock(&dev->intf_state_mutex); 1295 if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { 1296 mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__); 1297 if (cleanup) 1298 mlx5_cleanup_once(dev); 1299 goto out; 1300 } 1301 1302 mlx5_unregister_device(dev); 1303 1304 mlx5_eswitch_cleanup(dev->priv.eswitch); 1305 mlx5_diag_cnt_cleanup(dev); 1306 mlx5_fpga_device_stop(dev); 1307 mlx5_mpfs_destroy(dev); 1308 mlx5_cleanup_fs(dev); 1309 mlx5_wait_for_reclaim_vfs_pages(dev); 1310 free_comp_eqs(dev); 1311 mlx5_stop_eqs(dev); 1312 mlx5_disable_msix(dev); 1313 mlx5_put_uars_page(dev, dev->priv.uar); 1314 if (cleanup) 1315 mlx5_cleanup_once(dev); 1316 mlx5_stop_health_poll(dev, cleanup); 1317 err = mlx5_cmd_teardown_hca(dev); 1318 if (err) { 1319 mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n"); 1320 goto out; 1321 } 1322 mlx5_pagealloc_stop(dev); 1323 mlx5_reclaim_startup_pages(dev); 1324 mlx5_core_disable_hca(dev); 1325 mlx5_cmd_cleanup(dev); 1326 1327 out: 1328 clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1329 mutex_unlock(&dev->intf_state_mutex); 1330 return err; 1331 } 1332 1333 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, 1334 unsigned long param) 1335 { 1336 struct mlx5_priv *priv = &dev->priv; 1337 struct mlx5_device_context *dev_ctx; 1338 unsigned long flags; 1339 1340 spin_lock_irqsave(&priv->ctx_lock, flags); 1341 1342 list_for_each_entry(dev_ctx, &priv->ctx_list, list) 1343 if (dev_ctx->intf->event) 1344 dev_ctx->intf->event(dev, dev_ctx->context, event, param); 1345 1346 spin_unlock_irqrestore(&priv->ctx_lock, flags); 1347 } 1348 1349 struct mlx5_core_event_handler { 1350 void (*event)(struct mlx5_core_dev *dev, 1351 enum mlx5_dev_event event, 1352 void *data); 1353 }; 1354 1355 #define MLX5_STATS_DESC(a, b, c, d, e, ...) d, e, 1356 1357 #define MLX5_PORT_MODULE_ERROR_STATS(m) \ 1358 m(+1, u64, power_budget_exceeded, "power_budget", "Module Power Budget Exceeded") \ 1359 m(+1, u64, long_range, "long_range", "Module Long Range for non MLNX cable/module") \ 1360 m(+1, u64, bus_stuck, "bus_stuck", "Module Bus stuck(I2C or data shorted)") \ 1361 m(+1, u64, no_eeprom, "no_eeprom", "No EEPROM/retry timeout") \ 1362 m(+1, u64, enforce_part_number, "enforce_part_number", "Module Enforce part number list") \ 1363 m(+1, u64, unknown_id, "unknown_id", "Module Unknown identifier") \ 1364 m(+1, u64, high_temp, "high_temp", "Module High Temperature") \ 1365 m(+1, u64, cable_shorted, "cable_shorted", "Module Cable is shorted") \ 1366 m(+1, u64, pmd_type_not_enabled, "pmd_type_not_enabled", "PMD type is not enabled") \ 1367 m(+1, u64, laster_tec_failure, "laster_tec_failure", "Laster TEC failure") \ 1368 m(+1, u64, high_current, "high_current", "High current") \ 1369 m(+1, u64, high_voltage, "high_voltage", "High voltage") \ 1370 m(+1, u64, pcie_sys_power_slot_exceeded, "pcie_sys_power_slot_exceeded", "PCIe system power slot Exceeded") \ 1371 m(+1, u64, high_power, "high_power", "High power") \ 1372 m(+1, u64, module_state_machine_fault, "module_state_machine_fault", "Module State Machine fault") 1373 1374 static const char *mlx5_pme_err_desc[] = { 1375 MLX5_PORT_MODULE_ERROR_STATS(MLX5_STATS_DESC) 1376 }; 1377 1378 static int init_one(struct pci_dev *pdev, 1379 const struct pci_device_id *id) 1380 { 1381 struct mlx5_core_dev *dev; 1382 struct mlx5_priv *priv; 1383 device_t bsddev = pdev->dev.bsddev; 1384 #ifdef PCI_IOV 1385 nvlist_t *pf_schema, *vf_schema; 1386 int num_vfs, sriov_pos; 1387 #endif 1388 int i,err; 1389 int numa_node; 1390 struct sysctl_oid *pme_sysctl_node; 1391 struct sysctl_oid *pme_err_sysctl_node; 1392 struct sysctl_oid *cap_sysctl_node; 1393 struct sysctl_oid *current_cap_sysctl_node; 1394 struct sysctl_oid *max_cap_sysctl_node; 1395 1396 printk_once("mlx5: %s", mlx5_version); 1397 1398 numa_node = dev_to_node(&pdev->dev); 1399 1400 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, numa_node); 1401 1402 priv = &dev->priv; 1403 priv->numa_node = numa_node; 1404 1405 if (id) 1406 priv->pci_dev_data = id->driver_data; 1407 1408 if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) { 1409 device_printf(bsddev, 1410 "WARN: selected profile out of range, selecting default (%d)\n", 1411 MLX5_DEFAULT_PROF); 1412 mlx5_prof_sel = MLX5_DEFAULT_PROF; 1413 } 1414 dev->profile = &profiles[mlx5_prof_sel]; 1415 dev->pdev = pdev; 1416 dev->event = mlx5_core_event; 1417 1418 /* Set desc */ 1419 device_set_desc(bsddev, mlx5_version); 1420 1421 sysctl_ctx_init(&dev->sysctl_ctx); 1422 SYSCTL_ADD_INT(&dev->sysctl_ctx, 1423 SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), 1424 OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0, 1425 "Maximum number of MSIX event queue vectors, if set"); 1426 SYSCTL_ADD_INT(&dev->sysctl_ctx, 1427 SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), 1428 OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0, 1429 "0:Invalid 1:Sufficient 2:Insufficient"); 1430 SYSCTL_ADD_INT(&dev->sysctl_ctx, 1431 SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), 1432 OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0, 1433 "Current power value in Watts"); 1434 1435 pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, 1436 SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), 1437 OID_AUTO, "pme_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 1438 "Port module event statistics"); 1439 if (pme_sysctl_node == NULL) { 1440 err = -ENOMEM; 1441 goto clean_sysctl_ctx; 1442 } 1443 pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, 1444 SYSCTL_CHILDREN(pme_sysctl_node), 1445 OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 1446 "Port module event error statistics"); 1447 if (pme_err_sysctl_node == NULL) { 1448 err = -ENOMEM; 1449 goto clean_sysctl_ctx; 1450 } 1451 SYSCTL_ADD_U64(&dev->sysctl_ctx, 1452 SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO, 1453 "module_plug", CTLFLAG_RD | CTLFLAG_MPSAFE, 1454 &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED], 1455 0, "Number of time module plugged"); 1456 SYSCTL_ADD_U64(&dev->sysctl_ctx, 1457 SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO, 1458 "module_unplug", CTLFLAG_RD | CTLFLAG_MPSAFE, 1459 &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED], 1460 0, "Number of time module unplugged"); 1461 for (i = 0 ; i < MLX5_MODULE_EVENT_ERROR_NUM; i++) { 1462 SYSCTL_ADD_U64(&dev->sysctl_ctx, 1463 SYSCTL_CHILDREN(pme_err_sysctl_node), OID_AUTO, 1464 mlx5_pme_err_desc[2 * i], CTLFLAG_RD | CTLFLAG_MPSAFE, 1465 &dev->priv.pme_stats.error_counters[i], 1466 0, mlx5_pme_err_desc[2 * i + 1]); 1467 } 1468 1469 cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, 1470 SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)), 1471 OID_AUTO, "caps", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 1472 "hardware capabilities raw bitstrings"); 1473 if (cap_sysctl_node == NULL) { 1474 err = -ENOMEM; 1475 goto clean_sysctl_ctx; 1476 } 1477 current_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, 1478 SYSCTL_CHILDREN(cap_sysctl_node), 1479 OID_AUTO, "current", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 1480 ""); 1481 if (current_cap_sysctl_node == NULL) { 1482 err = -ENOMEM; 1483 goto clean_sysctl_ctx; 1484 } 1485 max_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, 1486 SYSCTL_CHILDREN(cap_sysctl_node), 1487 OID_AUTO, "max", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 1488 ""); 1489 if (max_cap_sysctl_node == NULL) { 1490 err = -ENOMEM; 1491 goto clean_sysctl_ctx; 1492 } 1493 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1494 SYSCTL_CHILDREN(current_cap_sysctl_node), 1495 OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE, 1496 &dev->hca_caps_cur[MLX5_CAP_GENERAL], 1497 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1498 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1499 SYSCTL_CHILDREN(max_cap_sysctl_node), 1500 OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE, 1501 &dev->hca_caps_max[MLX5_CAP_GENERAL], 1502 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1503 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1504 SYSCTL_CHILDREN(current_cap_sysctl_node), 1505 OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE, 1506 &dev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], 1507 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1508 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1509 SYSCTL_CHILDREN(max_cap_sysctl_node), 1510 OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE, 1511 &dev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], 1512 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1513 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1514 SYSCTL_CHILDREN(current_cap_sysctl_node), 1515 OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE, 1516 &dev->hca_caps_cur[MLX5_CAP_ODP], 1517 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1518 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1519 SYSCTL_CHILDREN(max_cap_sysctl_node), 1520 OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE, 1521 &dev->hca_caps_max[MLX5_CAP_ODP], 1522 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1523 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1524 SYSCTL_CHILDREN(current_cap_sysctl_node), 1525 OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE, 1526 &dev->hca_caps_cur[MLX5_CAP_ATOMIC], 1527 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1528 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1529 SYSCTL_CHILDREN(max_cap_sysctl_node), 1530 OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE, 1531 &dev->hca_caps_max[MLX5_CAP_ATOMIC], 1532 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1533 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1534 SYSCTL_CHILDREN(current_cap_sysctl_node), 1535 OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE, 1536 &dev->hca_caps_cur[MLX5_CAP_ROCE], 1537 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1538 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1539 SYSCTL_CHILDREN(max_cap_sysctl_node), 1540 OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE, 1541 &dev->hca_caps_max[MLX5_CAP_ROCE], 1542 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1543 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1544 SYSCTL_CHILDREN(current_cap_sysctl_node), 1545 OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE, 1546 &dev->hca_caps_cur[MLX5_CAP_IPOIB_OFFLOADS], 1547 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1548 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1549 SYSCTL_CHILDREN(max_cap_sysctl_node), 1550 OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE, 1551 &dev->hca_caps_max[MLX5_CAP_IPOIB_OFFLOADS], 1552 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1553 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1554 SYSCTL_CHILDREN(current_cap_sysctl_node), 1555 OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE, 1556 &dev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], 1557 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1558 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1559 SYSCTL_CHILDREN(max_cap_sysctl_node), 1560 OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE, 1561 &dev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], 1562 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1563 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1564 SYSCTL_CHILDREN(current_cap_sysctl_node), 1565 OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, 1566 &dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], 1567 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1568 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1569 SYSCTL_CHILDREN(max_cap_sysctl_node), 1570 OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, 1571 &dev->hca_caps_max[MLX5_CAP_FLOW_TABLE], 1572 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1573 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1574 SYSCTL_CHILDREN(current_cap_sysctl_node), 1575 OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, 1576 &dev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], 1577 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1578 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1579 SYSCTL_CHILDREN(max_cap_sysctl_node), 1580 OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE, 1581 &dev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], 1582 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1583 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1584 SYSCTL_CHILDREN(current_cap_sysctl_node), 1585 OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE, 1586 &dev->hca_caps_cur[MLX5_CAP_ESWITCH], 1587 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1588 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1589 SYSCTL_CHILDREN(max_cap_sysctl_node), 1590 OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE, 1591 &dev->hca_caps_max[MLX5_CAP_ESWITCH], 1592 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1593 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1594 SYSCTL_CHILDREN(current_cap_sysctl_node), 1595 OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE, 1596 &dev->hca_caps_cur[MLX5_CAP_SNAPSHOT], 1597 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1598 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1599 SYSCTL_CHILDREN(max_cap_sysctl_node), 1600 OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE, 1601 &dev->hca_caps_max[MLX5_CAP_SNAPSHOT], 1602 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1603 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1604 SYSCTL_CHILDREN(current_cap_sysctl_node), 1605 OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE, 1606 &dev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], 1607 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1608 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1609 SYSCTL_CHILDREN(max_cap_sysctl_node), 1610 OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE, 1611 &dev->hca_caps_max[MLX5_CAP_VECTOR_CALC], 1612 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1613 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1614 SYSCTL_CHILDREN(current_cap_sysctl_node), 1615 OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE, 1616 &dev->hca_caps_cur[MLX5_CAP_QOS], 1617 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1618 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1619 SYSCTL_CHILDREN(max_cap_sysctl_node), 1620 OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE, 1621 &dev->hca_caps_max[MLX5_CAP_QOS], 1622 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1623 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1624 SYSCTL_CHILDREN(current_cap_sysctl_node), 1625 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE, 1626 &dev->hca_caps_cur[MLX5_CAP_DEBUG], 1627 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1628 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1629 SYSCTL_CHILDREN(max_cap_sysctl_node), 1630 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE, 1631 &dev->hca_caps_max[MLX5_CAP_DEBUG], 1632 MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", ""); 1633 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1634 SYSCTL_CHILDREN(cap_sysctl_node), 1635 OID_AUTO, "pcam", CTLFLAG_RD | CTLFLAG_MPSAFE, 1636 &dev->caps.pcam, sizeof(dev->caps.pcam), "IU", ""); 1637 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1638 SYSCTL_CHILDREN(cap_sysctl_node), 1639 OID_AUTO, "mcam", CTLFLAG_RD | CTLFLAG_MPSAFE, 1640 &dev->caps.mcam, sizeof(dev->caps.mcam), "IU", ""); 1641 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1642 SYSCTL_CHILDREN(cap_sysctl_node), 1643 OID_AUTO, "qcam", CTLFLAG_RD | CTLFLAG_MPSAFE, 1644 &dev->caps.qcam, sizeof(dev->caps.qcam), "IU", ""); 1645 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, 1646 SYSCTL_CHILDREN(cap_sysctl_node), 1647 OID_AUTO, "fpga", CTLFLAG_RD | CTLFLAG_MPSAFE, 1648 &dev->caps.fpga, sizeof(dev->caps.fpga), "IU", ""); 1649 1650 INIT_LIST_HEAD(&priv->ctx_list); 1651 spin_lock_init(&priv->ctx_lock); 1652 mutex_init(&dev->pci_status_mutex); 1653 mutex_init(&dev->intf_state_mutex); 1654 1655 mutex_init(&priv->bfregs.reg_head.lock); 1656 mutex_init(&priv->bfregs.wc_head.lock); 1657 INIT_LIST_HEAD(&priv->bfregs.reg_head.list); 1658 INIT_LIST_HEAD(&priv->bfregs.wc_head.list); 1659 1660 mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW); 1661 err = mlx5_pci_init(dev, priv); 1662 if (err) { 1663 mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err); 1664 goto clean_dev; 1665 } 1666 1667 err = mlx5_health_init(dev); 1668 if (err) { 1669 mlx5_core_err(dev, "mlx5_health_init failed %d\n", err); 1670 goto close_pci; 1671 } 1672 1673 mlx5_pagealloc_init(dev); 1674 1675 err = mlx5_load_one(dev, priv, true); 1676 if (err) { 1677 mlx5_core_err(dev, "mlx5_load_one failed %d\n", err); 1678 goto clean_health; 1679 } 1680 1681 mlx5_fwdump_prep(dev); 1682 1683 mlx5_firmware_update(dev); 1684 1685 #ifdef PCI_IOV 1686 if (MLX5_CAP_GEN(dev, vport_group_manager)) { 1687 if (pci_find_extcap(bsddev, PCIZ_SRIOV, &sriov_pos) == 0) { 1688 num_vfs = pci_read_config(bsddev, sriov_pos + 1689 PCIR_SRIOV_TOTAL_VFS, 2); 1690 } else { 1691 mlx5_core_info(dev, "cannot find SR-IOV PCIe cap\n"); 1692 num_vfs = 0; 1693 } 1694 err = mlx5_eswitch_init(dev, 1 + num_vfs); 1695 if (err == 0) { 1696 pf_schema = pci_iov_schema_alloc_node(); 1697 vf_schema = pci_iov_schema_alloc_node(); 1698 pci_iov_schema_add_unicast_mac(vf_schema, 1699 iov_mac_addr_name, 0, NULL); 1700 pci_iov_schema_add_uint64(vf_schema, iov_node_guid_name, 1701 0, 0); 1702 pci_iov_schema_add_uint64(vf_schema, iov_port_guid_name, 1703 0, 0); 1704 err = pci_iov_attach(bsddev, pf_schema, vf_schema); 1705 if (err != 0) { 1706 device_printf(bsddev, 1707 "Failed to initialize SR-IOV support, error %d\n", 1708 err); 1709 } 1710 } else { 1711 mlx5_core_err(dev, "eswitch init failed, error %d\n", 1712 err); 1713 } 1714 } 1715 #endif 1716 1717 pci_save_state(pdev); 1718 return 0; 1719 1720 clean_health: 1721 mlx5_pagealloc_cleanup(dev); 1722 mlx5_health_cleanup(dev); 1723 close_pci: 1724 mlx5_pci_close(dev, priv); 1725 clean_dev: 1726 mtx_destroy(&dev->dump_lock); 1727 clean_sysctl_ctx: 1728 sysctl_ctx_free(&dev->sysctl_ctx); 1729 kfree(dev); 1730 return err; 1731 } 1732 1733 static void remove_one(struct pci_dev *pdev) 1734 { 1735 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1736 struct mlx5_priv *priv = &dev->priv; 1737 1738 #ifdef PCI_IOV 1739 pci_iov_detach(pdev->dev.bsddev); 1740 mlx5_eswitch_disable_sriov(priv->eswitch); 1741 #endif 1742 1743 if (mlx5_unload_one(dev, priv, true)) { 1744 mlx5_core_err(dev, "mlx5_unload_one() failed, leaked %lld bytes\n", 1745 (long long)(dev->priv.fw_pages * MLX5_ADAPTER_PAGE_SIZE)); 1746 } 1747 1748 mlx5_pagealloc_cleanup(dev); 1749 mlx5_health_cleanup(dev); 1750 mlx5_fwdump_clean(dev); 1751 mlx5_pci_close(dev, priv); 1752 mtx_destroy(&dev->dump_lock); 1753 pci_set_drvdata(pdev, NULL); 1754 sysctl_ctx_free(&dev->sysctl_ctx); 1755 kfree(dev); 1756 } 1757 1758 static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, 1759 pci_channel_state_t state) 1760 { 1761 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1762 struct mlx5_priv *priv = &dev->priv; 1763 1764 mlx5_core_info(dev, "%s was called\n", __func__); 1765 mlx5_enter_error_state(dev, false); 1766 mlx5_unload_one(dev, priv, false); 1767 1768 if (state) { 1769 mlx5_drain_health_wq(dev); 1770 mlx5_pci_disable_device(dev); 1771 } 1772 1773 return state == pci_channel_io_perm_failure ? 1774 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 1775 } 1776 1777 static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev) 1778 { 1779 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1780 int err = 0; 1781 1782 mlx5_core_info(dev,"%s was called\n", __func__); 1783 1784 err = mlx5_pci_enable_device(dev); 1785 if (err) { 1786 mlx5_core_err(dev, "mlx5_pci_enable_device failed with error code: %d\n" 1787 ,err); 1788 return PCI_ERS_RESULT_DISCONNECT; 1789 } 1790 pci_set_master(pdev); 1791 pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0); 1792 pci_restore_state(pdev); 1793 pci_save_state(pdev); 1794 1795 return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 1796 } 1797 1798 /* wait for the device to show vital signs. For now we check 1799 * that we can read the device ID and that the health buffer 1800 * shows a non zero value which is different than 0xffffffff 1801 */ 1802 static void wait_vital(struct pci_dev *pdev) 1803 { 1804 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1805 struct mlx5_core_health *health = &dev->priv.health; 1806 const int niter = 100; 1807 u32 count; 1808 u16 did; 1809 int i; 1810 1811 /* Wait for firmware to be ready after reset */ 1812 msleep(1000); 1813 for (i = 0; i < niter; i++) { 1814 if (pci_read_config_word(pdev, 2, &did)) { 1815 mlx5_core_warn(dev, "failed reading config word\n"); 1816 break; 1817 } 1818 if (did == pdev->device) { 1819 mlx5_core_info(dev, 1820 "device ID correctly read after %d iterations\n", i); 1821 break; 1822 } 1823 msleep(50); 1824 } 1825 if (i == niter) 1826 mlx5_core_warn(dev, "could not read device ID\n"); 1827 1828 for (i = 0; i < niter; i++) { 1829 count = ioread32be(health->health_counter); 1830 if (count && count != 0xffffffff) { 1831 mlx5_core_info(dev, 1832 "Counter value 0x%x after %d iterations\n", count, i); 1833 break; 1834 } 1835 msleep(50); 1836 } 1837 1838 if (i == niter) 1839 mlx5_core_warn(dev, "could not read device ID\n"); 1840 } 1841 1842 static void mlx5_pci_resume(struct pci_dev *pdev) 1843 { 1844 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1845 struct mlx5_priv *priv = &dev->priv; 1846 int err; 1847 1848 mlx5_core_info(dev,"%s was called\n", __func__); 1849 1850 wait_vital(pdev); 1851 1852 err = mlx5_load_one(dev, priv, false); 1853 if (err) 1854 mlx5_core_err(dev, 1855 "mlx5_load_one failed with error code: %d\n" ,err); 1856 else 1857 mlx5_core_info(dev,"device recovered\n"); 1858 } 1859 1860 static const struct pci_error_handlers mlx5_err_handler = { 1861 .error_detected = mlx5_pci_err_detected, 1862 .slot_reset = mlx5_pci_slot_reset, 1863 .resume = mlx5_pci_resume 1864 }; 1865 1866 #ifdef PCI_IOV 1867 static int 1868 mlx5_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) 1869 { 1870 struct pci_dev *pdev; 1871 struct mlx5_core_dev *core_dev; 1872 struct mlx5_priv *priv; 1873 int err; 1874 1875 pdev = device_get_softc(dev); 1876 core_dev = pci_get_drvdata(pdev); 1877 priv = &core_dev->priv; 1878 1879 if (priv->eswitch == NULL) 1880 return (ENXIO); 1881 if (priv->eswitch->total_vports < num_vfs + 1) 1882 num_vfs = priv->eswitch->total_vports - 1; 1883 err = mlx5_eswitch_enable_sriov(priv->eswitch, num_vfs); 1884 return (-err); 1885 } 1886 1887 static void 1888 mlx5_iov_uninit(device_t dev) 1889 { 1890 struct pci_dev *pdev; 1891 struct mlx5_core_dev *core_dev; 1892 struct mlx5_priv *priv; 1893 1894 pdev = device_get_softc(dev); 1895 core_dev = pci_get_drvdata(pdev); 1896 priv = &core_dev->priv; 1897 1898 mlx5_eswitch_disable_sriov(priv->eswitch); 1899 } 1900 1901 static int 1902 mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) 1903 { 1904 struct pci_dev *pdev; 1905 struct mlx5_core_dev *core_dev; 1906 struct mlx5_priv *priv; 1907 const void *mac; 1908 size_t mac_size; 1909 uint64_t node_guid, port_guid; 1910 int error; 1911 1912 pdev = device_get_softc(dev); 1913 core_dev = pci_get_drvdata(pdev); 1914 priv = &core_dev->priv; 1915 1916 if (vfnum + 1 >= priv->eswitch->total_vports) 1917 return (ENXIO); 1918 1919 if (nvlist_exists_binary(vf_config, iov_mac_addr_name)) { 1920 mac = nvlist_get_binary(vf_config, iov_mac_addr_name, 1921 &mac_size); 1922 error = -mlx5_eswitch_set_vport_mac(priv->eswitch, 1923 vfnum + 1, __DECONST(u8 *, mac)); 1924 if (error != 0) { 1925 mlx5_core_err(core_dev, 1926 "setting MAC for VF %d failed, error %d\n", 1927 vfnum + 1, error); 1928 } 1929 } 1930 1931 if (nvlist_exists_number(vf_config, iov_node_guid_name)) { 1932 node_guid = nvlist_get_number(vf_config, iov_node_guid_name); 1933 error = -mlx5_modify_nic_vport_node_guid(core_dev, vfnum + 1, 1934 node_guid); 1935 if (error != 0) { 1936 mlx5_core_err(core_dev, 1937 "modifying node GUID for VF %d failed, error %d\n", 1938 vfnum + 1, error); 1939 } 1940 } 1941 1942 if (nvlist_exists_number(vf_config, iov_port_guid_name)) { 1943 port_guid = nvlist_get_number(vf_config, iov_port_guid_name); 1944 error = -mlx5_modify_nic_vport_port_guid(core_dev, vfnum + 1, 1945 port_guid); 1946 if (error != 0) { 1947 mlx5_core_err(core_dev, 1948 "modifying port GUID for VF %d failed, error %d\n", 1949 vfnum + 1, error); 1950 } 1951 } 1952 1953 error = -mlx5_eswitch_set_vport_state(priv->eswitch, vfnum + 1, 1954 VPORT_STATE_FOLLOW); 1955 if (error != 0) { 1956 mlx5_core_err(core_dev, 1957 "upping vport for VF %d failed, error %d\n", 1958 vfnum + 1, error); 1959 } 1960 error = -mlx5_core_enable_hca(core_dev, vfnum + 1); 1961 if (error != 0) { 1962 mlx5_core_err(core_dev, "enabling VF %d failed, error %d\n", 1963 vfnum + 1, error); 1964 } 1965 return (error); 1966 } 1967 #endif 1968 1969 static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) 1970 { 1971 bool fast_teardown, force_teardown; 1972 int err; 1973 1974 if (!mlx5_fast_unload_enabled) { 1975 mlx5_core_dbg(dev, "fast unload is disabled by user\n"); 1976 return -EOPNOTSUPP; 1977 } 1978 1979 fast_teardown = MLX5_CAP_GEN(dev, fast_teardown); 1980 force_teardown = MLX5_CAP_GEN(dev, force_teardown); 1981 1982 mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown); 1983 mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown); 1984 1985 if (!fast_teardown && !force_teardown) 1986 return -EOPNOTSUPP; 1987 1988 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1989 mlx5_core_dbg(dev, "Device in internal error state, giving up\n"); 1990 return -EAGAIN; 1991 } 1992 1993 /* Panic tear down fw command will stop the PCI bus communication 1994 * with the HCA, so the health polll is no longer needed. 1995 */ 1996 mlx5_drain_health_wq(dev); 1997 mlx5_stop_health_poll(dev, false); 1998 1999 err = mlx5_cmd_fast_teardown_hca(dev); 2000 if (!err) 2001 goto done; 2002 2003 err = mlx5_cmd_force_teardown_hca(dev); 2004 if (!err) 2005 goto done; 2006 2007 mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err); 2008 mlx5_start_health_poll(dev); 2009 return err; 2010 done: 2011 mlx5_enter_error_state(dev, true); 2012 return 0; 2013 } 2014 2015 static void mlx5_shutdown_disable_interrupts(struct mlx5_core_dev *mdev) 2016 { 2017 int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE; 2018 int x; 2019 2020 mdev->priv.disable_irqs = 1; 2021 2022 /* wait for all IRQ handlers to finish processing */ 2023 for (x = 0; x != nvec; x++) 2024 synchronize_irq(mdev->priv.msix_arr[x].vector); 2025 } 2026 2027 static void shutdown_one(struct pci_dev *pdev) 2028 { 2029 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 2030 struct mlx5_priv *priv = &dev->priv; 2031 int err; 2032 2033 /* enter polling mode */ 2034 mlx5_cmd_use_polling(dev); 2035 2036 set_bit(MLX5_INTERFACE_STATE_TEARDOWN, &dev->intf_state); 2037 2038 /* disable all interrupts */ 2039 mlx5_shutdown_disable_interrupts(dev); 2040 2041 err = mlx5_try_fast_unload(dev); 2042 if (err) 2043 mlx5_unload_one(dev, priv, false); 2044 mlx5_pci_disable_device(dev); 2045 } 2046 2047 static const struct pci_device_id mlx5_core_pci_table[] = { 2048 { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */ 2049 { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */ 2050 { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ 2051 { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */ 2052 { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */ 2053 { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */ 2054 { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5, PCIe 3.0 */ 2055 { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */ 2056 { PCI_VDEVICE(MELLANOX, 4121) }, /* ConnectX-5 Ex */ 2057 { PCI_VDEVICE(MELLANOX, 4122) }, /* ConnectX-5 Ex VF */ 2058 { PCI_VDEVICE(MELLANOX, 4123) }, /* ConnectX-6 */ 2059 { PCI_VDEVICE(MELLANOX, 4124) }, /* ConnectX-6 VF */ 2060 { PCI_VDEVICE(MELLANOX, 4125) }, /* ConnectX-6 Dx */ 2061 { PCI_VDEVICE(MELLANOX, 4126) }, /* ConnectX Family mlx5Gen Virtual Function */ 2062 { PCI_VDEVICE(MELLANOX, 4127) }, /* ConnectX-6 LX */ 2063 { PCI_VDEVICE(MELLANOX, 4128) }, 2064 { PCI_VDEVICE(MELLANOX, 4129) }, /* ConnectX-7 */ 2065 { PCI_VDEVICE(MELLANOX, 4130) }, 2066 { PCI_VDEVICE(MELLANOX, 4131) }, /* ConnectX-8 */ 2067 { PCI_VDEVICE(MELLANOX, 4132) }, 2068 { PCI_VDEVICE(MELLANOX, 4133) }, 2069 { PCI_VDEVICE(MELLANOX, 4134) }, 2070 { PCI_VDEVICE(MELLANOX, 4135) }, 2071 { PCI_VDEVICE(MELLANOX, 4136) }, 2072 { PCI_VDEVICE(MELLANOX, 4137) }, 2073 { PCI_VDEVICE(MELLANOX, 4138) }, 2074 { PCI_VDEVICE(MELLANOX, 4139) }, 2075 { PCI_VDEVICE(MELLANOX, 4140) }, 2076 { PCI_VDEVICE(MELLANOX, 4141) }, 2077 { PCI_VDEVICE(MELLANOX, 4142) }, 2078 { PCI_VDEVICE(MELLANOX, 4143) }, 2079 { PCI_VDEVICE(MELLANOX, 4144) }, 2080 { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ 2081 { PCI_VDEVICE(MELLANOX, 0xa2d3) }, /* BlueField integrated ConnectX-5 network controller VF */ 2082 { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ 2083 { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */ 2084 { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */ 2085 { } 2086 }; 2087 2088 MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); 2089 2090 void mlx5_disable_device(struct mlx5_core_dev *dev) 2091 { 2092 mlx5_pci_err_detected(dev->pdev, 0); 2093 } 2094 2095 void mlx5_recover_device(struct mlx5_core_dev *dev) 2096 { 2097 mlx5_pci_disable_device(dev); 2098 if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) 2099 mlx5_pci_resume(dev->pdev); 2100 } 2101 2102 struct pci_driver mlx5_core_driver = { 2103 .name = DRIVER_NAME, 2104 .id_table = mlx5_core_pci_table, 2105 .shutdown = shutdown_one, 2106 .probe = init_one, 2107 .remove = remove_one, 2108 .err_handler = &mlx5_err_handler, 2109 #ifdef PCI_IOV 2110 .bsd_iov_init = mlx5_iov_init, 2111 .bsd_iov_uninit = mlx5_iov_uninit, 2112 .bsd_iov_add_vf = mlx5_iov_add_vf, 2113 #endif 2114 }; 2115 2116 static int __init init(void) 2117 { 2118 int err; 2119 2120 err = pci_register_driver(&mlx5_core_driver); 2121 if (err) 2122 goto err_debug; 2123 2124 err = mlx5_ctl_init(); 2125 if (err) 2126 goto err_ctl; 2127 2128 return 0; 2129 2130 err_ctl: 2131 pci_unregister_driver(&mlx5_core_driver); 2132 2133 err_debug: 2134 return err; 2135 } 2136 2137 static void __exit cleanup(void) 2138 { 2139 mlx5_ctl_fini(); 2140 pci_unregister_driver(&mlx5_core_driver); 2141 } 2142 2143 module_init_order(init, SI_ORDER_FIRST); 2144 module_exit_order(cleanup, SI_ORDER_FIRST); 2145