1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #define LINUXKPI_PARAM_PREFIX mlx4_ 37 38 #include <linux/kmod.h> 39 #include <linux/module.h> 40 #include <linux/errno.h> 41 #include <linux/pci.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/slab.h> 44 #include <linux/io-mapping.h> 45 #include <linux/delay.h> 46 #include <linux/netdevice.h> 47 #include <linux/string.h> 48 #include <linux/fs.h> 49 #include <linux/cache.h> 50 #include <linux/random.h> 51 52 #include <dev/mlx4/device.h> 53 #include <dev/mlx4/doorbell.h> 54 55 #include "mlx4.h" 56 #include "fw.h" 57 #include "icm.h" 58 #include <dev/mlx4/stats.h> 59 60 MODULE_AUTHOR("Roland Dreier"); 61 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 62 MODULE_LICENSE("Dual BSD/GPL"); 63 64 struct workqueue_struct *mlx4_wq; 65 66 #ifdef CONFIG_MLX4_DEBUG 67 68 int mlx4_debug_level = 0; 69 module_param_named(debug_level, mlx4_debug_level, int, 0644); 70 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 71 72 #endif /* CONFIG_MLX4_DEBUG */ 73 74 #ifdef CONFIG_PCI_MSI 75 76 static int msi_x = 1; 77 module_param(msi_x, int, 0444); 78 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 79 80 #else /* CONFIG_PCI_MSI */ 81 82 #define msi_x (0) 83 84 #endif /* CONFIG_PCI_MSI */ 85 86 static uint8_t num_vfs[3] = {0, 0, 0}; 87 static int num_vfs_argc; 88 module_param_array(num_vfs, byte , &num_vfs_argc, 0444); 89 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" 90 "num_vfs=port1,port2,port1+2"); 91 92 static uint8_t probe_vf[3] = {0, 0, 0}; 93 static int probe_vfs_argc; 94 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); 95 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" 96 "probe_vf=port1,port2,port1+2"); 97 98 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 99 module_param_named(log_num_mgm_entry_size, 100 mlx4_log_num_mgm_entry_size, int, 0444); 101 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 102 " of qp per mcg, for example:" 103 " 10 gives 248.range: 7 <=" 104 " log_num_mgm_entry_size <= 12." 105 " To activate device managed" 106 " flow steering when available, set to -1"); 107 108 static bool enable_64b_cqe_eqe = true; 109 module_param(enable_64b_cqe_eqe, bool, 0444); 110 MODULE_PARM_DESC(enable_64b_cqe_eqe, 111 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); 112 113 static bool enable_4k_uar; 114 module_param(enable_4k_uar, bool, 0444); 115 MODULE_PARM_DESC(enable_4k_uar, 116 "Enable using 4K UAR. Should not be enabled if have VFs which do not support 4K UARs (default: false)"); 117 118 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ 119 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ 120 MLX4_FUNC_CAP_DMFS_A0_STATIC) 121 122 #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV) 123 124 static char mlx4_description[] = "Mellanox driver" 125 " (" DRV_VERSION ")"; 126 127 static char mlx4_version[] = 128 DRV_NAME ": Mellanox ConnectX core driver v" 129 DRV_VERSION " (" DRV_RELDATE ")\n"; 130 131 static struct mlx4_profile default_profile = { 132 .num_qp = 1 << 18, 133 .num_srq = 1 << 16, 134 .rdmarc_per_qp = 1 << 4, 135 .num_cq = 1 << 16, 136 .num_mcg = 1 << 13, 137 .num_mpt = 1 << 19, 138 .num_mtt = 1 << 20, /* It is really num mtt segements */ 139 }; 140 141 static struct mlx4_profile low_mem_profile = { 142 .num_qp = 1 << 17, 143 .num_srq = 1 << 6, 144 .rdmarc_per_qp = 1 << 4, 145 .num_cq = 1 << 8, 146 .num_mcg = 1 << 8, 147 .num_mpt = 1 << 9, 148 .num_mtt = 1 << 7, 149 }; 150 151 static int log_num_mac = 7; 152 module_param_named(log_num_mac, log_num_mac, int, 0444); 153 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 154 155 static int log_num_vlan; 156 module_param_named(log_num_vlan, log_num_vlan, int, 0444); 157 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 158 /* Log2 max number of VLANs per ETH port (0-7) */ 159 #define MLX4_LOG_NUM_VLANS 7 160 #define MLX4_MIN_LOG_NUM_VLANS 0 161 #define MLX4_MIN_LOG_NUM_MAC 1 162 163 static bool use_prio; 164 module_param_named(use_prio, use_prio, bool, 0444); 165 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)"); 166 167 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 168 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 169 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 170 171 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 172 173 struct mlx4_port_config { 174 struct list_head list; 175 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 176 struct pci_dev *pdev; 177 }; 178 179 static atomic_t pf_loading = ATOMIC_INIT(0); 180 181 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, 182 struct mlx4_dev_cap *dev_cap) 183 { 184 /* The reserved_uars is calculated by system page size unit. 185 * Therefore, adjustment is added when the uar page size is less 186 * than the system page size 187 */ 188 dev->caps.reserved_uars = 189 max_t(int, 190 mlx4_get_num_reserved_uar(dev), 191 dev_cap->reserved_uars / 192 (1 << (PAGE_SHIFT - dev->uar_page_shift))); 193 } 194 195 int mlx4_check_port_params(struct mlx4_dev *dev, 196 enum mlx4_port_type *port_type) 197 { 198 int i; 199 200 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 201 for (i = 0; i < dev->caps.num_ports - 1; i++) { 202 if (port_type[i] != port_type[i + 1]) { 203 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); 204 return -EINVAL; 205 } 206 } 207 } 208 209 for (i = 0; i < dev->caps.num_ports; i++) { 210 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 211 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", 212 i + 1); 213 return -EINVAL; 214 } 215 } 216 return 0; 217 } 218 219 static void mlx4_set_port_mask(struct mlx4_dev *dev) 220 { 221 int i; 222 223 for (i = 1; i <= dev->caps.num_ports; ++i) 224 dev->caps.port_mask[i] = dev->caps.port_type[i]; 225 } 226 227 enum { 228 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, 229 }; 230 231 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 232 { 233 int err = 0; 234 struct mlx4_func func; 235 236 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 237 err = mlx4_QUERY_FUNC(dev, &func, 0); 238 if (err) { 239 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 240 return err; 241 } 242 dev_cap->max_eqs = func.max_eq; 243 dev_cap->reserved_eqs = func.rsvd_eqs; 244 dev_cap->reserved_uars = func.rsvd_uars; 245 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; 246 } 247 return err; 248 } 249 250 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) 251 { 252 struct mlx4_caps *dev_cap = &dev->caps; 253 254 /* FW not supporting or cancelled by user */ 255 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || 256 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) 257 return; 258 259 /* Must have 64B CQE_EQE enabled by FW to use bigger stride 260 * When FW has NCSI it may decide not to report 64B CQE/EQEs 261 */ 262 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || 263 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { 264 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 265 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 266 return; 267 } 268 269 if (cache_line_size() == 128 || cache_line_size() == 256) { 270 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); 271 /* Changing the real data inside CQE size to 32B */ 272 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 273 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 274 275 if (mlx4_is_master(dev)) 276 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; 277 } else { 278 if (cache_line_size() != 32 && cache_line_size() != 64) 279 mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n"); 280 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 281 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 282 } 283 } 284 285 static int _mlx4_dev_port(struct mlx4_dev *dev, int port, 286 struct mlx4_port_cap *port_cap) 287 { 288 dev->caps.vl_cap[port] = port_cap->max_vl; 289 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; 290 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; 291 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; 292 /* set gid and pkey table operating lengths by default 293 * to non-sriov values 294 */ 295 dev->caps.gid_table_len[port] = port_cap->max_gids; 296 dev->caps.pkey_table_len[port] = port_cap->max_pkeys; 297 dev->caps.port_width_cap[port] = port_cap->max_port_width; 298 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; 299 dev->caps.max_tc_eth = port_cap->max_tc_eth; 300 dev->caps.def_mac[port] = port_cap->def_mac; 301 dev->caps.supported_type[port] = port_cap->supported_port_types; 302 dev->caps.suggested_type[port] = port_cap->suggested_type; 303 dev->caps.default_sense[port] = port_cap->default_sense; 304 dev->caps.trans_type[port] = port_cap->trans_type; 305 dev->caps.vendor_oui[port] = port_cap->vendor_oui; 306 dev->caps.wavelength[port] = port_cap->wavelength; 307 dev->caps.trans_code[port] = port_cap->trans_code; 308 309 return 0; 310 } 311 312 static int mlx4_dev_port(struct mlx4_dev *dev, int port, 313 struct mlx4_port_cap *port_cap) 314 { 315 int err = 0; 316 317 err = mlx4_QUERY_PORT(dev, port, port_cap); 318 319 if (err) 320 mlx4_err(dev, "QUERY_PORT command failed.\n"); 321 322 return err; 323 } 324 325 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev) 326 { 327 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)) 328 return; 329 330 if (mlx4_is_mfunc(dev)) { 331 mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS"); 332 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; 333 return; 334 } 335 336 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { 337 mlx4_dbg(dev, 338 "Keep FCS is not supported - Disabling Ignore FCS"); 339 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; 340 return; 341 } 342 } 343 344 #define MLX4_A0_STEERING_TABLE_SIZE 256 345 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 346 { 347 int err; 348 int i; 349 350 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 351 if (err) { 352 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 353 return err; 354 } 355 mlx4_dev_cap_dump(dev, dev_cap); 356 357 if (dev_cap->min_page_sz > PAGE_SIZE) { 358 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 359 dev_cap->min_page_sz, (long)PAGE_SIZE); 360 return -ENODEV; 361 } 362 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 363 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 364 dev_cap->num_ports, MLX4_MAX_PORTS); 365 return -ENODEV; 366 } 367 368 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { 369 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 370 dev_cap->uar_size, 371 (unsigned long long) 372 pci_resource_len(dev->persist->pdev, 2)); 373 return -ENODEV; 374 } 375 376 dev->caps.num_ports = dev_cap->num_ports; 377 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; 378 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? 379 dev->caps.num_sys_eqs : 380 MLX4_MAX_EQ_NUM; 381 for (i = 1; i <= dev->caps.num_ports; ++i) { 382 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); 383 if (err) { 384 mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); 385 return err; 386 } 387 } 388 389 dev->caps.uar_page_size = PAGE_SIZE; 390 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 391 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 392 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 393 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 394 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 395 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 396 dev->caps.max_wqes = dev_cap->max_qp_sz; 397 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 398 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 399 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 400 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 401 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 402 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 403 /* 404 * Subtract 1 from the limit because we need to allocate a 405 * spare CQE so the HCA HW can tell the difference between an 406 * empty CQ and a full CQ. 407 */ 408 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 409 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 410 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 411 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 412 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 413 414 dev->caps.reserved_pds = dev_cap->reserved_pds; 415 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 416 dev_cap->reserved_xrcds : 0; 417 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 418 dev_cap->max_xrcds : 0; 419 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 420 421 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 422 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 423 dev->caps.flags = dev_cap->flags; 424 dev->caps.flags2 = dev_cap->flags2; 425 dev->caps.bmme_flags = dev_cap->bmme_flags; 426 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 427 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 428 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 429 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 430 431 /* Save uar page shift */ 432 if (!mlx4_is_slave(dev)) { 433 /* Virtual PCI function needs to determine UAR page size from 434 * firmware. Only master PCI function can set the uar page size 435 */ 436 if (enable_4k_uar) 437 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; 438 else 439 dev->uar_page_shift = PAGE_SHIFT; 440 441 mlx4_set_num_reserved_uars(dev, dev_cap); 442 } 443 444 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { 445 struct mlx4_init_hca_param hca_param; 446 447 memset(&hca_param, 0, sizeof(hca_param)); 448 err = mlx4_QUERY_HCA(dev, &hca_param); 449 /* Turn off PHV_EN flag in case phv_check_en is set. 450 * phv_check_en is a HW check that parse the packet and verify 451 * phv bit was reported correctly in the wqe. To allow QinQ 452 * PHV_EN flag should be set and phv_check_en must be cleared 453 * otherwise QinQ packets will be drop by the HW. 454 */ 455 if (err || hca_param.phv_check_en) 456 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN; 457 } 458 459 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 460 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) 461 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 462 /* Don't do sense port on multifunction devices (for now at least) */ 463 if (mlx4_is_mfunc(dev)) 464 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 465 466 if (mlx4_low_memory_profile()) { 467 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; 468 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; 469 } else { 470 dev->caps.log_num_macs = log_num_mac; 471 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 472 } 473 474 for (i = 1; i <= dev->caps.num_ports; ++i) { 475 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 476 if (dev->caps.supported_type[i]) { 477 /* if only ETH is supported - assign ETH */ 478 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 479 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 480 /* if only IB is supported, assign IB */ 481 else if (dev->caps.supported_type[i] == 482 MLX4_PORT_TYPE_IB) 483 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 484 else { 485 /* if IB and ETH are supported, we set the port 486 * type according to user selection of port type; 487 * if user selected none, take the FW hint */ 488 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) 489 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 490 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 491 else 492 dev->caps.port_type[i] = port_type_array[i - 1]; 493 } 494 } 495 /* 496 * Link sensing is allowed on the port if 3 conditions are true: 497 * 1. Both protocols are supported on the port. 498 * 2. Different types are supported on the port 499 * 3. FW declared that it supports link sensing 500 */ 501 mlx4_priv(dev)->sense.sense_allowed[i] = 502 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 503 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 504 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 505 506 /* 507 * If "default_sense" bit is set, we move the port to "AUTO" mode 508 * and perform sense_port FW command to try and set the correct 509 * port type from beginning 510 */ 511 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 512 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 513 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 514 mlx4_SENSE_PORT(dev, i, &sensed_port); 515 if (sensed_port != MLX4_PORT_TYPE_NONE) 516 dev->caps.port_type[i] = sensed_port; 517 } else { 518 dev->caps.possible_type[i] = dev->caps.port_type[i]; 519 } 520 521 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { 522 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; 523 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", 524 i, 1 << dev->caps.log_num_macs); 525 } 526 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { 527 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; 528 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", 529 i, 1 << dev->caps.log_num_vlans); 530 } 531 } 532 533 if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) && 534 (port_type_array[0] == MLX4_PORT_TYPE_IB) && 535 (port_type_array[1] == MLX4_PORT_TYPE_ETH)) { 536 mlx4_warn(dev, 537 "Granular QoS per VF not supported with IB/Eth configuration\n"); 538 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP; 539 } 540 541 dev->caps.max_counters = dev_cap->max_counters; 542 543 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 544 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 545 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 546 (1 << dev->caps.log_num_macs) * 547 (1 << dev->caps.log_num_vlans) * 548 dev->caps.num_ports; 549 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 550 551 if (dev_cap->dmfs_high_rate_qpn_base > 0 && 552 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) 553 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; 554 else 555 dev->caps.dmfs_high_rate_qpn_base = 556 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 557 558 if (dev_cap->dmfs_high_rate_qpn_range > 0 && 559 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { 560 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; 561 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; 562 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; 563 } else { 564 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; 565 dev->caps.dmfs_high_rate_qpn_base = 566 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 567 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; 568 } 569 570 dev->caps.rl_caps = dev_cap->rl_caps; 571 572 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = 573 dev->caps.dmfs_high_rate_qpn_range; 574 575 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 576 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 577 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 578 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 579 580 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; 581 582 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { 583 if (dev_cap->flags & 584 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) { 585 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); 586 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; 587 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; 588 } 589 590 if (dev_cap->flags2 & 591 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | 592 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { 593 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); 594 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; 595 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; 596 } 597 } 598 599 if ((dev->caps.flags & 600 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) && 601 mlx4_is_master(dev)) 602 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; 603 604 if (!mlx4_is_slave(dev)) { 605 mlx4_enable_cqe_eqe_stride(dev); 606 dev->caps.alloc_res_qp_mask = 607 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | 608 MLX4_RESERVE_A0_QP; 609 610 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) && 611 dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { 612 mlx4_warn(dev, "Old device ETS support detected\n"); 613 mlx4_warn(dev, "Consider upgrading device FW.\n"); 614 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; 615 } 616 617 } else { 618 dev->caps.alloc_res_qp_mask = 0; 619 } 620 621 mlx4_enable_ignore_fcs(dev); 622 623 return 0; 624 } 625 626 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, 627 enum pci_bus_speed *speed, 628 enum pcie_link_width *width) 629 { 630 u32 lnkcap1, lnkcap2; 631 int err1, err2; 632 633 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ 634 635 *speed = PCI_SPEED_UNKNOWN; 636 *width = PCIE_LNK_WIDTH_UNKNOWN; 637 638 err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP, 639 &lnkcap1); 640 err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2, 641 &lnkcap2); 642 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ 643 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 644 *speed = PCIE_SPEED_8_0GT; 645 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 646 *speed = PCIE_SPEED_5_0GT; 647 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 648 *speed = PCIE_SPEED_2_5GT; 649 } 650 if (!err1) { 651 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; 652 if (!lnkcap2) { /* pre-r3.0 */ 653 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) 654 *speed = PCIE_SPEED_5_0GT; 655 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) 656 *speed = PCIE_SPEED_2_5GT; 657 } 658 } 659 660 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { 661 return err1 ? err1 : 662 err2 ? err2 : -EINVAL; 663 } 664 return 0; 665 } 666 667 static void mlx4_check_pcie_caps(struct mlx4_dev *dev) 668 { 669 enum pcie_link_width width, width_cap; 670 enum pci_bus_speed speed, speed_cap; 671 int err; 672 673 #define PCIE_SPEED_STR(speed) \ 674 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ 675 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ 676 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \ 677 "Unknown") 678 679 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); 680 if (err) { 681 mlx4_warn(dev, 682 "Unable to determine PCIe device BW capabilities\n"); 683 return; 684 } 685 686 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); 687 if (err || speed == PCI_SPEED_UNKNOWN || 688 width == PCIE_LNK_WIDTH_UNKNOWN) { 689 mlx4_warn(dev, 690 "Unable to determine PCI device chain minimum BW\n"); 691 return; 692 } 693 694 if (width != width_cap || speed != speed_cap) 695 mlx4_warn(dev, 696 "PCIe BW is different than device's capability\n"); 697 698 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", 699 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 700 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", 701 width, width_cap); 702 return; 703 } 704 705 /*The function checks if there are live vf, return the num of them*/ 706 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 707 { 708 struct mlx4_priv *priv = mlx4_priv(dev); 709 struct mlx4_slave_state *s_state; 710 int i; 711 int ret = 0; 712 713 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 714 s_state = &priv->mfunc.master.slave_state[i]; 715 if (s_state->active && s_state->last_cmd != 716 MLX4_COMM_CMD_RESET) { 717 mlx4_warn(dev, "%s: slave: %d is still active\n", 718 __func__, i); 719 ret++; 720 } 721 } 722 return ret; 723 } 724 725 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 726 { 727 u32 qk = MLX4_RESERVED_QKEY_BASE; 728 729 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 730 qpn < dev->phys_caps.base_proxy_sqpn) 731 return -EINVAL; 732 733 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 734 /* tunnel qp */ 735 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 736 else 737 qk += qpn - dev->phys_caps.base_proxy_sqpn; 738 *qkey = qk; 739 return 0; 740 } 741 EXPORT_SYMBOL(mlx4_get_parav_qkey); 742 743 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 744 { 745 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 746 747 if (!mlx4_is_master(dev)) 748 return; 749 750 priv->virt2phys_pkey[slave][port - 1][i] = val; 751 } 752 EXPORT_SYMBOL(mlx4_sync_pkey_table); 753 754 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 755 { 756 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 757 758 if (!mlx4_is_master(dev)) 759 return; 760 761 priv->slave_node_guids[slave] = guid; 762 } 763 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 764 765 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 766 { 767 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 768 769 if (!mlx4_is_master(dev)) 770 return 0; 771 772 return priv->slave_node_guids[slave]; 773 } 774 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 775 776 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 777 { 778 struct mlx4_priv *priv = mlx4_priv(dev); 779 struct mlx4_slave_state *s_slave; 780 781 if (!mlx4_is_master(dev)) 782 return 0; 783 784 s_slave = &priv->mfunc.master.slave_state[slave]; 785 return !!s_slave->active; 786 } 787 EXPORT_SYMBOL(mlx4_is_slave_active); 788 789 static void slave_adjust_steering_mode(struct mlx4_dev *dev, 790 struct mlx4_dev_cap *dev_cap, 791 struct mlx4_init_hca_param *hca_param) 792 { 793 dev->caps.steering_mode = hca_param->steering_mode; 794 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 795 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 796 dev->caps.fs_log_max_ucast_qp_range_size = 797 dev_cap->fs_log_max_ucast_qp_range_size; 798 } else 799 dev->caps.num_qp_per_mgm = 800 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); 801 802 mlx4_dbg(dev, "Steering mode is: %s\n", 803 mlx4_steering_mode_str(dev->caps.steering_mode)); 804 } 805 806 static int mlx4_slave_cap(struct mlx4_dev *dev) 807 { 808 int err; 809 u32 page_size; 810 struct mlx4_dev_cap dev_cap; 811 struct mlx4_func_cap func_cap; 812 struct mlx4_init_hca_param hca_param; 813 u8 i; 814 815 memset(&hca_param, 0, sizeof(hca_param)); 816 err = mlx4_QUERY_HCA(dev, &hca_param); 817 if (err) { 818 mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); 819 return err; 820 } 821 822 /* fail if the hca has an unknown global capability 823 * at this time global_caps should be always zeroed 824 */ 825 if (hca_param.global_caps) { 826 mlx4_err(dev, "Unknown hca global capabilities\n"); 827 return -ENOSYS; 828 } 829 830 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 831 832 dev->caps.hca_core_clock = hca_param.hca_core_clock; 833 834 memset(&dev_cap, 0, sizeof(dev_cap)); 835 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; 836 err = mlx4_dev_cap(dev, &dev_cap); 837 if (err) { 838 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 839 return err; 840 } 841 842 err = mlx4_QUERY_FW(dev); 843 if (err) 844 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); 845 846 page_size = ~dev->caps.page_size_cap + 1; 847 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 848 if (page_size > PAGE_SIZE) { 849 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", 850 page_size, (long)PAGE_SIZE); 851 return -ENODEV; 852 } 853 854 /* Set uar_page_shift for VF */ 855 dev->uar_page_shift = hca_param.uar_page_sz + 12; 856 857 /* Make sure the master uar page size is valid */ 858 if (dev->uar_page_shift > PAGE_SHIFT) { 859 mlx4_err(dev, 860 "Invalid configuration: uar page size is larger than system page size\n"); 861 return -ENODEV; 862 } 863 864 /* Set reserved_uars based on the uar_page_shift */ 865 mlx4_set_num_reserved_uars(dev, &dev_cap); 866 867 /* Although uar page size in FW differs from system page size, 868 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core) 869 * still works with assumption that uar page size == system page size 870 */ 871 dev->caps.uar_page_size = PAGE_SIZE; 872 873 memset(&func_cap, 0, sizeof(func_cap)); 874 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 875 if (err) { 876 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", 877 err); 878 return err; 879 } 880 881 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 882 PF_CONTEXT_BEHAVIOUR_MASK) { 883 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", 884 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK); 885 return -ENOSYS; 886 } 887 888 dev->caps.num_ports = func_cap.num_ports; 889 dev->quotas.qp = func_cap.qp_quota; 890 dev->quotas.srq = func_cap.srq_quota; 891 dev->quotas.cq = func_cap.cq_quota; 892 dev->quotas.mpt = func_cap.mpt_quota; 893 dev->quotas.mtt = func_cap.mtt_quota; 894 dev->caps.num_qps = 1 << hca_param.log_num_qps; 895 dev->caps.num_srqs = 1 << hca_param.log_num_srqs; 896 dev->caps.num_cqs = 1 << hca_param.log_num_cqs; 897 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 898 dev->caps.num_eqs = func_cap.max_eq; 899 dev->caps.reserved_eqs = func_cap.reserved_eq; 900 dev->caps.reserved_lkey = func_cap.reserved_lkey; 901 dev->caps.num_pds = MLX4_NUM_PDS; 902 dev->caps.num_mgms = 0; 903 dev->caps.num_amgms = 0; 904 905 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 906 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", 907 dev->caps.num_ports, MLX4_MAX_PORTS); 908 return -ENODEV; 909 } 910 911 mlx4_replace_zero_macs(dev); 912 913 dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); 914 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 915 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 916 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 917 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); 918 919 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || 920 !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || 921 !dev->caps.qp0_qkey) { 922 err = -ENOMEM; 923 goto err_mem; 924 } 925 926 for (i = 1; i <= dev->caps.num_ports; ++i) { 927 err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); 928 if (err) { 929 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", 930 i, err); 931 goto err_mem; 932 } 933 dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey; 934 dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; 935 dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn; 936 dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; 937 dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; 938 dev->caps.port_mask[i] = dev->caps.port_type[i]; 939 dev->caps.phys_port_id[i] = func_cap.phys_port_id; 940 err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, 941 &dev->caps.gid_table_len[i], 942 &dev->caps.pkey_table_len[i]); 943 if (err) 944 goto err_mem; 945 } 946 947 if (dev->caps.uar_page_size * (dev->caps.num_uars - 948 dev->caps.reserved_uars) > 949 pci_resource_len(dev->persist->pdev, 950 2)) { 951 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", 952 dev->caps.uar_page_size * dev->caps.num_uars, 953 (unsigned long long) 954 pci_resource_len(dev->persist->pdev, 2)); 955 err = -ENOMEM; 956 goto err_mem; 957 } 958 959 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) { 960 dev->caps.eqe_size = 64; 961 dev->caps.eqe_factor = 1; 962 } else { 963 dev->caps.eqe_size = 32; 964 dev->caps.eqe_factor = 0; 965 } 966 967 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { 968 dev->caps.cqe_size = 64; 969 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 970 } else { 971 dev->caps.cqe_size = 32; 972 } 973 974 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { 975 dev->caps.eqe_size = hca_param.eqe_size; 976 dev->caps.eqe_factor = 0; 977 } 978 979 if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { 980 dev->caps.cqe_size = hca_param.cqe_size; 981 /* User still need to know when CQE > 32B */ 982 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 983 } 984 985 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 986 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); 987 988 slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 989 mlx4_dbg(dev, "RSS support for IP fragments is %s\n", 990 hca_param.rss_ip_frags ? "on" : "off"); 991 992 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 993 dev->caps.bf_reg_size) 994 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 995 996 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 997 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 998 999 return 0; 1000 1001 err_mem: 1002 kfree(dev->caps.qp0_qkey); 1003 kfree(dev->caps.qp0_tunnel); 1004 kfree(dev->caps.qp0_proxy); 1005 kfree(dev->caps.qp1_tunnel); 1006 kfree(dev->caps.qp1_proxy); 1007 dev->caps.qp0_qkey = NULL; 1008 dev->caps.qp0_tunnel = NULL; 1009 dev->caps.qp0_proxy = NULL; 1010 dev->caps.qp1_tunnel = NULL; 1011 dev->caps.qp1_proxy = NULL; 1012 1013 return err; 1014 } 1015 1016 static void mlx4_request_modules(struct mlx4_dev *dev) 1017 { 1018 int port; 1019 int has_ib_port = false; 1020 int has_eth_port = false; 1021 #define EN_DRV_NAME "mlx4_en" 1022 #define IB_DRV_NAME "mlx4_ib" 1023 1024 for (port = 1; port <= dev->caps.num_ports; port++) { 1025 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 1026 has_ib_port = true; 1027 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 1028 has_eth_port = true; 1029 } 1030 1031 if (has_eth_port) 1032 request_module_nowait(EN_DRV_NAME); 1033 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 1034 request_module_nowait(IB_DRV_NAME); 1035 } 1036 1037 /* 1038 * Change the port configuration of the device. 1039 * Every user of this function must hold the port mutex. 1040 */ 1041 int mlx4_change_port_types(struct mlx4_dev *dev, 1042 enum mlx4_port_type *port_types) 1043 { 1044 int err = 0; 1045 int change = 0; 1046 int port; 1047 1048 for (port = 0; port < dev->caps.num_ports; port++) { 1049 /* Change the port type only if the new type is different 1050 * from the current, and not set to Auto */ 1051 if (port_types[port] != dev->caps.port_type[port + 1]) 1052 change = 1; 1053 } 1054 if (change) { 1055 mlx4_unregister_device(dev); 1056 for (port = 1; port <= dev->caps.num_ports; port++) { 1057 mlx4_CLOSE_PORT(dev, port); 1058 dev->caps.port_type[port] = port_types[port - 1]; 1059 err = mlx4_SET_PORT(dev, port, -1); 1060 if (err) { 1061 mlx4_err(dev, "Failed to set port %d, aborting\n", 1062 port); 1063 goto out; 1064 } 1065 } 1066 mlx4_set_port_mask(dev); 1067 err = mlx4_register_device(dev); 1068 if (err) { 1069 mlx4_err(dev, "Failed to register device\n"); 1070 goto out; 1071 } 1072 mlx4_request_modules(dev); 1073 } 1074 1075 out: 1076 return err; 1077 } 1078 1079 static ssize_t show_port_type(struct device *dev, 1080 struct device_attribute *attr, 1081 char *buf) 1082 { 1083 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1084 port_attr); 1085 struct mlx4_dev *mdev = info->dev; 1086 char type[8]; 1087 1088 sprintf(type, "%s", 1089 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 1090 "ib" : "eth"); 1091 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 1092 sprintf(buf, "auto (%s)\n", type); 1093 else 1094 sprintf(buf, "%s\n", type); 1095 1096 return strlen(buf); 1097 } 1098 1099 static int __set_port_type(struct mlx4_port_info *info, 1100 enum mlx4_port_type port_type) 1101 { 1102 struct mlx4_dev *mdev = info->dev; 1103 struct mlx4_priv *priv = mlx4_priv(mdev); 1104 enum mlx4_port_type types[MLX4_MAX_PORTS]; 1105 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 1106 int i; 1107 int err = 0; 1108 1109 if ((port_type & mdev->caps.supported_type[info->port]) != port_type) { 1110 mlx4_err(mdev, 1111 "Requested port type for port %d is not supported on this HCA\n", 1112 info->port); 1113 err = -EINVAL; 1114 goto err_sup; 1115 } 1116 1117 mlx4_stop_sense(mdev); 1118 mutex_lock(&priv->port_mutex); 1119 info->tmp_type = port_type; 1120 1121 /* Possible type is always the one that was delivered */ 1122 mdev->caps.possible_type[info->port] = info->tmp_type; 1123 1124 for (i = 0; i < mdev->caps.num_ports; i++) { 1125 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 1126 mdev->caps.possible_type[i+1]; 1127 if (types[i] == MLX4_PORT_TYPE_AUTO) 1128 types[i] = mdev->caps.port_type[i+1]; 1129 } 1130 1131 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 1132 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 1133 for (i = 1; i <= mdev->caps.num_ports; i++) { 1134 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 1135 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 1136 err = -EINVAL; 1137 } 1138 } 1139 } 1140 if (err) { 1141 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n"); 1142 goto out; 1143 } 1144 1145 mlx4_do_sense_ports(mdev, new_types, types); 1146 1147 err = mlx4_check_port_params(mdev, new_types); 1148 if (err) 1149 goto out; 1150 1151 /* We are about to apply the changes after the configuration 1152 * was verified, no need to remember the temporary types 1153 * any more */ 1154 for (i = 0; i < mdev->caps.num_ports; i++) 1155 priv->port[i + 1].tmp_type = 0; 1156 1157 err = mlx4_change_port_types(mdev, new_types); 1158 1159 out: 1160 mutex_unlock(&priv->port_mutex); 1161 mlx4_start_sense(mdev); 1162 err_sup: 1163 return err; 1164 } 1165 1166 static ssize_t set_port_type(struct device *dev, 1167 struct device_attribute *attr, 1168 const char *buf, size_t count) 1169 { 1170 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1171 port_attr); 1172 struct mlx4_dev *mdev = info->dev; 1173 enum mlx4_port_type port_type; 1174 static DEFINE_MUTEX(set_port_type_mutex); 1175 int err; 1176 1177 mutex_lock(&set_port_type_mutex); 1178 1179 if (!strcmp(buf, "ib\n")) { 1180 port_type = MLX4_PORT_TYPE_IB; 1181 } else if (!strcmp(buf, "eth\n")) { 1182 port_type = MLX4_PORT_TYPE_ETH; 1183 } else if (!strcmp(buf, "auto\n")) { 1184 port_type = MLX4_PORT_TYPE_AUTO; 1185 } else { 1186 mlx4_err(mdev, "%s is not supported port type\n", buf); 1187 err = -EINVAL; 1188 goto err_out; 1189 } 1190 1191 err = __set_port_type(info, port_type); 1192 1193 err_out: 1194 mutex_unlock(&set_port_type_mutex); 1195 1196 return err ? err : count; 1197 } 1198 1199 enum ibta_mtu { 1200 IB_MTU_256 = 1, 1201 IB_MTU_512 = 2, 1202 IB_MTU_1024 = 3, 1203 IB_MTU_2048 = 4, 1204 IB_MTU_4096 = 5 1205 }; 1206 1207 static inline int int_to_ibta_mtu(int mtu) 1208 { 1209 switch (mtu) { 1210 case 256: return IB_MTU_256; 1211 case 512: return IB_MTU_512; 1212 case 1024: return IB_MTU_1024; 1213 case 2048: return IB_MTU_2048; 1214 case 4096: return IB_MTU_4096; 1215 default: return -1; 1216 } 1217 } 1218 1219 static inline int ibta_mtu_to_int(enum ibta_mtu mtu) 1220 { 1221 switch (mtu) { 1222 case IB_MTU_256: return 256; 1223 case IB_MTU_512: return 512; 1224 case IB_MTU_1024: return 1024; 1225 case IB_MTU_2048: return 2048; 1226 case IB_MTU_4096: return 4096; 1227 default: return -1; 1228 } 1229 } 1230 1231 static ssize_t show_port_ib_mtu(struct device *dev, 1232 struct device_attribute *attr, 1233 char *buf) 1234 { 1235 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1236 port_mtu_attr); 1237 struct mlx4_dev *mdev = info->dev; 1238 1239 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) 1240 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1241 1242 sprintf(buf, "%d\n", 1243 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port])); 1244 return strlen(buf); 1245 } 1246 1247 static ssize_t set_port_ib_mtu(struct device *dev, 1248 struct device_attribute *attr, 1249 const char *buf, size_t count) 1250 { 1251 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1252 port_mtu_attr); 1253 struct mlx4_dev *mdev = info->dev; 1254 struct mlx4_priv *priv = mlx4_priv(mdev); 1255 int err, port, mtu, ibta_mtu = -1; 1256 1257 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) { 1258 mlx4_warn(mdev, "port level mtu is only used for IB ports\n"); 1259 return -EINVAL; 1260 } 1261 1262 err = kstrtoint(buf, 0, &mtu); 1263 if (!err) 1264 ibta_mtu = int_to_ibta_mtu(mtu); 1265 1266 if (err || ibta_mtu < 0) { 1267 mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf); 1268 return -EINVAL; 1269 } 1270 1271 mdev->caps.port_ib_mtu[info->port] = ibta_mtu; 1272 1273 mlx4_stop_sense(mdev); 1274 mutex_lock(&priv->port_mutex); 1275 mlx4_unregister_device(mdev); 1276 for (port = 1; port <= mdev->caps.num_ports; port++) { 1277 mlx4_CLOSE_PORT(mdev, port); 1278 err = mlx4_SET_PORT(mdev, port, -1); 1279 if (err) { 1280 mlx4_err(mdev, "Failed to set port %d, aborting\n", 1281 port); 1282 goto err_set_port; 1283 } 1284 } 1285 err = mlx4_register_device(mdev); 1286 err_set_port: 1287 mutex_unlock(&priv->port_mutex); 1288 mlx4_start_sense(mdev); 1289 return err ? err : count; 1290 } 1291 1292 /* bond for multi-function device */ 1293 #define MAX_MF_BOND_ALLOWED_SLAVES 63 1294 static int mlx4_mf_bond(struct mlx4_dev *dev) 1295 { 1296 int err = 0; 1297 int nvfs; 1298 struct mlx4_slaves_pport slaves_port1; 1299 struct mlx4_slaves_pport slaves_port2; 1300 DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); 1301 1302 slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1); 1303 slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2); 1304 bitmap_and(slaves_port_1_2, 1305 slaves_port1.slaves, slaves_port2.slaves, 1306 dev->persist->num_vfs + 1); 1307 1308 /* only single port vfs are allowed */ 1309 if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) { 1310 mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n"); 1311 return -EINVAL; 1312 } 1313 1314 /* number of virtual functions is number of total functions minus one 1315 * physical function for each port. 1316 */ 1317 nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + 1318 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2; 1319 1320 /* limit on maximum allowed VFs */ 1321 if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) { 1322 mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n", 1323 nvfs, MAX_MF_BOND_ALLOWED_SLAVES); 1324 return -EINVAL; 1325 } 1326 1327 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { 1328 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); 1329 return -EINVAL; 1330 } 1331 1332 err = mlx4_bond_mac_table(dev); 1333 if (err) 1334 return err; 1335 err = mlx4_bond_vlan_table(dev); 1336 if (err) 1337 goto err1; 1338 err = mlx4_bond_fs_rules(dev); 1339 if (err) 1340 goto err2; 1341 1342 return 0; 1343 err2: 1344 (void)mlx4_unbond_vlan_table(dev); 1345 err1: 1346 (void)mlx4_unbond_mac_table(dev); 1347 return err; 1348 } 1349 1350 static int mlx4_mf_unbond(struct mlx4_dev *dev) 1351 { 1352 int ret, ret1; 1353 1354 ret = mlx4_unbond_fs_rules(dev); 1355 if (ret) 1356 mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); 1357 ret1 = mlx4_unbond_mac_table(dev); 1358 if (ret1) { 1359 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); 1360 ret = ret1; 1361 } 1362 ret1 = mlx4_unbond_vlan_table(dev); 1363 if (ret1) { 1364 mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1); 1365 ret = ret1; 1366 } 1367 return ret; 1368 } 1369 1370 int mlx4_bond(struct mlx4_dev *dev) 1371 { 1372 int ret = 0; 1373 struct mlx4_priv *priv = mlx4_priv(dev); 1374 1375 mutex_lock(&priv->bond_mutex); 1376 1377 if (!mlx4_is_bonded(dev)) { 1378 ret = mlx4_do_bond(dev, true); 1379 if (ret) 1380 mlx4_err(dev, "Failed to bond device: %d\n", ret); 1381 if (!ret && mlx4_is_master(dev)) { 1382 ret = mlx4_mf_bond(dev); 1383 if (ret) { 1384 mlx4_err(dev, "bond for multifunction failed\n"); 1385 mlx4_do_bond(dev, false); 1386 } 1387 } 1388 } 1389 1390 mutex_unlock(&priv->bond_mutex); 1391 if (!ret) 1392 mlx4_dbg(dev, "Device is bonded\n"); 1393 1394 return ret; 1395 } 1396 EXPORT_SYMBOL_GPL(mlx4_bond); 1397 1398 int mlx4_unbond(struct mlx4_dev *dev) 1399 { 1400 int ret = 0; 1401 struct mlx4_priv *priv = mlx4_priv(dev); 1402 1403 mutex_lock(&priv->bond_mutex); 1404 1405 if (mlx4_is_bonded(dev)) { 1406 int ret2 = 0; 1407 1408 ret = mlx4_do_bond(dev, false); 1409 if (ret) 1410 mlx4_err(dev, "Failed to unbond device: %d\n", ret); 1411 if (mlx4_is_master(dev)) 1412 ret2 = mlx4_mf_unbond(dev); 1413 if (ret2) { 1414 mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2); 1415 ret = ret2; 1416 } 1417 } 1418 1419 mutex_unlock(&priv->bond_mutex); 1420 if (!ret) 1421 mlx4_dbg(dev, "Device is unbonded\n"); 1422 1423 return ret; 1424 } 1425 EXPORT_SYMBOL_GPL(mlx4_unbond); 1426 1427 1428 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) 1429 { 1430 u8 port1 = v2p->port1; 1431 u8 port2 = v2p->port2; 1432 struct mlx4_priv *priv = mlx4_priv(dev); 1433 int err; 1434 1435 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) 1436 return -ENOTSUPP; 1437 1438 mutex_lock(&priv->bond_mutex); 1439 1440 /* zero means keep current mapping for this port */ 1441 if (port1 == 0) 1442 port1 = priv->v2p.port1; 1443 if (port2 == 0) 1444 port2 = priv->v2p.port2; 1445 1446 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) || 1447 (port2 < 1) || (port2 > MLX4_MAX_PORTS) || 1448 (port1 == 2 && port2 == 1)) { 1449 /* besides boundary checks cross mapping makes 1450 * no sense and therefore not allowed */ 1451 err = -EINVAL; 1452 } else if ((port1 == priv->v2p.port1) && 1453 (port2 == priv->v2p.port2)) { 1454 err = 0; 1455 } else { 1456 err = mlx4_virt2phy_port_map(dev, port1, port2); 1457 if (!err) { 1458 mlx4_dbg(dev, "port map changed: [%d][%d]\n", 1459 port1, port2); 1460 priv->v2p.port1 = port1; 1461 priv->v2p.port2 = port2; 1462 } else { 1463 mlx4_err(dev, "Failed to change port mape: %d\n", err); 1464 } 1465 } 1466 1467 mutex_unlock(&priv->bond_mutex); 1468 return err; 1469 } 1470 EXPORT_SYMBOL_GPL(mlx4_port_map_set); 1471 1472 static int mlx4_load_fw(struct mlx4_dev *dev) 1473 { 1474 struct mlx4_priv *priv = mlx4_priv(dev); 1475 int err; 1476 1477 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 1478 GFP_HIGHUSER | __GFP_NOWARN, 0); 1479 if (!priv->fw.fw_icm) { 1480 mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); 1481 return -ENOMEM; 1482 } 1483 1484 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 1485 if (err) { 1486 mlx4_err(dev, "MAP_FA command failed, aborting\n"); 1487 goto err_free; 1488 } 1489 1490 err = mlx4_RUN_FW(dev); 1491 if (err) { 1492 mlx4_err(dev, "RUN_FW command failed, aborting\n"); 1493 goto err_unmap_fa; 1494 } 1495 1496 return 0; 1497 1498 err_unmap_fa: 1499 mlx4_UNMAP_FA(dev); 1500 1501 err_free: 1502 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1503 return err; 1504 } 1505 1506 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 1507 int cmpt_entry_sz) 1508 { 1509 struct mlx4_priv *priv = mlx4_priv(dev); 1510 int err; 1511 int num_eqs; 1512 1513 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 1514 cmpt_base + 1515 ((u64) (MLX4_CMPT_TYPE_QP * 1516 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1517 cmpt_entry_sz, dev->caps.num_qps, 1518 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1519 0, 0); 1520 if (err) 1521 goto err; 1522 1523 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 1524 cmpt_base + 1525 ((u64) (MLX4_CMPT_TYPE_SRQ * 1526 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1527 cmpt_entry_sz, dev->caps.num_srqs, 1528 dev->caps.reserved_srqs, 0, 0); 1529 if (err) 1530 goto err_qp; 1531 1532 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 1533 cmpt_base + 1534 ((u64) (MLX4_CMPT_TYPE_CQ * 1535 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1536 cmpt_entry_sz, dev->caps.num_cqs, 1537 dev->caps.reserved_cqs, 0, 0); 1538 if (err) 1539 goto err_srq; 1540 1541 num_eqs = dev->phys_caps.num_phys_eqs; 1542 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 1543 cmpt_base + 1544 ((u64) (MLX4_CMPT_TYPE_EQ * 1545 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 1546 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 1547 if (err) 1548 goto err_cq; 1549 1550 return 0; 1551 1552 err_cq: 1553 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1554 1555 err_srq: 1556 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1557 1558 err_qp: 1559 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1560 1561 err: 1562 return err; 1563 } 1564 1565 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 1566 struct mlx4_init_hca_param *init_hca, u64 icm_size) 1567 { 1568 struct mlx4_priv *priv = mlx4_priv(dev); 1569 u64 aux_pages; 1570 int num_eqs; 1571 int err; 1572 1573 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 1574 if (err) { 1575 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); 1576 return err; 1577 } 1578 1579 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", 1580 (unsigned long long) icm_size >> 10, 1581 (unsigned long long) aux_pages << 2); 1582 1583 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 1584 GFP_HIGHUSER | __GFP_NOWARN, 0); 1585 if (!priv->fw.aux_icm) { 1586 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); 1587 return -ENOMEM; 1588 } 1589 1590 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 1591 if (err) { 1592 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); 1593 goto err_free_aux; 1594 } 1595 1596 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 1597 if (err) { 1598 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); 1599 goto err_unmap_aux; 1600 } 1601 1602 1603 num_eqs = dev->phys_caps.num_phys_eqs; 1604 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 1605 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1606 num_eqs, num_eqs, 0, 0); 1607 if (err) { 1608 mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); 1609 goto err_unmap_cmpt; 1610 } 1611 1612 /* 1613 * Reserved MTT entries must be aligned up to a cacheline 1614 * boundary, since the FW will write to them, while the driver 1615 * writes to all other MTT entries. (The variable 1616 * dev->caps.mtt_entry_sz below is really the MTT segment 1617 * size, not the raw entry size) 1618 */ 1619 dev->caps.reserved_mtts = 1620 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 1621 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 1622 1623 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 1624 init_hca->mtt_base, 1625 dev->caps.mtt_entry_sz, 1626 dev->caps.num_mtts, 1627 dev->caps.reserved_mtts, 1, 0); 1628 if (err) { 1629 mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); 1630 goto err_unmap_eq; 1631 } 1632 1633 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 1634 init_hca->dmpt_base, 1635 dev_cap->dmpt_entry_sz, 1636 dev->caps.num_mpts, 1637 dev->caps.reserved_mrws, 1, 1); 1638 if (err) { 1639 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); 1640 goto err_unmap_mtt; 1641 } 1642 1643 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 1644 init_hca->qpc_base, 1645 dev_cap->qpc_entry_sz, 1646 dev->caps.num_qps, 1647 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1648 0, 0); 1649 if (err) { 1650 mlx4_err(dev, "Failed to map QP context memory, aborting\n"); 1651 goto err_unmap_dmpt; 1652 } 1653 1654 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 1655 init_hca->auxc_base, 1656 dev_cap->aux_entry_sz, 1657 dev->caps.num_qps, 1658 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1659 0, 0); 1660 if (err) { 1661 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); 1662 goto err_unmap_qp; 1663 } 1664 1665 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 1666 init_hca->altc_base, 1667 dev_cap->altc_entry_sz, 1668 dev->caps.num_qps, 1669 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1670 0, 0); 1671 if (err) { 1672 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); 1673 goto err_unmap_auxc; 1674 } 1675 1676 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 1677 init_hca->rdmarc_base, 1678 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 1679 dev->caps.num_qps, 1680 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 1681 0, 0); 1682 if (err) { 1683 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 1684 goto err_unmap_altc; 1685 } 1686 1687 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 1688 init_hca->cqc_base, 1689 dev_cap->cqc_entry_sz, 1690 dev->caps.num_cqs, 1691 dev->caps.reserved_cqs, 0, 0); 1692 if (err) { 1693 mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); 1694 goto err_unmap_rdmarc; 1695 } 1696 1697 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 1698 init_hca->srqc_base, 1699 dev_cap->srq_entry_sz, 1700 dev->caps.num_srqs, 1701 dev->caps.reserved_srqs, 0, 0); 1702 if (err) { 1703 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); 1704 goto err_unmap_cq; 1705 } 1706 1707 /* 1708 * For flow steering device managed mode it is required to use 1709 * mlx4_init_icm_table. For B0 steering mode it's not strictly 1710 * required, but for simplicity just map the whole multicast 1711 * group table now. The table isn't very big and it's a lot 1712 * easier than trying to track ref counts. 1713 */ 1714 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 1715 init_hca->mc_base, 1716 mlx4_get_mgm_entry_size(dev), 1717 dev->caps.num_mgms + dev->caps.num_amgms, 1718 dev->caps.num_mgms + dev->caps.num_amgms, 1719 0, 0); 1720 if (err) { 1721 mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); 1722 goto err_unmap_srq; 1723 } 1724 1725 return 0; 1726 1727 err_unmap_srq: 1728 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1729 1730 err_unmap_cq: 1731 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1732 1733 err_unmap_rdmarc: 1734 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1735 1736 err_unmap_altc: 1737 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1738 1739 err_unmap_auxc: 1740 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1741 1742 err_unmap_qp: 1743 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1744 1745 err_unmap_dmpt: 1746 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1747 1748 err_unmap_mtt: 1749 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1750 1751 err_unmap_eq: 1752 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1753 1754 err_unmap_cmpt: 1755 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1756 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1757 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1758 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1759 1760 err_unmap_aux: 1761 mlx4_UNMAP_ICM_AUX(dev); 1762 1763 err_free_aux: 1764 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1765 1766 return err; 1767 } 1768 1769 static void mlx4_free_icms(struct mlx4_dev *dev) 1770 { 1771 struct mlx4_priv *priv = mlx4_priv(dev); 1772 1773 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 1774 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 1775 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 1776 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 1777 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 1778 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 1779 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 1780 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 1781 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 1782 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 1783 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 1784 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 1785 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 1786 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 1787 1788 mlx4_UNMAP_ICM_AUX(dev); 1789 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 1790 } 1791 1792 static void mlx4_slave_exit(struct mlx4_dev *dev) 1793 { 1794 struct mlx4_priv *priv = mlx4_priv(dev); 1795 1796 mutex_lock(&priv->cmd.slave_cmd_mutex); 1797 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 1798 MLX4_COMM_TIME)) 1799 mlx4_warn(dev, "Failed to close slave function\n"); 1800 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1801 } 1802 1803 static int map_bf_area(struct mlx4_dev *dev) 1804 { 1805 struct mlx4_priv *priv = mlx4_priv(dev); 1806 resource_size_t bf_start; 1807 resource_size_t bf_len; 1808 int err = 0; 1809 1810 if (!dev->caps.bf_reg_size) 1811 return -ENXIO; 1812 1813 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1814 (dev->caps.num_uars << PAGE_SHIFT); 1815 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1816 (dev->caps.num_uars << PAGE_SHIFT); 1817 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1818 if (!priv->bf_mapping) 1819 err = -ENOMEM; 1820 1821 return err; 1822 } 1823 1824 static void unmap_bf_area(struct mlx4_dev *dev) 1825 { 1826 if (mlx4_priv(dev)->bf_mapping) 1827 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1828 } 1829 1830 s64 mlx4_read_clock(struct mlx4_dev *dev) 1831 { 1832 u32 clockhi, clocklo, clockhi1; 1833 s64 cycles; 1834 int i; 1835 struct mlx4_priv *priv = mlx4_priv(dev); 1836 1837 if (!priv->clock_mapping) 1838 return -ENOTSUPP; 1839 1840 for (i = 0; i < 10; i++) { 1841 clockhi = swab32(readl(priv->clock_mapping)); 1842 clocklo = swab32(readl(priv->clock_mapping + 4)); 1843 clockhi1 = swab32(readl(priv->clock_mapping)); 1844 if (clockhi == clockhi1) 1845 break; 1846 } 1847 1848 cycles = (u64) clockhi << 32 | (u64) clocklo; 1849 1850 return cycles & CORE_CLOCK_MASK; 1851 } 1852 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1853 1854 1855 static int map_internal_clock(struct mlx4_dev *dev) 1856 { 1857 struct mlx4_priv *priv = mlx4_priv(dev); 1858 1859 priv->clock_mapping = 1860 ioremap(pci_resource_start(dev->persist->pdev, 1861 priv->fw.clock_bar) + 1862 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1863 1864 if (!priv->clock_mapping) 1865 return -ENOMEM; 1866 1867 return 0; 1868 } 1869 1870 int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1871 struct mlx4_clock_params *params) 1872 { 1873 struct mlx4_priv *priv = mlx4_priv(dev); 1874 1875 if (mlx4_is_slave(dev)) 1876 return -ENOTSUPP; 1877 1878 if (!params) 1879 return -EINVAL; 1880 1881 params->bar = priv->fw.clock_bar; 1882 params->offset = priv->fw.clock_offset; 1883 params->size = MLX4_CLOCK_SIZE; 1884 1885 return 0; 1886 } 1887 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); 1888 1889 static void unmap_internal_clock(struct mlx4_dev *dev) 1890 { 1891 struct mlx4_priv *priv = mlx4_priv(dev); 1892 1893 if (priv->clock_mapping) 1894 iounmap(priv->clock_mapping); 1895 } 1896 1897 static void mlx4_close_hca(struct mlx4_dev *dev) 1898 { 1899 sysctl_ctx_free(&dev->hw_ctx); 1900 unmap_internal_clock(dev); 1901 unmap_bf_area(dev); 1902 if (mlx4_is_slave(dev)) 1903 mlx4_slave_exit(dev); 1904 else { 1905 mlx4_CLOSE_HCA(dev, 0); 1906 mlx4_free_icms(dev); 1907 } 1908 } 1909 1910 static void mlx4_close_fw(struct mlx4_dev *dev) 1911 { 1912 if (!mlx4_is_slave(dev)) { 1913 mlx4_UNMAP_FA(dev); 1914 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1915 } 1916 } 1917 1918 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 1919 { 1920 #define COMM_CHAN_OFFLINE_OFFSET 0x09 1921 1922 u32 comm_flags; 1923 u32 offline_bit; 1924 unsigned long end; 1925 struct mlx4_priv *priv = mlx4_priv(dev); 1926 1927 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 1928 while (time_before(jiffies, end)) { 1929 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 1930 MLX4_COMM_CHAN_FLAGS)); 1931 offline_bit = (comm_flags & 1932 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1933 if (!offline_bit) 1934 return 0; 1935 /* There are cases as part of AER/Reset flow that PF needs 1936 * around 100 msec to load. We therefore sleep for 100 msec 1937 * to allow other tasks to make use of that CPU during this 1938 * time interval. 1939 */ 1940 msleep(100); 1941 } 1942 mlx4_err(dev, "Communication channel is offline.\n"); 1943 return -EIO; 1944 } 1945 1946 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 1947 { 1948 #define COMM_CHAN_RST_OFFSET 0x1e 1949 1950 struct mlx4_priv *priv = mlx4_priv(dev); 1951 u32 comm_rst; 1952 u32 comm_caps; 1953 1954 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 1955 MLX4_COMM_CHAN_CAPS)); 1956 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 1957 1958 if (comm_rst) 1959 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 1960 } 1961 1962 static int mlx4_init_slave(struct mlx4_dev *dev) 1963 { 1964 struct mlx4_priv *priv = mlx4_priv(dev); 1965 u64 dma = (u64) priv->mfunc.vhcr_dma; 1966 int ret_from_reset = 0; 1967 u32 slave_read; 1968 u32 cmd_channel_ver; 1969 1970 if (atomic_read(&pf_loading)) { 1971 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 1972 return -EAGAIN; 1973 } 1974 1975 mutex_lock(&priv->cmd.slave_cmd_mutex); 1976 priv->cmd.max_cmds = 1; 1977 if (mlx4_comm_check_offline(dev)) { 1978 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 1979 goto err_offline; 1980 } 1981 1982 mlx4_reset_vf_support(dev); 1983 mlx4_warn(dev, "Sending reset\n"); 1984 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1985 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 1986 /* if we are in the middle of flr the slave will try 1987 * NUM_OF_RESET_RETRIES times before leaving.*/ 1988 if (ret_from_reset) { 1989 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1990 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 1991 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1992 return -EAGAIN; 1993 } else 1994 goto err; 1995 } 1996 1997 /* check the driver version - the slave I/F revision 1998 * must match the master's */ 1999 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 2000 cmd_channel_ver = mlx4_comm_get_version(); 2001 2002 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 2003 MLX4_COMM_GET_IF_REV(slave_read)) { 2004 mlx4_err(dev, "slave driver version is not supported by the master\n"); 2005 goto err; 2006 } 2007 2008 mlx4_warn(dev, "Sending vhcr0\n"); 2009 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 2010 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2011 goto err; 2012 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 2013 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2014 goto err; 2015 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 2016 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2017 goto err; 2018 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 2019 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2020 goto err; 2021 2022 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2023 return 0; 2024 2025 err: 2026 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 2027 err_offline: 2028 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2029 return -EIO; 2030 } 2031 2032 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 2033 { 2034 int i; 2035 2036 for (i = 1; i <= dev->caps.num_ports; i++) { 2037 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 2038 dev->caps.gid_table_len[i] = 2039 mlx4_get_slave_num_gids(dev, 0, i); 2040 else 2041 dev->caps.gid_table_len[i] = 1; 2042 dev->caps.pkey_table_len[i] = 2043 dev->phys_caps.pkey_phys_table_len[i] - 1; 2044 } 2045 } 2046 2047 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 2048 { 2049 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 2050 2051 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 2052 i++) { 2053 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 2054 break; 2055 } 2056 2057 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 2058 } 2059 2060 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 2061 { 2062 switch (dmfs_high_steer_mode) { 2063 case MLX4_STEERING_DMFS_A0_DEFAULT: 2064 return "default performance"; 2065 2066 case MLX4_STEERING_DMFS_A0_DYNAMIC: 2067 return "dynamic hybrid mode"; 2068 2069 case MLX4_STEERING_DMFS_A0_STATIC: 2070 return "performance optimized for limited rule configuration (static)"; 2071 2072 case MLX4_STEERING_DMFS_A0_DISABLE: 2073 return "disabled performance optimized steering"; 2074 2075 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 2076 return "performance optimized steering not supported"; 2077 2078 default: 2079 return "Unrecognized mode"; 2080 } 2081 } 2082 2083 #define MLX4_DMFS_A0_STEERING (1UL << 2) 2084 2085 static void choose_steering_mode(struct mlx4_dev *dev, 2086 struct mlx4_dev_cap *dev_cap) 2087 { 2088 if (mlx4_log_num_mgm_entry_size <= 0) { 2089 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 2090 if (dev->caps.dmfs_high_steer_mode == 2091 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2092 mlx4_err(dev, "DMFS high rate mode not supported\n"); 2093 else 2094 dev->caps.dmfs_high_steer_mode = 2095 MLX4_STEERING_DMFS_A0_STATIC; 2096 } 2097 } 2098 2099 if (mlx4_log_num_mgm_entry_size <= 0 && 2100 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 2101 (!mlx4_is_mfunc(dev) || 2102 (dev_cap->fs_max_num_qp_per_entry >= 2103 (dev->persist->num_vfs + 1))) && 2104 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 2105 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 2106 dev->oper_log_mgm_entry_size = 2107 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 2108 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2109 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 2110 dev->caps.fs_log_max_ucast_qp_range_size = 2111 dev_cap->fs_log_max_ucast_qp_range_size; 2112 } else { 2113 if (dev->caps.dmfs_high_steer_mode != 2114 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2115 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 2116 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 2117 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2118 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 2119 else { 2120 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 2121 2122 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 2123 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2124 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 2125 } 2126 dev->oper_log_mgm_entry_size = 2127 mlx4_log_num_mgm_entry_size > 0 ? 2128 mlx4_log_num_mgm_entry_size : 2129 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 2130 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 2131 } 2132 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 2133 mlx4_steering_mode_str(dev->caps.steering_mode), 2134 dev->oper_log_mgm_entry_size, 2135 mlx4_log_num_mgm_entry_size); 2136 } 2137 2138 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 2139 struct mlx4_dev_cap *dev_cap) 2140 { 2141 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 2142 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 2143 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 2144 else 2145 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 2146 2147 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 2148 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 2149 } 2150 2151 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 2152 { 2153 int i; 2154 struct mlx4_port_cap port_cap; 2155 2156 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2157 return -EINVAL; 2158 2159 for (i = 1; i <= dev->caps.num_ports; i++) { 2160 if (mlx4_dev_port(dev, i, &port_cap)) { 2161 mlx4_err(dev, 2162 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); 2163 } else if ((dev->caps.dmfs_high_steer_mode != 2164 MLX4_STEERING_DMFS_A0_DEFAULT) && 2165 (port_cap.dmfs_optimized_state == 2166 !!(dev->caps.dmfs_high_steer_mode == 2167 MLX4_STEERING_DMFS_A0_DISABLE))) { 2168 mlx4_err(dev, 2169 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n", 2170 dmfs_high_rate_steering_mode_str( 2171 dev->caps.dmfs_high_steer_mode), 2172 (port_cap.dmfs_optimized_state ? 2173 "enabled" : "disabled")); 2174 } 2175 } 2176 2177 return 0; 2178 } 2179 2180 static int mlx4_init_fw(struct mlx4_dev *dev) 2181 { 2182 struct mlx4_mod_stat_cfg mlx4_cfg; 2183 int err = 0; 2184 2185 if (!mlx4_is_slave(dev)) { 2186 err = mlx4_QUERY_FW(dev); 2187 if (err) { 2188 if (err == -EACCES) 2189 mlx4_info(dev, "non-primary physical function, skipping\n"); 2190 else 2191 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 2192 return err; 2193 } 2194 2195 err = mlx4_load_fw(dev); 2196 if (err) { 2197 mlx4_err(dev, "Failed to start FW, aborting\n"); 2198 return err; 2199 } 2200 2201 mlx4_cfg.log_pg_sz_m = 1; 2202 mlx4_cfg.log_pg_sz = 0; 2203 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 2204 if (err) 2205 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 2206 } 2207 2208 return err; 2209 } 2210 2211 static int mlx4_init_hca(struct mlx4_dev *dev) 2212 { 2213 struct mlx4_priv *priv = mlx4_priv(dev); 2214 struct mlx4_adapter adapter; 2215 struct mlx4_dev_cap dev_cap = {}; 2216 struct mlx4_profile profile; 2217 struct mlx4_init_hca_param init_hca; 2218 u64 icm_size; 2219 struct mlx4_config_dev_params params; 2220 int err; 2221 2222 if (!mlx4_is_slave(dev)) { 2223 err = mlx4_dev_cap(dev, &dev_cap); 2224 if (err) { 2225 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 2226 return err; 2227 } 2228 2229 choose_steering_mode(dev, &dev_cap); 2230 choose_tunnel_offload_mode(dev, &dev_cap); 2231 2232 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 2233 mlx4_is_master(dev)) 2234 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 2235 2236 err = mlx4_get_phys_port_id(dev); 2237 if (err) 2238 mlx4_err(dev, "Fail to get physical port id\n"); 2239 2240 if (mlx4_is_master(dev)) 2241 mlx4_parav_master_pf_caps(dev); 2242 2243 if (mlx4_low_memory_profile()) { 2244 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n"); 2245 profile = low_mem_profile; 2246 } else { 2247 profile = default_profile; 2248 } 2249 if (dev->caps.steering_mode == 2250 MLX4_STEERING_MODE_DEVICE_MANAGED) 2251 profile.num_mcg = MLX4_FS_NUM_MCG; 2252 2253 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 2254 &init_hca); 2255 if ((long long) icm_size < 0) { 2256 err = icm_size; 2257 return err; 2258 } 2259 2260 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2261 2262 if (enable_4k_uar) { 2263 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + 2264 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; 2265 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; 2266 } else { 2267 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2268 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2269 } 2270 2271 init_hca.mw_enabled = 0; 2272 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2273 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2274 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2275 2276 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 2277 if (err) 2278 return err; 2279 2280 err = mlx4_INIT_HCA(dev, &init_hca); 2281 if (err) { 2282 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2283 goto err_free_icm; 2284 } 2285 2286 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2287 err = mlx4_query_func(dev, &dev_cap); 2288 if (err < 0) { 2289 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2290 goto err_close; 2291 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2292 dev->caps.num_eqs = dev_cap.max_eqs; 2293 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 2294 dev->caps.reserved_uars = dev_cap.reserved_uars; 2295 } 2296 } 2297 2298 /* 2299 * If TS is supported by FW 2300 * read HCA frequency by QUERY_HCA command 2301 */ 2302 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2303 memset(&init_hca, 0, sizeof(init_hca)); 2304 err = mlx4_QUERY_HCA(dev, &init_hca); 2305 if (err) { 2306 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2307 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2308 } else { 2309 dev->caps.hca_core_clock = 2310 init_hca.hca_core_clock; 2311 } 2312 2313 /* In case we got HCA frequency 0 - disable timestamping 2314 * to avoid dividing by zero 2315 */ 2316 if (!dev->caps.hca_core_clock) { 2317 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2318 mlx4_err(dev, 2319 "HCA frequency is 0 - timestamping is not supported\n"); 2320 } else if (map_internal_clock(dev)) { 2321 /* 2322 * Map internal clock, 2323 * in case of failure disable timestamping 2324 */ 2325 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2326 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n"); 2327 } 2328 } 2329 2330 if (dev->caps.dmfs_high_steer_mode != 2331 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2332 if (mlx4_validate_optimized_steering(dev)) 2333 mlx4_warn(dev, "Optimized steering validation failed\n"); 2334 2335 if (dev->caps.dmfs_high_steer_mode == 2336 MLX4_STEERING_DMFS_A0_DISABLE) { 2337 dev->caps.dmfs_high_rate_qpn_base = 2338 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2339 dev->caps.dmfs_high_rate_qpn_range = 2340 MLX4_A0_STEERING_TABLE_SIZE; 2341 } 2342 2343 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 2344 dmfs_high_rate_steering_mode_str( 2345 dev->caps.dmfs_high_steer_mode)); 2346 } 2347 } else { 2348 err = mlx4_init_slave(dev); 2349 if (err) { 2350 if (err != -EAGAIN) 2351 mlx4_err(dev, "Failed to initialize slave\n"); 2352 return err; 2353 } 2354 2355 err = mlx4_slave_cap(dev); 2356 if (err) { 2357 mlx4_err(dev, "Failed to obtain slave caps\n"); 2358 goto err_close; 2359 } 2360 } 2361 2362 if (map_bf_area(dev)) 2363 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2364 2365 /*Only the master set the ports, all the rest got it from it.*/ 2366 if (!mlx4_is_slave(dev)) 2367 mlx4_set_port_mask(dev); 2368 2369 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2370 if (err) { 2371 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2372 goto unmap_bf; 2373 } 2374 2375 /* Query CONFIG_DEV parameters */ 2376 err = mlx4_config_dev_retrieval(dev, ¶ms); 2377 if (err && err != -ENOTSUPP) { 2378 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2379 } else if (!err) { 2380 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2381 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2382 } 2383 priv->eq_table.inta_pin = adapter.inta_pin; 2384 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 2385 2386 return 0; 2387 2388 unmap_bf: 2389 unmap_internal_clock(dev); 2390 unmap_bf_area(dev); 2391 2392 if (mlx4_is_slave(dev)) { 2393 kfree(dev->caps.qp0_qkey); 2394 kfree(dev->caps.qp0_tunnel); 2395 kfree(dev->caps.qp0_proxy); 2396 kfree(dev->caps.qp1_tunnel); 2397 kfree(dev->caps.qp1_proxy); 2398 } 2399 2400 err_close: 2401 if (mlx4_is_slave(dev)) 2402 mlx4_slave_exit(dev); 2403 else 2404 mlx4_CLOSE_HCA(dev, 0); 2405 2406 err_free_icm: 2407 if (!mlx4_is_slave(dev)) 2408 mlx4_free_icms(dev); 2409 2410 return err; 2411 } 2412 2413 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2414 { 2415 struct mlx4_priv *priv = mlx4_priv(dev); 2416 int nent_pow2; 2417 2418 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2419 return -ENOENT; 2420 2421 if (!dev->caps.max_counters) 2422 return -ENOSPC; 2423 2424 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); 2425 /* reserve last counter index for sink counter */ 2426 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2, 2427 nent_pow2 - 1, 0, 2428 nent_pow2 - dev->caps.max_counters + 1); 2429 } 2430 2431 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2432 { 2433 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2434 return; 2435 2436 if (!dev->caps.max_counters) 2437 return; 2438 2439 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2440 } 2441 2442 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) 2443 { 2444 struct mlx4_priv *priv = mlx4_priv(dev); 2445 int port; 2446 2447 for (port = 0; port < dev->caps.num_ports; port++) 2448 if (priv->def_counter[port] != -1) 2449 mlx4_counter_free(dev, priv->def_counter[port]); 2450 } 2451 2452 static int mlx4_allocate_default_counters(struct mlx4_dev *dev) 2453 { 2454 struct mlx4_priv *priv = mlx4_priv(dev); 2455 int port, err = 0; 2456 u32 idx; 2457 2458 for (port = 0; port < dev->caps.num_ports; port++) 2459 priv->def_counter[port] = -1; 2460 2461 for (port = 0; port < dev->caps.num_ports; port++) { 2462 err = mlx4_counter_alloc(dev, &idx); 2463 2464 if (!err || err == -ENOSPC) { 2465 priv->def_counter[port] = idx; 2466 } else if (err == -ENOENT) { 2467 err = 0; 2468 continue; 2469 } else if (mlx4_is_slave(dev) && err == -EINVAL) { 2470 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); 2471 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", 2472 MLX4_SINK_COUNTER_INDEX(dev)); 2473 err = 0; 2474 } else { 2475 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2476 __func__, port + 1, err); 2477 mlx4_cleanup_default_counters(dev); 2478 return err; 2479 } 2480 2481 mlx4_dbg(dev, "%s: default counter index %d for port %d\n", 2482 __func__, priv->def_counter[port], port + 1); 2483 } 2484 2485 return err; 2486 } 2487 2488 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2489 { 2490 struct mlx4_priv *priv = mlx4_priv(dev); 2491 2492 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2493 return -ENOENT; 2494 2495 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2496 if (*idx == -1) { 2497 *idx = MLX4_SINK_COUNTER_INDEX(dev); 2498 return -ENOSPC; 2499 } 2500 2501 return 0; 2502 } 2503 2504 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2505 { 2506 u64 out_param; 2507 int err; 2508 2509 if (mlx4_is_mfunc(dev)) { 2510 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2511 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2512 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2513 if (!err) 2514 *idx = get_param_l(&out_param); 2515 2516 return err; 2517 } 2518 return __mlx4_counter_alloc(dev, idx); 2519 } 2520 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2521 2522 static int __mlx4_clear_if_stat(struct mlx4_dev *dev, 2523 u8 counter_index) 2524 { 2525 struct mlx4_cmd_mailbox *if_stat_mailbox; 2526 int err; 2527 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET; 2528 2529 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); 2530 if (IS_ERR(if_stat_mailbox)) 2531 return PTR_ERR(if_stat_mailbox); 2532 2533 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, 2534 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, 2535 MLX4_CMD_NATIVE); 2536 2537 mlx4_free_cmd_mailbox(dev, if_stat_mailbox); 2538 return err; 2539 } 2540 2541 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2542 { 2543 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2544 return; 2545 2546 if (idx == MLX4_SINK_COUNTER_INDEX(dev)) 2547 return; 2548 2549 __mlx4_clear_if_stat(dev, idx); 2550 2551 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2552 return; 2553 } 2554 2555 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2556 { 2557 u64 in_param = 0; 2558 2559 if (mlx4_is_mfunc(dev)) { 2560 set_param_l(&in_param, idx); 2561 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2562 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2563 MLX4_CMD_WRAPPED); 2564 return; 2565 } 2566 __mlx4_counter_free(dev, idx); 2567 } 2568 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2569 2570 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) 2571 { 2572 struct mlx4_priv *priv = mlx4_priv(dev); 2573 2574 return priv->def_counter[port - 1]; 2575 } 2576 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index); 2577 2578 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) 2579 { 2580 struct mlx4_priv *priv = mlx4_priv(dev); 2581 2582 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2583 } 2584 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); 2585 2586 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) 2587 { 2588 struct mlx4_priv *priv = mlx4_priv(dev); 2589 2590 return priv->mfunc.master.vf_admin[entry].vport[port].guid; 2591 } 2592 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); 2593 2594 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) 2595 { 2596 struct mlx4_priv *priv = mlx4_priv(dev); 2597 __be64 guid; 2598 2599 /* hw GUID */ 2600 if (entry == 0) 2601 return; 2602 2603 get_random_bytes((char *)&guid, sizeof(guid)); 2604 guid &= ~(cpu_to_be64(1ULL << 56)); 2605 guid |= cpu_to_be64(1ULL << 57); 2606 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2607 } 2608 2609 static int mlx4_setup_hca(struct mlx4_dev *dev) 2610 { 2611 struct mlx4_priv *priv = mlx4_priv(dev); 2612 int err; 2613 int port; 2614 __be32 ib_port_default_caps; 2615 2616 err = mlx4_init_uar_table(dev); 2617 if (err) { 2618 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2619 return err; 2620 } 2621 2622 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2623 if (err) { 2624 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2625 goto err_uar_table_free; 2626 } 2627 2628 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2629 if (!priv->kar) { 2630 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2631 err = -ENOMEM; 2632 goto err_uar_free; 2633 } 2634 2635 err = mlx4_init_pd_table(dev); 2636 if (err) { 2637 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2638 goto err_kar_unmap; 2639 } 2640 2641 err = mlx4_init_xrcd_table(dev); 2642 if (err) { 2643 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2644 goto err_pd_table_free; 2645 } 2646 2647 err = mlx4_init_mr_table(dev); 2648 if (err) { 2649 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2650 goto err_xrcd_table_free; 2651 } 2652 2653 if (!mlx4_is_slave(dev)) { 2654 err = mlx4_init_mcg_table(dev); 2655 if (err) { 2656 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2657 goto err_mr_table_free; 2658 } 2659 err = mlx4_config_mad_demux(dev); 2660 if (err) { 2661 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2662 goto err_mcg_table_free; 2663 } 2664 } 2665 2666 err = mlx4_init_eq_table(dev); 2667 if (err) { 2668 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2669 goto err_mcg_table_free; 2670 } 2671 2672 err = mlx4_cmd_use_events(dev); 2673 if (err) { 2674 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2675 goto err_eq_table_free; 2676 } 2677 2678 err = mlx4_NOP(dev); 2679 if (err) { 2680 if (dev->flags & MLX4_FLAG_MSI_X) { 2681 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", 2682 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2683 mlx4_warn(dev, "Trying again without MSI-X\n"); 2684 } else { 2685 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2686 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2687 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2688 } 2689 2690 goto err_cmd_poll; 2691 } 2692 2693 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2694 2695 err = mlx4_init_cq_table(dev); 2696 if (err) { 2697 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 2698 goto err_cmd_poll; 2699 } 2700 2701 err = mlx4_init_srq_table(dev); 2702 if (err) { 2703 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2704 goto err_cq_table_free; 2705 } 2706 2707 err = mlx4_init_qp_table(dev); 2708 if (err) { 2709 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2710 goto err_srq_table_free; 2711 } 2712 2713 if (!mlx4_is_slave(dev)) { 2714 err = mlx4_init_counters_table(dev); 2715 if (err && err != -ENOENT) { 2716 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2717 goto err_qp_table_free; 2718 } 2719 } 2720 2721 err = mlx4_allocate_default_counters(dev); 2722 if (err) { 2723 mlx4_err(dev, "Failed to allocate default counters, aborting\n"); 2724 goto err_counters_table_free; 2725 } 2726 2727 if (!mlx4_is_slave(dev)) { 2728 for (port = 1; port <= dev->caps.num_ports; port++) { 2729 ib_port_default_caps = 0; 2730 err = mlx4_get_port_ib_caps(dev, port, 2731 &ib_port_default_caps); 2732 if (err) 2733 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2734 port, err); 2735 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2736 2737 /* initialize per-slave default ib port capabilities */ 2738 if (mlx4_is_master(dev)) { 2739 int i; 2740 for (i = 0; i < dev->num_slaves; i++) { 2741 if (i == mlx4_master_func_num(dev)) 2742 continue; 2743 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2744 ib_port_default_caps; 2745 } 2746 } 2747 2748 if (mlx4_is_mfunc(dev)) 2749 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2750 else 2751 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2752 2753 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2754 dev->caps.pkey_table_len[port] : -1); 2755 if (err) { 2756 mlx4_err(dev, "Failed to set port %d, aborting\n", 2757 port); 2758 goto err_default_countes_free; 2759 } 2760 } 2761 } 2762 2763 return 0; 2764 2765 err_default_countes_free: 2766 mlx4_cleanup_default_counters(dev); 2767 2768 err_counters_table_free: 2769 if (!mlx4_is_slave(dev)) 2770 mlx4_cleanup_counters_table(dev); 2771 2772 err_qp_table_free: 2773 mlx4_cleanup_qp_table(dev); 2774 2775 err_srq_table_free: 2776 mlx4_cleanup_srq_table(dev); 2777 2778 err_cq_table_free: 2779 mlx4_cleanup_cq_table(dev); 2780 2781 err_cmd_poll: 2782 mlx4_cmd_use_polling(dev); 2783 2784 err_eq_table_free: 2785 mlx4_cleanup_eq_table(dev); 2786 2787 err_mcg_table_free: 2788 if (!mlx4_is_slave(dev)) 2789 mlx4_cleanup_mcg_table(dev); 2790 2791 err_mr_table_free: 2792 mlx4_cleanup_mr_table(dev); 2793 2794 err_xrcd_table_free: 2795 mlx4_cleanup_xrcd_table(dev); 2796 2797 err_pd_table_free: 2798 mlx4_cleanup_pd_table(dev); 2799 2800 err_kar_unmap: 2801 iounmap(priv->kar); 2802 2803 err_uar_free: 2804 mlx4_uar_free(dev, &priv->driver_uar); 2805 2806 err_uar_table_free: 2807 mlx4_cleanup_uar_table(dev); 2808 return err; 2809 } 2810 2811 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) 2812 { 2813 int requested_cpu = 0; 2814 struct mlx4_priv *priv = mlx4_priv(dev); 2815 struct mlx4_eq *eq; 2816 int off = 0; 2817 int i; 2818 2819 if (eqn > dev->caps.num_comp_vectors) 2820 return -EINVAL; 2821 2822 for (i = 1; i < port; i++) 2823 off += mlx4_get_eqs_per_port(dev, i); 2824 2825 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); 2826 2827 /* Meaning EQs are shared, and this call comes from the second port */ 2828 if (requested_cpu < 0) 2829 return 0; 2830 2831 eq = &priv->eq_table.eq[eqn]; 2832 2833 eq->affinity_cpu_id = requested_cpu % num_online_cpus(); 2834 2835 return 0; 2836 } 2837 2838 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2839 { 2840 struct mlx4_priv *priv = mlx4_priv(dev); 2841 struct msix_entry *entries; 2842 int i; 2843 int port = 0; 2844 2845 if (msi_x) { 2846 int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2847 2848 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2849 nreq); 2850 if (nreq > MAX_MSIX) 2851 nreq = MAX_MSIX; 2852 2853 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2854 if (!entries) 2855 goto no_msi; 2856 2857 for (i = 0; i < nreq; ++i) 2858 entries[i].entry = i; 2859 2860 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2861 nreq); 2862 2863 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { 2864 kfree(entries); 2865 goto no_msi; 2866 } 2867 /* 1 is reserved for events (asyncrounous EQ) */ 2868 dev->caps.num_comp_vectors = nreq - 1; 2869 2870 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; 2871 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, 2872 dev->caps.num_ports); 2873 2874 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { 2875 if (i == MLX4_EQ_ASYNC) 2876 continue; 2877 2878 priv->eq_table.eq[i].irq = 2879 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; 2880 2881 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { 2882 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2883 dev->caps.num_ports); 2884 /* We don't set affinity hint when there 2885 * aren't enough EQs 2886 */ 2887 } else { 2888 set_bit(port, 2889 priv->eq_table.eq[i].actv_ports.ports); 2890 if (mlx4_init_affinity_hint(dev, port + 1, i)) 2891 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", 2892 i); 2893 } 2894 /* We divide the Eqs evenly between the two ports. 2895 * (dev->caps.num_comp_vectors / dev->caps.num_ports) 2896 * refers to the number of Eqs per port 2897 * (i.e eqs_per_port). Theoretically, we would like to 2898 * write something like (i + 1) % eqs_per_port == 0. 2899 * However, since there's an asynchronous Eq, we have 2900 * to skip over it by comparing this condition to 2901 * !!((i + 1) > MLX4_EQ_ASYNC). 2902 */ 2903 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && 2904 ((i + 1) % 2905 (dev->caps.num_comp_vectors / dev->caps.num_ports)) == 2906 !!((i + 1) > MLX4_EQ_ASYNC)) 2907 /* If dev->caps.num_comp_vectors < dev->caps.num_ports, 2908 * everything is shared anyway. 2909 */ 2910 port++; 2911 } 2912 2913 dev->flags |= MLX4_FLAG_MSI_X; 2914 2915 kfree(entries); 2916 return; 2917 } 2918 2919 no_msi: 2920 dev->caps.num_comp_vectors = 1; 2921 2922 BUG_ON(MLX4_EQ_ASYNC >= 2); 2923 for (i = 0; i < 2; ++i) { 2924 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2925 if (i != MLX4_EQ_ASYNC) { 2926 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2927 dev->caps.num_ports); 2928 } 2929 } 2930 } 2931 2932 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2933 { 2934 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2935 int err = 0; 2936 2937 info->dev = dev; 2938 info->port = port; 2939 if (!mlx4_is_slave(dev)) { 2940 mlx4_init_mac_table(dev, &info->mac_table); 2941 mlx4_init_vlan_table(dev, &info->vlan_table); 2942 mlx4_init_roce_gid_table(dev, &info->gid_table); 2943 info->base_qpn = mlx4_get_base_qpn(dev, port); 2944 } 2945 2946 sprintf(info->dev_name, "mlx4_port%d", port); 2947 info->port_attr.attr.name = info->dev_name; 2948 if (mlx4_is_mfunc(dev)) 2949 info->port_attr.attr.mode = S_IRUGO; 2950 else { 2951 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2952 info->port_attr.store = set_port_type; 2953 } 2954 info->port_attr.show = show_port_type; 2955 sysfs_attr_init(&info->port_attr.attr); 2956 2957 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 2958 if (err) { 2959 mlx4_err(dev, "Failed to create file for port %d\n", port); 2960 info->port = -1; 2961 } 2962 2963 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2964 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2965 if (mlx4_is_mfunc(dev)) 2966 info->port_mtu_attr.attr.mode = S_IRUGO; 2967 else { 2968 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2969 info->port_mtu_attr.store = set_port_ib_mtu; 2970 } 2971 info->port_mtu_attr.show = show_port_ib_mtu; 2972 sysfs_attr_init(&info->port_mtu_attr.attr); 2973 2974 err = device_create_file(&dev->persist->pdev->dev, 2975 &info->port_mtu_attr); 2976 if (err) { 2977 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2978 device_remove_file(&info->dev->persist->pdev->dev, 2979 &info->port_attr); 2980 info->port = -1; 2981 } 2982 2983 return err; 2984 } 2985 2986 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2987 { 2988 if (info->port < 0) 2989 return; 2990 2991 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2992 device_remove_file(&info->dev->persist->pdev->dev, 2993 &info->port_mtu_attr); 2994 #ifdef CONFIG_RFS_ACCEL 2995 free_irq_cpu_rmap(info->rmap); 2996 info->rmap = NULL; 2997 #endif 2998 } 2999 3000 static int mlx4_init_steering(struct mlx4_dev *dev) 3001 { 3002 struct mlx4_priv *priv = mlx4_priv(dev); 3003 int num_entries = dev->caps.num_ports; 3004 int i, j; 3005 3006 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 3007 if (!priv->steer) 3008 return -ENOMEM; 3009 3010 for (i = 0; i < num_entries; i++) 3011 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3012 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 3013 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 3014 } 3015 return 0; 3016 } 3017 3018 static void mlx4_clear_steering(struct mlx4_dev *dev) 3019 { 3020 struct mlx4_priv *priv = mlx4_priv(dev); 3021 struct mlx4_steer_index *entry, *tmp_entry; 3022 struct mlx4_promisc_qp *pqp, *tmp_pqp; 3023 int num_entries = dev->caps.num_ports; 3024 int i, j; 3025 3026 for (i = 0; i < num_entries; i++) { 3027 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3028 list_for_each_entry_safe(pqp, tmp_pqp, 3029 &priv->steer[i].promisc_qps[j], 3030 list) { 3031 list_del(&pqp->list); 3032 kfree(pqp); 3033 } 3034 list_for_each_entry_safe(entry, tmp_entry, 3035 &priv->steer[i].steer_entries[j], 3036 list) { 3037 list_del(&entry->list); 3038 list_for_each_entry_safe(pqp, tmp_pqp, 3039 &entry->duplicates, 3040 list) { 3041 list_del(&pqp->list); 3042 kfree(pqp); 3043 } 3044 kfree(entry); 3045 } 3046 } 3047 } 3048 kfree(priv->steer); 3049 } 3050 3051 static int extended_func_num(struct pci_dev *pdev) 3052 { 3053 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 3054 } 3055 3056 #define MLX4_OWNER_BASE 0x8069c 3057 #define MLX4_OWNER_SIZE 4 3058 3059 static int mlx4_get_ownership(struct mlx4_dev *dev) 3060 { 3061 void __iomem *owner; 3062 u32 ret; 3063 3064 if (pci_channel_offline(dev->persist->pdev)) 3065 return -EIO; 3066 3067 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3068 MLX4_OWNER_BASE, 3069 MLX4_OWNER_SIZE); 3070 if (!owner) { 3071 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3072 return -ENOMEM; 3073 } 3074 3075 ret = readl(owner); 3076 iounmap(owner); 3077 return (int) !!ret; 3078 } 3079 3080 static void mlx4_free_ownership(struct mlx4_dev *dev) 3081 { 3082 void __iomem *owner; 3083 3084 if (pci_channel_offline(dev->persist->pdev)) 3085 return; 3086 3087 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3088 MLX4_OWNER_BASE, 3089 MLX4_OWNER_SIZE); 3090 if (!owner) { 3091 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3092 return; 3093 } 3094 writel(0, owner); 3095 msleep(1000); 3096 iounmap(owner); 3097 } 3098 3099 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 3100 !!((flags) & MLX4_FLAG_MASTER)) 3101 3102 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 3103 u8 total_vfs, int existing_vfs, int reset_flow) 3104 { 3105 u64 dev_flags = dev->flags; 3106 int err = 0; 3107 3108 if (reset_flow) { 3109 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 3110 GFP_KERNEL); 3111 if (!dev->dev_vfs) 3112 goto free_mem; 3113 return dev_flags; 3114 } 3115 3116 atomic_inc(&pf_loading); 3117 if (dev->flags & MLX4_FLAG_SRIOV) { 3118 if (existing_vfs != total_vfs) { 3119 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 3120 existing_vfs, total_vfs); 3121 total_vfs = existing_vfs; 3122 } 3123 } 3124 3125 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 3126 if (NULL == dev->dev_vfs) { 3127 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 3128 goto disable_sriov; 3129 } 3130 3131 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 3132 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 3133 err = pci_enable_sriov(pdev, total_vfs); 3134 } 3135 if (err) { 3136 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 3137 err); 3138 goto disable_sriov; 3139 } else { 3140 mlx4_warn(dev, "Running in master mode\n"); 3141 dev_flags |= MLX4_FLAG_SRIOV | 3142 MLX4_FLAG_MASTER; 3143 dev_flags &= ~MLX4_FLAG_SLAVE; 3144 dev->persist->num_vfs = total_vfs; 3145 } 3146 return dev_flags; 3147 3148 disable_sriov: 3149 atomic_dec(&pf_loading); 3150 free_mem: 3151 dev->persist->num_vfs = 0; 3152 kfree(dev->dev_vfs); 3153 dev->dev_vfs = NULL; 3154 return dev_flags & ~MLX4_FLAG_MASTER; 3155 } 3156 3157 enum { 3158 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 3159 }; 3160 3161 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 3162 int *nvfs) 3163 { 3164 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 3165 /* Checking for 64 VFs as a limitation of CX2 */ 3166 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 3167 requested_vfs >= 64) { 3168 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 3169 requested_vfs); 3170 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 3171 } 3172 return 0; 3173 } 3174 3175 static int mlx4_pci_enable_device(struct mlx4_dev *dev) 3176 { 3177 struct pci_dev *pdev = dev->persist->pdev; 3178 int err = 0; 3179 3180 mutex_lock(&dev->persist->pci_status_mutex); 3181 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { 3182 err = pci_enable_device(pdev); 3183 if (!err) 3184 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; 3185 } 3186 mutex_unlock(&dev->persist->pci_status_mutex); 3187 3188 return err; 3189 } 3190 3191 static void mlx4_pci_disable_device(struct mlx4_dev *dev) 3192 { 3193 struct pci_dev *pdev = dev->persist->pdev; 3194 3195 mutex_lock(&dev->persist->pci_status_mutex); 3196 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { 3197 pci_disable_device(pdev); 3198 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; 3199 } 3200 mutex_unlock(&dev->persist->pci_status_mutex); 3201 } 3202 3203 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3204 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3205 int reset_flow) 3206 { 3207 struct mlx4_dev *dev; 3208 unsigned sum = 0; 3209 int err; 3210 int port; 3211 int i; 3212 struct mlx4_dev_cap *dev_cap = NULL; 3213 int existing_vfs = 0; 3214 3215 dev = &priv->dev; 3216 3217 INIT_LIST_HEAD(&priv->ctx_list); 3218 spin_lock_init(&priv->ctx_lock); 3219 3220 mutex_init(&priv->port_mutex); 3221 mutex_init(&priv->bond_mutex); 3222 3223 INIT_LIST_HEAD(&priv->pgdir_list); 3224 mutex_init(&priv->pgdir_mutex); 3225 spin_lock_init(&priv->cmd.context_lock); 3226 3227 INIT_LIST_HEAD(&priv->bf_list); 3228 mutex_init(&priv->bf_mutex); 3229 3230 dev->rev_id = pdev->revision; 3231 dev->numa_node = dev_to_node(&pdev->dev); 3232 3233 /* Detect if this device is a virtual function */ 3234 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3235 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 3236 dev->flags |= MLX4_FLAG_SLAVE; 3237 } else { 3238 /* We reset the device and enable SRIOV only for physical 3239 * devices. Try to claim ownership on the device; 3240 * if already taken, skip -- do not allow multiple PFs */ 3241 err = mlx4_get_ownership(dev); 3242 if (err) { 3243 if (err < 0) 3244 return err; 3245 else { 3246 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 3247 return -EINVAL; 3248 } 3249 } 3250 3251 atomic_set(&priv->opreq_count, 0); 3252 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 3253 3254 /* 3255 * Now reset the HCA before we touch the PCI capabilities or 3256 * attempt a firmware command, since a boot ROM may have left 3257 * the HCA in an undefined state. 3258 */ 3259 err = mlx4_reset(dev); 3260 if (err) { 3261 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 3262 goto err_sriov; 3263 } 3264 3265 if (total_vfs) { 3266 dev->flags = MLX4_FLAG_MASTER; 3267 existing_vfs = pci_num_vf(pdev); 3268 if (existing_vfs) 3269 dev->flags |= MLX4_FLAG_SRIOV; 3270 dev->persist->num_vfs = total_vfs; 3271 } 3272 } 3273 3274 /* on load remove any previous indication of internal error, 3275 * device is up. 3276 */ 3277 dev->persist->state = MLX4_DEVICE_STATE_UP; 3278 3279 slave_start: 3280 err = mlx4_cmd_init(dev); 3281 if (err) { 3282 mlx4_err(dev, "Failed to init command interface, aborting\n"); 3283 goto err_sriov; 3284 } 3285 3286 /* In slave functions, the communication channel must be initialized 3287 * before posting commands. Also, init num_slaves before calling 3288 * mlx4_init_hca */ 3289 if (mlx4_is_mfunc(dev)) { 3290 if (mlx4_is_master(dev)) { 3291 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 3292 3293 } else { 3294 dev->num_slaves = 0; 3295 err = mlx4_multi_func_init(dev); 3296 if (err) { 3297 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 3298 goto err_cmd; 3299 } 3300 } 3301 } 3302 3303 err = mlx4_init_fw(dev); 3304 if (err) { 3305 mlx4_err(dev, "Failed to init fw, aborting.\n"); 3306 goto err_mfunc; 3307 } 3308 3309 if (mlx4_is_master(dev)) { 3310 /* when we hit the goto slave_start below, dev_cap already initialized */ 3311 if (!dev_cap) { 3312 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 3313 3314 if (!dev_cap) { 3315 err = -ENOMEM; 3316 goto err_fw; 3317 } 3318 3319 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3320 if (err) { 3321 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3322 goto err_fw; 3323 } 3324 3325 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3326 goto err_fw; 3327 3328 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3329 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 3330 total_vfs, 3331 existing_vfs, 3332 reset_flow); 3333 3334 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3335 dev->flags = dev_flags; 3336 if (!SRIOV_VALID_STATE(dev->flags)) { 3337 mlx4_err(dev, "Invalid SRIOV state\n"); 3338 goto err_sriov; 3339 } 3340 err = mlx4_reset(dev); 3341 if (err) { 3342 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 3343 goto err_sriov; 3344 } 3345 goto slave_start; 3346 } 3347 } else { 3348 /* Legacy mode FW requires SRIOV to be enabled before 3349 * doing QUERY_DEV_CAP, since max_eq's value is different if 3350 * SRIOV is enabled. 3351 */ 3352 memset(dev_cap, 0, sizeof(*dev_cap)); 3353 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3354 if (err) { 3355 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3356 goto err_fw; 3357 } 3358 3359 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3360 goto err_fw; 3361 } 3362 } 3363 3364 err = mlx4_init_hca(dev); 3365 if (err) { 3366 if (err == -EACCES) { 3367 /* Not primary Physical function 3368 * Running in slave mode */ 3369 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3370 /* We're not a PF */ 3371 if (dev->flags & MLX4_FLAG_SRIOV) { 3372 if (!existing_vfs) 3373 pci_disable_sriov(pdev); 3374 if (mlx4_is_master(dev) && !reset_flow) 3375 atomic_dec(&pf_loading); 3376 dev->flags &= ~MLX4_FLAG_SRIOV; 3377 } 3378 if (!mlx4_is_slave(dev)) 3379 mlx4_free_ownership(dev); 3380 dev->flags |= MLX4_FLAG_SLAVE; 3381 dev->flags &= ~MLX4_FLAG_MASTER; 3382 goto slave_start; 3383 } else 3384 goto err_fw; 3385 } 3386 3387 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3388 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 3389 existing_vfs, reset_flow); 3390 3391 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 3392 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 3393 dev->flags = dev_flags; 3394 err = mlx4_cmd_init(dev); 3395 if (err) { 3396 /* Only VHCR is cleaned up, so could still 3397 * send FW commands 3398 */ 3399 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 3400 goto err_close; 3401 } 3402 } else { 3403 dev->flags = dev_flags; 3404 } 3405 3406 if (!SRIOV_VALID_STATE(dev->flags)) { 3407 mlx4_err(dev, "Invalid SRIOV state\n"); 3408 goto err_close; 3409 } 3410 } 3411 3412 /* check if the device is functioning at its maximum possible speed. 3413 * No return code for this call, just warn the user in case of PCI 3414 * express device capabilities are under-satisfied by the bus. 3415 */ 3416 if (!mlx4_is_slave(dev)) 3417 mlx4_check_pcie_caps(dev); 3418 3419 /* In master functions, the communication channel must be initialized 3420 * after obtaining its address from fw */ 3421 if (mlx4_is_master(dev)) { 3422 if (dev->caps.num_ports < 2 && 3423 num_vfs_argc > 1) { 3424 err = -EINVAL; 3425 mlx4_err(dev, 3426 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 3427 dev->caps.num_ports); 3428 goto err_close; 3429 } 3430 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 3431 3432 for (i = 0; 3433 i < sizeof(dev->persist->nvfs)/ 3434 sizeof(dev->persist->nvfs[0]); i++) { 3435 unsigned j; 3436 3437 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 3438 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 3439 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : 3440 dev->caps.num_ports; 3441 } 3442 } 3443 3444 /* In master functions, the communication channel 3445 * must be initialized after obtaining its address from fw 3446 */ 3447 err = mlx4_multi_func_init(dev); 3448 if (err) { 3449 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 3450 goto err_close; 3451 } 3452 } 3453 3454 err = mlx4_alloc_eq_table(dev); 3455 if (err) 3456 goto err_master_mfunc; 3457 3458 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); 3459 mutex_init(&priv->msix_ctl.pool_lock); 3460 3461 mlx4_enable_msi_x(dev); 3462 if ((mlx4_is_mfunc(dev)) && 3463 !(dev->flags & MLX4_FLAG_MSI_X)) { 3464 err = -ENOSYS; 3465 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 3466 goto err_free_eq; 3467 } 3468 3469 if (!mlx4_is_slave(dev)) { 3470 err = mlx4_init_steering(dev); 3471 if (err) 3472 goto err_disable_msix; 3473 } 3474 3475 mlx4_init_quotas(dev); 3476 3477 err = mlx4_setup_hca(dev); 3478 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 3479 !mlx4_is_mfunc(dev)) { 3480 dev->flags &= ~MLX4_FLAG_MSI_X; 3481 dev->caps.num_comp_vectors = 1; 3482 pci_disable_msix(pdev); 3483 err = mlx4_setup_hca(dev); 3484 } 3485 3486 if (err) 3487 goto err_steer; 3488 3489 /* When PF resources are ready arm its comm channel to enable 3490 * getting commands 3491 */ 3492 if (mlx4_is_master(dev)) { 3493 err = mlx4_ARM_COMM_CHANNEL(dev); 3494 if (err) { 3495 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3496 err); 3497 goto err_steer; 3498 } 3499 } 3500 3501 for (port = 1; port <= dev->caps.num_ports; port++) { 3502 err = mlx4_init_port_info(dev, port); 3503 if (err) 3504 goto err_port; 3505 } 3506 3507 priv->v2p.port1 = 1; 3508 priv->v2p.port2 = 2; 3509 3510 err = mlx4_register_device(dev); 3511 if (err) 3512 goto err_port; 3513 3514 mlx4_request_modules(dev); 3515 3516 mlx4_sense_init(dev); 3517 mlx4_start_sense(dev); 3518 3519 priv->removed = 0; 3520 3521 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3522 atomic_dec(&pf_loading); 3523 3524 kfree(dev_cap); 3525 return 0; 3526 3527 err_port: 3528 for (--port; port >= 1; --port) 3529 mlx4_cleanup_port_info(&priv->port[port]); 3530 3531 mlx4_cleanup_counters_table(dev); 3532 mlx4_cleanup_qp_table(dev); 3533 mlx4_cleanup_srq_table(dev); 3534 mlx4_cleanup_cq_table(dev); 3535 mlx4_cmd_use_polling(dev); 3536 mlx4_cleanup_eq_table(dev); 3537 mlx4_cleanup_mcg_table(dev); 3538 mlx4_cleanup_mr_table(dev); 3539 mlx4_cleanup_xrcd_table(dev); 3540 mlx4_cleanup_pd_table(dev); 3541 mlx4_cleanup_uar_table(dev); 3542 3543 err_steer: 3544 if (!mlx4_is_slave(dev)) 3545 mlx4_clear_steering(dev); 3546 3547 err_disable_msix: 3548 if (dev->flags & MLX4_FLAG_MSI_X) 3549 pci_disable_msix(pdev); 3550 3551 err_free_eq: 3552 mlx4_free_eq_table(dev); 3553 3554 err_master_mfunc: 3555 if (mlx4_is_master(dev)) { 3556 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3557 mlx4_multi_func_cleanup(dev); 3558 } 3559 3560 if (mlx4_is_slave(dev)) { 3561 kfree(dev->caps.qp0_qkey); 3562 kfree(dev->caps.qp0_tunnel); 3563 kfree(dev->caps.qp0_proxy); 3564 kfree(dev->caps.qp1_tunnel); 3565 kfree(dev->caps.qp1_proxy); 3566 } 3567 3568 err_close: 3569 mlx4_close_hca(dev); 3570 3571 err_fw: 3572 mlx4_close_fw(dev); 3573 3574 err_mfunc: 3575 if (mlx4_is_slave(dev)) 3576 mlx4_multi_func_cleanup(dev); 3577 3578 err_cmd: 3579 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3580 3581 err_sriov: 3582 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3583 pci_disable_sriov(pdev); 3584 dev->flags &= ~MLX4_FLAG_SRIOV; 3585 } 3586 3587 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3588 atomic_dec(&pf_loading); 3589 3590 kfree(priv->dev.dev_vfs); 3591 3592 if (!mlx4_is_slave(dev)) 3593 mlx4_free_ownership(dev); 3594 3595 kfree(dev_cap); 3596 return err; 3597 } 3598 3599 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3600 struct mlx4_priv *priv) 3601 { 3602 int err; 3603 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3604 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3605 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3606 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3607 unsigned total_vfs = 0; 3608 unsigned int i; 3609 3610 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3611 3612 err = mlx4_pci_enable_device(&priv->dev); 3613 if (err) { 3614 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3615 return err; 3616 } 3617 3618 /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS 3619 * per port, we must limit the number of VFs to 63 (since their are 3620 * 128 MACs) 3621 */ 3622 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3623 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3624 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3625 if (nvfs[i] < 0) { 3626 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3627 err = -EINVAL; 3628 goto err_disable_pdev; 3629 } 3630 } 3631 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3632 i++) { 3633 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3634 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3635 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3636 err = -EINVAL; 3637 goto err_disable_pdev; 3638 } 3639 } 3640 if (total_vfs > MLX4_MAX_NUM_VF) { 3641 dev_err(&pdev->dev, 3642 "Requested more VF's (%d) than allowed by hw (%d)\n", 3643 total_vfs, MLX4_MAX_NUM_VF); 3644 err = -EINVAL; 3645 goto err_disable_pdev; 3646 } 3647 3648 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3649 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { 3650 dev_err(&pdev->dev, 3651 "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n", 3652 nvfs[i] + nvfs[2], i + 1, 3653 MLX4_MAX_NUM_VF_P_PORT); 3654 err = -EINVAL; 3655 goto err_disable_pdev; 3656 } 3657 } 3658 3659 /* Check for BARs. */ 3660 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3661 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3662 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3663 pci_dev_data, (long)pci_resource_flags(pdev, 0)); 3664 err = -ENODEV; 3665 goto err_disable_pdev; 3666 } 3667 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3668 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3669 err = -ENODEV; 3670 goto err_disable_pdev; 3671 } 3672 3673 err = pci_request_regions(pdev, DRV_NAME); 3674 if (err) { 3675 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3676 goto err_disable_pdev; 3677 } 3678 3679 pci_set_master(pdev); 3680 3681 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3682 if (err) { 3683 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3684 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3685 if (err) { 3686 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3687 goto err_release_regions; 3688 } 3689 } 3690 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3691 if (err) { 3692 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3693 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3694 if (err) { 3695 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3696 goto err_release_regions; 3697 } 3698 } 3699 3700 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3701 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3702 /* Detect if this device is a virtual function */ 3703 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3704 /* When acting as pf, we normally skip vfs unless explicitly 3705 * requested to probe them. 3706 */ 3707 if (total_vfs) { 3708 unsigned vfs_offset = 0; 3709 3710 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3711 vfs_offset + nvfs[i] < extended_func_num(pdev); 3712 vfs_offset += nvfs[i], i++) 3713 ; 3714 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3715 err = -ENODEV; 3716 goto err_release_regions; 3717 } 3718 if ((extended_func_num(pdev) - vfs_offset) 3719 > prb_vf[i]) { 3720 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3721 extended_func_num(pdev)); 3722 err = -ENODEV; 3723 goto err_release_regions; 3724 } 3725 } 3726 } 3727 3728 err = mlx4_catas_init(&priv->dev); 3729 if (err) 3730 goto err_release_regions; 3731 3732 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3733 if (err) 3734 goto err_catas; 3735 3736 return 0; 3737 3738 err_catas: 3739 mlx4_catas_end(&priv->dev); 3740 3741 err_release_regions: 3742 pci_release_regions(pdev); 3743 3744 err_disable_pdev: 3745 mlx4_pci_disable_device(&priv->dev); 3746 pci_set_drvdata(pdev, NULL); 3747 return err; 3748 } 3749 3750 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3751 { 3752 3753 struct sysctl_ctx_list *ctx; 3754 struct sysctl_oid *node; 3755 struct sysctl_oid_list *node_list; 3756 struct mlx4_priv *priv; 3757 struct mlx4_dev *dev; 3758 int ret; 3759 3760 printk_once(KERN_INFO "%s", mlx4_version); 3761 3762 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3763 if (!priv) 3764 return -ENOMEM; 3765 3766 dev = &priv->dev; 3767 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3768 if (!dev->persist) { 3769 kfree(priv); 3770 return -ENOMEM; 3771 } 3772 dev->persist->pdev = pdev; 3773 dev->persist->dev = dev; 3774 pci_set_drvdata(pdev, dev->persist); 3775 priv->pci_dev_data = id->driver_data; 3776 mutex_init(&dev->persist->device_state_mutex); 3777 mutex_init(&dev->persist->interface_state_mutex); 3778 mutex_init(&dev->persist->pci_status_mutex); 3779 3780 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3781 if (ret) { 3782 kfree(dev->persist); 3783 kfree(priv); 3784 return ret; 3785 } else { 3786 device_set_desc(pdev->dev.bsddev, mlx4_description); 3787 pci_save_state(pdev->dev.bsddev); 3788 } 3789 3790 snprintf(dev->fw_str, sizeof(dev->fw_str), "%d.%d.%d", 3791 (int) (dev->caps.fw_ver >> 32), 3792 (int) (dev->caps.fw_ver >> 16) & 0xffff, 3793 (int) (dev->caps.fw_ver & 0xffff)); 3794 3795 ctx = &dev->hw_ctx; 3796 sysctl_ctx_init(ctx); 3797 node = SYSCTL_ADD_NODE(ctx,SYSCTL_CHILDREN(pdev->dev.kobj.oidp), 3798 OID_AUTO, "hw" , CTLFLAG_RD, 0, "mlx4 dev hw information"); 3799 if (node != NULL) { 3800 node_list = SYSCTL_CHILDREN(node); 3801 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, 3802 "fw_version", CTLFLAG_RD, dev->fw_str, 0, 3803 "Device firmware version"); 3804 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, 3805 "board_id", CTLFLAG_RD, dev->board_id, 0, 3806 "Device board identifier"); 3807 } 3808 3809 return ret; 3810 } 3811 3812 static void mlx4_clean_dev(struct mlx4_dev *dev) 3813 { 3814 struct mlx4_dev_persistent *persist = dev->persist; 3815 struct mlx4_priv *priv = mlx4_priv(dev); 3816 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); 3817 3818 memset(priv, 0, sizeof(*priv)); 3819 priv->dev.persist = persist; 3820 priv->dev.flags = flags; 3821 } 3822 3823 static void mlx4_unload_one(struct pci_dev *pdev) 3824 { 3825 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3826 struct mlx4_dev *dev = persist->dev; 3827 struct mlx4_priv *priv = mlx4_priv(dev); 3828 int pci_dev_data; 3829 int p, i; 3830 3831 if (priv->removed) 3832 return; 3833 3834 /* saving current ports type for further use */ 3835 for (i = 0; i < dev->caps.num_ports; i++) { 3836 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; 3837 dev->persist->curr_port_poss_type[i] = dev->caps. 3838 possible_type[i + 1]; 3839 } 3840 3841 pci_dev_data = priv->pci_dev_data; 3842 3843 mlx4_stop_sense(dev); 3844 mlx4_unregister_device(dev); 3845 3846 for (p = 1; p <= dev->caps.num_ports; p++) { 3847 mlx4_cleanup_port_info(&priv->port[p]); 3848 mlx4_CLOSE_PORT(dev, p); 3849 } 3850 3851 if (mlx4_is_master(dev)) 3852 mlx4_free_resource_tracker(dev, 3853 RES_TR_FREE_SLAVES_ONLY); 3854 3855 mlx4_cleanup_default_counters(dev); 3856 if (!mlx4_is_slave(dev)) 3857 mlx4_cleanup_counters_table(dev); 3858 mlx4_cleanup_qp_table(dev); 3859 mlx4_cleanup_srq_table(dev); 3860 mlx4_cleanup_cq_table(dev); 3861 mlx4_cmd_use_polling(dev); 3862 mlx4_cleanup_eq_table(dev); 3863 mlx4_cleanup_mcg_table(dev); 3864 mlx4_cleanup_mr_table(dev); 3865 mlx4_cleanup_xrcd_table(dev); 3866 mlx4_cleanup_pd_table(dev); 3867 3868 if (mlx4_is_master(dev)) 3869 mlx4_free_resource_tracker(dev, 3870 RES_TR_FREE_STRUCTS_ONLY); 3871 3872 iounmap(priv->kar); 3873 mlx4_uar_free(dev, &priv->driver_uar); 3874 mlx4_cleanup_uar_table(dev); 3875 if (!mlx4_is_slave(dev)) 3876 mlx4_clear_steering(dev); 3877 mlx4_free_eq_table(dev); 3878 if (mlx4_is_master(dev)) 3879 mlx4_multi_func_cleanup(dev); 3880 mlx4_close_hca(dev); 3881 mlx4_close_fw(dev); 3882 if (mlx4_is_slave(dev)) 3883 mlx4_multi_func_cleanup(dev); 3884 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3885 3886 if (dev->flags & MLX4_FLAG_MSI_X) 3887 pci_disable_msix(pdev); 3888 3889 if (!mlx4_is_slave(dev)) 3890 mlx4_free_ownership(dev); 3891 3892 kfree(dev->caps.qp0_qkey); 3893 kfree(dev->caps.qp0_tunnel); 3894 kfree(dev->caps.qp0_proxy); 3895 kfree(dev->caps.qp1_tunnel); 3896 kfree(dev->caps.qp1_proxy); 3897 kfree(dev->dev_vfs); 3898 3899 mlx4_clean_dev(dev); 3900 priv->pci_dev_data = pci_dev_data; 3901 priv->removed = 1; 3902 } 3903 3904 static void mlx4_remove_one(struct pci_dev *pdev) 3905 { 3906 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3907 struct mlx4_dev *dev = persist->dev; 3908 struct mlx4_priv *priv = mlx4_priv(dev); 3909 int active_vfs = 0; 3910 3911 mutex_lock(&persist->interface_state_mutex); 3912 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3913 mutex_unlock(&persist->interface_state_mutex); 3914 3915 /* 3916 * Clear the device description to avoid use after free, 3917 * because the bsddev is not destroyed when this module is 3918 * unloaded: 3919 */ 3920 device_set_desc(pdev->dev.bsddev, NULL); 3921 3922 /* Disabling SR-IOV is not allowed while there are active vf's */ 3923 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { 3924 active_vfs = mlx4_how_many_lives_vf(dev); 3925 if (active_vfs) { 3926 pr_warn("Removing PF when there are active VF's !!\n"); 3927 pr_warn("Will not disable SR-IOV.\n"); 3928 } 3929 } 3930 3931 /* device marked to be under deletion running now without the lock 3932 * letting other tasks to be terminated 3933 */ 3934 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 3935 mlx4_unload_one(pdev); 3936 else 3937 mlx4_info(dev, "%s: interface is down\n", __func__); 3938 mlx4_catas_end(dev); 3939 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { 3940 mlx4_warn(dev, "Disabling SR-IOV\n"); 3941 pci_disable_sriov(pdev); 3942 } 3943 3944 pci_release_regions(pdev); 3945 pci_disable_device(pdev); 3946 kfree(dev->persist); 3947 kfree(priv); 3948 pci_set_drvdata(pdev, NULL); 3949 } 3950 3951 static int restore_current_port_types(struct mlx4_dev *dev, 3952 enum mlx4_port_type *types, 3953 enum mlx4_port_type *poss_types) 3954 { 3955 struct mlx4_priv *priv = mlx4_priv(dev); 3956 int err, i; 3957 3958 mlx4_stop_sense(dev); 3959 3960 mutex_lock(&priv->port_mutex); 3961 for (i = 0; i < dev->caps.num_ports; i++) 3962 dev->caps.possible_type[i + 1] = poss_types[i]; 3963 err = mlx4_change_port_types(dev, types); 3964 mutex_unlock(&priv->port_mutex); 3965 3966 mlx4_start_sense(dev); 3967 3968 return err; 3969 } 3970 3971 int mlx4_restart_one(struct pci_dev *pdev) 3972 { 3973 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3974 struct mlx4_dev *dev = persist->dev; 3975 struct mlx4_priv *priv = mlx4_priv(dev); 3976 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3977 int pci_dev_data, err, total_vfs; 3978 3979 pci_dev_data = priv->pci_dev_data; 3980 total_vfs = dev->persist->num_vfs; 3981 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 3982 3983 mlx4_unload_one(pdev); 3984 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1); 3985 if (err) { 3986 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", 3987 __func__, pci_name(pdev), err); 3988 return err; 3989 } 3990 3991 err = restore_current_port_types(dev, dev->persist->curr_port_type, 3992 dev->persist->curr_port_poss_type); 3993 if (err) 3994 mlx4_err(dev, "could not restore original port types (%d)\n", 3995 err); 3996 3997 return err; 3998 } 3999 4000 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { 4001 /* MT25408 "Hermon" SDR */ 4002 { PCI_VDEVICE(MELLANOX, 0x6340), 4003 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4004 /* MT25408 "Hermon" DDR */ 4005 { PCI_VDEVICE(MELLANOX, 0x634a), 4006 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4007 /* MT25408 "Hermon" QDR */ 4008 { PCI_VDEVICE(MELLANOX, 0x6354), 4009 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4010 /* MT25408 "Hermon" DDR PCIe gen2 */ 4011 { PCI_VDEVICE(MELLANOX, 0x6732), 4012 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4013 /* MT25408 "Hermon" QDR PCIe gen2 */ 4014 { PCI_VDEVICE(MELLANOX, 0x673c), 4015 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4016 /* MT25408 "Hermon" EN 10GigE */ 4017 { PCI_VDEVICE(MELLANOX, 0x6368), 4018 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4019 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 4020 { PCI_VDEVICE(MELLANOX, 0x6750), 4021 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4022 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 4023 { PCI_VDEVICE(MELLANOX, 0x6372), 4024 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4025 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 4026 { PCI_VDEVICE(MELLANOX, 0x675a), 4027 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4028 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 4029 { PCI_VDEVICE(MELLANOX, 0x6764), 4030 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4031 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 4032 { PCI_VDEVICE(MELLANOX, 0x6746), 4033 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4034 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 4035 { PCI_VDEVICE(MELLANOX, 0x676e), 4036 .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT }, 4037 /* MT25400 Family [ConnectX-2 Virtual Function] */ 4038 { PCI_VDEVICE(MELLANOX, 0x1002), 4039 .driver_data = MLX4_PCI_DEV_IS_VF }, 4040 /* MT27500 Family [ConnectX-3] */ 4041 { PCI_VDEVICE(MELLANOX, 0x1003) }, 4042 /* MT27500 Family [ConnectX-3 Virtual Function] */ 4043 { PCI_VDEVICE(MELLANOX, 0x1004), 4044 .driver_data = MLX4_PCI_DEV_IS_VF }, 4045 { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */ 4046 { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */ 4047 { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */ 4048 { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */ 4049 { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */ 4050 { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */ 4051 { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */ 4052 { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */ 4053 { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */ 4054 { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */ 4055 { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */ 4056 { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */ 4057 { 0, } 4058 }; 4059 4060 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 4061 4062 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 4063 pci_channel_state_t state) 4064 { 4065 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4066 4067 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); 4068 mlx4_enter_error_state(persist); 4069 4070 mutex_lock(&persist->interface_state_mutex); 4071 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 4072 mlx4_unload_one(pdev); 4073 4074 mutex_unlock(&persist->interface_state_mutex); 4075 if (state == pci_channel_io_perm_failure) 4076 return PCI_ERS_RESULT_DISCONNECT; 4077 4078 mlx4_pci_disable_device(persist->dev); 4079 return PCI_ERS_RESULT_NEED_RESET; 4080 } 4081 4082 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 4083 { 4084 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4085 struct mlx4_dev *dev = persist->dev; 4086 int err; 4087 4088 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 4089 err = mlx4_pci_enable_device(dev); 4090 if (err) { 4091 mlx4_err(dev, "Can not re-enable device, err=%d\n", err); 4092 return PCI_ERS_RESULT_DISCONNECT; 4093 } 4094 4095 pci_set_master(pdev); 4096 return PCI_ERS_RESULT_RECOVERED; 4097 } 4098 4099 static void mlx4_pci_resume(struct pci_dev *pdev) 4100 { 4101 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4102 struct mlx4_dev *dev = persist->dev; 4103 struct mlx4_priv *priv = mlx4_priv(dev); 4104 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 4105 int total_vfs; 4106 int err; 4107 4108 mlx4_err(dev, "%s was called\n", __func__); 4109 total_vfs = dev->persist->num_vfs; 4110 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4111 4112 mutex_lock(&persist->interface_state_mutex); 4113 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 4114 err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 4115 priv, 1); 4116 if (err) { 4117 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n", 4118 __func__, err); 4119 goto end; 4120 } 4121 4122 err = restore_current_port_types(dev, dev->persist-> 4123 curr_port_type, dev->persist-> 4124 curr_port_poss_type); 4125 if (err) 4126 mlx4_err(dev, "could not restore original port types (%d)\n", err); 4127 } 4128 end: 4129 mutex_unlock(&persist->interface_state_mutex); 4130 4131 } 4132 4133 static void mlx4_shutdown(struct pci_dev *pdev) 4134 { 4135 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4136 4137 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 4138 mutex_lock(&persist->interface_state_mutex); 4139 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) 4140 mlx4_unload_one(pdev); 4141 mutex_unlock(&persist->interface_state_mutex); 4142 } 4143 4144 static const struct pci_error_handlers mlx4_err_handler = { 4145 .error_detected = mlx4_pci_err_detected, 4146 .slot_reset = mlx4_pci_slot_reset, 4147 .resume = mlx4_pci_resume, 4148 }; 4149 4150 static struct pci_driver mlx4_driver = { 4151 .name = DRV_NAME, 4152 .id_table = mlx4_pci_table, 4153 .probe = mlx4_init_one, 4154 .shutdown = mlx4_shutdown, 4155 .remove = mlx4_remove_one, 4156 .err_handler = &mlx4_err_handler, 4157 }; 4158 4159 static int __init mlx4_verify_params(void) 4160 { 4161 if ((log_num_mac < 0) || (log_num_mac > 7)) { 4162 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); 4163 return -1; 4164 } 4165 4166 if (log_num_vlan != 0) 4167 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 4168 MLX4_LOG_NUM_VLANS); 4169 4170 if (use_prio != 0) 4171 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n"); 4172 4173 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 4174 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", 4175 log_mtts_per_seg); 4176 return -1; 4177 } 4178 4179 /* Check if module param for ports type has legal combination */ 4180 if (port_type_array[0] == false && port_type_array[1] == true) { 4181 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 4182 port_type_array[0] = true; 4183 } 4184 4185 if (mlx4_log_num_mgm_entry_size < -7 || 4186 (mlx4_log_num_mgm_entry_size > 0 && 4187 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 4188 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) { 4189 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n", 4190 mlx4_log_num_mgm_entry_size, 4191 MLX4_MIN_MGM_LOG_ENTRY_SIZE, 4192 MLX4_MAX_MGM_LOG_ENTRY_SIZE); 4193 return -1; 4194 } 4195 4196 return 0; 4197 } 4198 4199 static int __init mlx4_init(void) 4200 { 4201 int ret; 4202 4203 if (mlx4_verify_params()) 4204 return -EINVAL; 4205 4206 4207 mlx4_wq = create_singlethread_workqueue("mlx4"); 4208 if (!mlx4_wq) 4209 return -ENOMEM; 4210 4211 ret = pci_register_driver(&mlx4_driver); 4212 if (ret < 0) 4213 destroy_workqueue(mlx4_wq); 4214 return ret < 0 ? ret : 0; 4215 } 4216 4217 static void __exit mlx4_cleanup(void) 4218 { 4219 pci_unregister_driver(&mlx4_driver); 4220 destroy_workqueue(mlx4_wq); 4221 } 4222 4223 module_init(mlx4_init); 4224 module_exit(mlx4_cleanup); 4225 4226 static int 4227 mlx4_evhand(module_t mod, int event, void *arg) 4228 { 4229 return (0); 4230 } 4231 4232 static moduledata_t mlx4_mod = { 4233 .name = "mlx4", 4234 .evhand = mlx4_evhand, 4235 }; 4236 MODULE_VERSION(mlx4, 1); 4237 DECLARE_MODULE(mlx4, mlx4_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY); 4238 MODULE_DEPEND(mlx4, linuxkpi, 1, 1, 1); 4239 4240