/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/cache.h>
#include <linux/random.h>

#include <dev/mlx4/device.h>
#include <dev/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
#include <dev/mlx4/stats.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
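/*
 * Both parameter arrays above take up to three comma-separated values:
 * single-port VFs on port 1, single-port VFs on port 2, and dual-port
 * VFs.  For example (hypothetical values), num_vfs=2,2,4 requests two
 * single-port VFs on each port plus four dual-port VFs.
 */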
int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
			mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

static bool enable_4k_uar;
module_param(enable_4k_uar, bool, 0444);
MODULE_PARM_DESC(enable_4k_uar,
		 "Enable using 4K UAR. Should not be enabled if there are VFs which do not support 4K UARs (default: false)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
					      struct mlx4_dev_cap *dev_cap)
{
	/* The reserved_uars is calculated by system page size unit.
	 * Therefore, adjustment is added when the uar page size is less
	 * than the system page size
	 */
	dev->caps.reserved_uars =
		max_t(int,
		      mlx4_get_num_reserved_uar(dev),
		      dev_cap->reserved_uars /
			(1 << (PAGE_SHIFT - dev->uar_page_shift)));
}
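/*
 * Worked example (hypothetical sizes): with 64KB system pages
 * (PAGE_SHIFT == 16) and 4KB UAR pages (uar_page_shift == 12), sixteen
 * UARs fit in one system page, so the firmware's reserved_uars count is
 * divided by 1 << (16 - 12) == 16 before being compared against the
 * driver's own minimum.
 */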
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride, cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}
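/*
 * Background: CQE/EQE stride is aimed at hosts whose cache line is
 * larger than 64B (the 128B/256B cases above); the HCA then writes each
 * 32B entry padded out to a full cache line, avoiding partial
 * cache-line updates.  This is presumably why the plain 64B CQE/EQE
 * flags are traded for the stride function cap above.
 */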
static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port]			 = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port]		 = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port]	 = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port]	= port_cap->max_gids;
	dev->caps.pkey_table_len[port]	= port_cap->max_pkeys;
	dev->caps.port_width_cap[port]	= port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port]	= port_cap->eth_mtu;
	dev->caps.max_tc_eth		= port_cap->max_tc_eth;
	dev->caps.def_mac[port]		= port_cap->def_mac;
	dev->caps.supported_type[port]	= port_cap->supported_port_types;
	dev->caps.suggested_type[port]	= port_cap->suggested_type;
	dev->caps.default_sense[port]	= port_cap->default_sense;
	dev->caps.trans_type[port]	= port_cap->trans_type;
	dev->caps.vendor_oui[port]	= port_cap->vendor_oui;
	dev->caps.wavelength[port]	= port_cap->wavelength;
	dev->caps.trans_code[port]	= port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
		return;

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}
}

#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, (long)PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	= dev_cap->num_ports;
	dev->caps.num_sys_eqs	= dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts	     = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds	     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz	     = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

	/* Save uar page shift */
	if (!mlx4_is_slave(dev)) {
		/* Virtual PCI function needs to determine UAR page size from
		 * firmware. Only master PCI function can set the uar page size
		 */
		if (enable_4k_uar)
			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
		else
			dev->uar_page_shift = PAGE_SHIFT;

		mlx4_set_num_reserved_uars(dev, dev_cap);
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
		struct mlx4_init_hca_param hca_param;

		memset(&hca_param, 0, sizeof(hca_param));
		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and
		 * verifies the phv bit was reported correctly in the wqe.
		 * To allow QinQ, the PHV_EN flag should be set and
		 * phv_check_en must be cleared, otherwise QinQ packets
		 * will be dropped by the HW.
		 */
		if (err || hca_param.phv_check_en)
			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
	}
451 */ 452 if (err || hca_param.phv_check_en) 453 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN; 454 } 455 456 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 457 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) 458 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 459 /* Don't do sense port on multifunction devices (for now at least) */ 460 if (mlx4_is_mfunc(dev)) 461 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 462 463 if (mlx4_low_memory_profile()) { 464 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; 465 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; 466 } else { 467 dev->caps.log_num_macs = log_num_mac; 468 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 469 } 470 471 for (i = 1; i <= dev->caps.num_ports; ++i) { 472 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 473 if (dev->caps.supported_type[i]) { 474 /* if only ETH is supported - assign ETH */ 475 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 476 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 477 /* if only IB is supported, assign IB */ 478 else if (dev->caps.supported_type[i] == 479 MLX4_PORT_TYPE_IB) 480 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 481 else { 482 /* if IB and ETH are supported, we set the port 483 * type according to user selection of port type; 484 * if user selected none, take the FW hint */ 485 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE) 486 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 487 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 488 else 489 dev->caps.port_type[i] = port_type_array[i - 1]; 490 } 491 } 492 /* 493 * Link sensing is allowed on the port if 3 conditions are true: 494 * 1. Both protocols are supported on the port. 495 * 2. Different types are supported on the port 496 * 3. FW declared that it supports link sensing 497 */ 498 mlx4_priv(dev)->sense.sense_allowed[i] = 499 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 500 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 501 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 502 503 /* 504 * If "default_sense" bit is set, we move the port to "AUTO" mode 505 * and perform sense_port FW command to try and set the correct 506 * port type from beginning 507 */ 508 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 509 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 510 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 511 mlx4_SENSE_PORT(dev, i, &sensed_port); 512 if (sensed_port != MLX4_PORT_TYPE_NONE) 513 dev->caps.port_type[i] = sensed_port; 514 } else { 515 dev->caps.possible_type[i] = dev->caps.port_type[i]; 516 } 517 518 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { 519 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; 520 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", 521 i, 1 << dev->caps.log_num_macs); 522 } 523 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { 524 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; 525 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", 526 i, 1 << dev->caps.log_num_vlans); 527 } 528 } 529 530 if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) && 531 (port_type_array[0] == MLX4_PORT_TYPE_IB) && 532 (port_type_array[1] == MLX4_PORT_TYPE_ETH)) { 533 mlx4_warn(dev, 534 "Granular QoS per VF not supported with IB/Eth configuration\n"); 535 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP; 536 } 537 538 dev->caps.max_counters = dev_cap->max_counters; 539 540 
	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base =
			dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range =
			dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.rl_caps = dev_cap->rl_caps;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			mlx4_warn(dev, "Old device ETS support detected\n");
			mlx4_warn(dev, "Consider upgrading device FW.\n");
			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
		}

	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	mlx4_enable_ignore_fcs(dev);

	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}
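/*
 * PCI_EXP_LNKCAP_MLW occupies bits 9:4 of the Link Capabilities
 * register, so after PCIE_MLW_CAP_SHIFT the raw value is the lane count
 * itself (e.g. 8 for a x8-capable device).
 */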
"2.5GT/s" : \ 674 "Unknown") 675 676 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); 677 if (err) { 678 mlx4_warn(dev, 679 "Unable to determine PCIe device BW capabilities\n"); 680 return; 681 } 682 683 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); 684 if (err || speed == PCI_SPEED_UNKNOWN || 685 width == PCIE_LNK_WIDTH_UNKNOWN) { 686 mlx4_warn(dev, 687 "Unable to determine PCI device chain minimum BW\n"); 688 return; 689 } 690 691 if (width != width_cap || speed != speed_cap) 692 mlx4_warn(dev, 693 "PCIe BW is different than device's capability\n"); 694 695 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", 696 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 697 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", 698 width, width_cap); 699 return; 700 } 701 702 /*The function checks if there are live vf, return the num of them*/ 703 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 704 { 705 struct mlx4_priv *priv = mlx4_priv(dev); 706 struct mlx4_slave_state *s_state; 707 int i; 708 int ret = 0; 709 710 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 711 s_state = &priv->mfunc.master.slave_state[i]; 712 if (s_state->active && s_state->last_cmd != 713 MLX4_COMM_CMD_RESET) { 714 mlx4_warn(dev, "%s: slave: %d is still active\n", 715 __func__, i); 716 ret++; 717 } 718 } 719 return ret; 720 } 721 722 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 723 { 724 u32 qk = MLX4_RESERVED_QKEY_BASE; 725 726 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 727 qpn < dev->phys_caps.base_proxy_sqpn) 728 return -EINVAL; 729 730 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 731 /* tunnel qp */ 732 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 733 else 734 qk += qpn - dev->phys_caps.base_proxy_sqpn; 735 *qkey = qk; 736 return 0; 737 } 738 EXPORT_SYMBOL(mlx4_get_parav_qkey); 739 740 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 741 { 742 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 743 744 if (!mlx4_is_master(dev)) 745 return; 746 747 priv->virt2phys_pkey[slave][port - 1][i] = val; 748 } 749 EXPORT_SYMBOL(mlx4_sync_pkey_table); 750 751 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 752 { 753 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 754 755 if (!mlx4_is_master(dev)) 756 return; 757 758 priv->slave_node_guids[slave] = guid; 759 } 760 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 761 762 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 763 { 764 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 765 766 if (!mlx4_is_master(dev)) 767 return 0; 768 769 return priv->slave_node_guids[slave]; 770 } 771 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 772 773 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 774 { 775 struct mlx4_priv *priv = mlx4_priv(dev); 776 struct mlx4_slave_state *s_slave; 777 778 if (!mlx4_is_master(dev)) 779 return 0; 780 781 s_slave = &priv->mfunc.master.slave_state[slave]; 782 return !!s_slave->active; 783 } 784 EXPORT_SYMBOL(mlx4_is_slave_active); 785 786 static void slave_adjust_steering_mode(struct mlx4_dev *dev, 787 struct mlx4_dev_cap *dev_cap, 788 struct mlx4_init_hca_param *hca_param) 789 { 790 dev->caps.steering_mode = hca_param->steering_mode; 791 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 792 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 793 dev->caps.fs_log_max_ucast_qp_range_size = 794 
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	u8 i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* Fail if the HCA has an unknown global capability; at this
	 * time, global_caps should always be zeroed.
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, (long)PAGE_SIZE);
		return -ENODEV;
	}

	/* Set uar_page_shift for VF */
	dev->uar_page_shift = hca_param.uar_page_sz + 12;
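	/*
	 * hca_param.uar_page_sz is encoded by firmware as
	 * log2(page size) - 12, hence the +12 above: 0 means 4KB UAR
	 * pages, 1 means 8KB, and so on.
	 */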
	/* Make sure the master uar page size is valid */
	if (dev->uar_page_shift > PAGE_SHIFT) {
		mlx4_err(dev,
			 "Invalid configuration: uar page size is larger than system page size\n");
		return -ENODEV;
	}

	/* Set reserved_uars based on the uar_page_shift */
	mlx4_set_num_reserved_uars(dev, &dev_cap);

	/* Although uar page size in FW differs from system page size,
	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
	 * still works with assumption that uar page size == system page size
	 */
	dev->caps.uar_page_size = PAGE_SIZE;

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap.pf_context_behaviour,
			 PF_CONTEXT_BEHAVIOUR_MASK);
		return -ENOSYS;
	}

	dev->caps.num_ports	= func_cap.num_ports;
	dev->quotas.qp		= func_cap.qp_quota;
	dev->quotas.srq		= func_cap.srq_quota;
	dev->quotas.cq		= func_cap.cq_quota;
	dev->quotas.mpt		= func_cap.mpt_quota;
	dev->quotas.mtt		= func_cap.mtt_quota;
	dev->caps.num_qps	= 1 << hca_param.log_num_qps;
	dev->caps.num_srqs	= 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs	= 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts	= 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs	= func_cap.max_eq;
	dev->caps.reserved_eqs	= func_cap.reserved_eq;
	dev->caps.reserved_lkey	= func_cap.reserved_lkey;
	dev->caps.num_pds	= MLX4_NUM_PDS;
	dev->caps.num_mgms	= 0;
	dev->caps.num_amgms	= 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	mlx4_replace_zero_macs(dev);

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						      &dev->caps.gid_table_len[i],
						      &dev->caps.pkey_table_len[i]);
		if (err)
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
				       pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		err = -ENOMEM;
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still needs to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
		 hca_param.rss_ip_frags ? "on" : "off");
"on" : "off"); 988 989 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 990 dev->caps.bf_reg_size) 991 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 992 993 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 994 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 995 996 return 0; 997 998 err_mem: 999 kfree(dev->caps.qp0_qkey); 1000 kfree(dev->caps.qp0_tunnel); 1001 kfree(dev->caps.qp0_proxy); 1002 kfree(dev->caps.qp1_tunnel); 1003 kfree(dev->caps.qp1_proxy); 1004 dev->caps.qp0_qkey = NULL; 1005 dev->caps.qp0_tunnel = NULL; 1006 dev->caps.qp0_proxy = NULL; 1007 dev->caps.qp1_tunnel = NULL; 1008 dev->caps.qp1_proxy = NULL; 1009 1010 return err; 1011 } 1012 1013 static void mlx4_request_modules(struct mlx4_dev *dev) 1014 { 1015 int port; 1016 int has_ib_port = false; 1017 int has_eth_port = false; 1018 #define EN_DRV_NAME "mlx4_en" 1019 #define IB_DRV_NAME "mlx4_ib" 1020 1021 for (port = 1; port <= dev->caps.num_ports; port++) { 1022 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 1023 has_ib_port = true; 1024 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 1025 has_eth_port = true; 1026 } 1027 1028 if (has_eth_port) 1029 request_module_nowait(EN_DRV_NAME); 1030 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 1031 request_module_nowait(IB_DRV_NAME); 1032 } 1033 1034 /* 1035 * Change the port configuration of the device. 1036 * Every user of this function must hold the port mutex. 1037 */ 1038 int mlx4_change_port_types(struct mlx4_dev *dev, 1039 enum mlx4_port_type *port_types) 1040 { 1041 int err = 0; 1042 int change = 0; 1043 int port; 1044 1045 for (port = 0; port < dev->caps.num_ports; port++) { 1046 /* Change the port type only if the new type is different 1047 * from the current, and not set to Auto */ 1048 if (port_types[port] != dev->caps.port_type[port + 1]) 1049 change = 1; 1050 } 1051 if (change) { 1052 mlx4_unregister_device(dev); 1053 for (port = 1; port <= dev->caps.num_ports; port++) { 1054 mlx4_CLOSE_PORT(dev, port); 1055 dev->caps.port_type[port] = port_types[port - 1]; 1056 err = mlx4_SET_PORT(dev, port, -1); 1057 if (err) { 1058 mlx4_err(dev, "Failed to set port %d, aborting\n", 1059 port); 1060 goto out; 1061 } 1062 } 1063 mlx4_set_port_mask(dev); 1064 err = mlx4_register_device(dev); 1065 if (err) { 1066 mlx4_err(dev, "Failed to register device\n"); 1067 goto out; 1068 } 1069 mlx4_request_modules(dev); 1070 } 1071 1072 out: 1073 return err; 1074 } 1075 1076 static ssize_t show_port_type(struct device *dev, 1077 struct device_attribute *attr, 1078 char *buf) 1079 { 1080 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1081 port_attr); 1082 struct mlx4_dev *mdev = info->dev; 1083 char type[8]; 1084 1085 sprintf(type, "%s", 1086 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static int __set_port_type(struct mlx4_port_info *info,
			   enum mlx4_port_type port_type)
{
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
		mlx4_err(mdev,
			 "Requested port type for port %d is not supported on this HCA\n",
			 info->port);
		err = -EINVAL;
		goto err_sup;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	info->tmp_type = port_type;

	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
err_sup:
	return err;
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	enum mlx4_port_type port_type;
	static DEFINE_MUTEX(set_port_type_mutex);
	int err;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n")) {
		port_type = MLX4_PORT_TYPE_IB;
	} else if (!strcmp(buf, "eth\n")) {
		port_type = MLX4_PORT_TYPE_ETH;
	} else if (!strcmp(buf, "auto\n")) {
		port_type = MLX4_PORT_TYPE_AUTO;
	} else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	err = __set_port_type(info, port_type);

err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}
enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
	int err = 0;
	int nvfs;
	struct mlx4_slaves_pport slaves_port1;
	struct mlx4_slaves_pport slaves_port2;
	DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);

	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
	bitmap_and(slaves_port_1_2,
		   slaves_port1.slaves, slaves_port2.slaves,
		   dev->persist->num_vfs + 1);
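	/*
	 * The PF (function 0) is attached to both ports, so the
	 * intersection above is expected to have weight 1; anything
	 * larger means some VF is dual-ported.
	 */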
	/* only single port vfs are allowed */
	if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
		return -EINVAL;
	}

	/* number of virtual functions is number of total functions minus one
	 * physical function for each port.
	 */
	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;

	/* limit on maximum allowed VFs */
	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
		mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
		return -EINVAL;
	}

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
		return -EINVAL;
	}

	err = mlx4_bond_mac_table(dev);
	if (err)
		return err;
	err = mlx4_bond_vlan_table(dev);
	if (err)
		goto err1;
	err = mlx4_bond_fs_rules(dev);
	if (err)
		goto err2;

	return 0;
err2:
	(void)mlx4_unbond_vlan_table(dev);
err1:
	(void)mlx4_unbond_mac_table(dev);
	return err;
}

static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
	int ret, ret1;

	ret = mlx4_unbond_fs_rules(dev);
	if (ret)
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
	ret1 = mlx4_unbond_mac_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
		ret = ret1;
	}
	ret1 = mlx4_unbond_vlan_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
		ret = ret1;
	}
	return ret;
}

int mlx4_bond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev)) {
		ret = mlx4_do_bond(dev, true);
		if (ret)
			mlx4_err(dev, "Failed to bond device: %d\n", ret);
		if (!ret && mlx4_is_master(dev)) {
			ret = mlx4_mf_bond(dev);
			if (ret) {
				mlx4_err(dev, "bond for multifunction failed\n");
				mlx4_do_bond(dev, false);
			}
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is bonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_bond);

int mlx4_unbond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev)) {
		int ret2 = 0;

		ret = mlx4_do_bond(dev, false);
		if (ret)
			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
		if (mlx4_is_master(dev))
			ret2 = mlx4_mf_unbond(dev);
		if (ret2) {
			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
			ret = ret2;
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is unbonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_unbond);

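/*
 * Example caller (hypothetical): remap virtual port 1 onto physical
 * port 2 while keeping port 2's current mapping:
 *
 *	struct mlx4_port_map v2p = { .port1 = 2, .port2 = 0 };
 *	int err = mlx4_port_map_set(dev, &v2p);
 *
 * A zero field means "keep the current mapping for this port".
 */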
int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -ENOTSUPP;

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
	if (port1 == 0)
		port1 = priv->v2p.port1;
	if (port2 == 0)
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides boundary checks cross mapping makes
		 * no sense and therefore not allowed */
		err = -EINVAL;
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = 0;
	} else {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
		if (!err) {
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
				 port1, port2);
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
		}
	}

	mutex_unlock(&priv->bond_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_port_map_set);

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
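/*
 * Layout note: the QP, SRQ, CQ and EQ cMPT regions above sit back to
 * back in ICM; each region spans cmpt_entry_sz << MLX4_CMPT_SHIFT
 * bytes, i.e. leaves room for 1 << MLX4_CMPT_SHIFT entries per type.
 */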
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}


	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
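	/* For example (hypothetical values): with a 128B DMA cache
	 * alignment and 64B MTT segments, this rounds reserved_mtts up
	 * to an even number of segments.
	 */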
	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
			  MLX4_COMM_TIME))
to close slave function\n"); 1797 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1798 } 1799 1800 static int map_bf_area(struct mlx4_dev *dev) 1801 { 1802 struct mlx4_priv *priv = mlx4_priv(dev); 1803 resource_size_t bf_start; 1804 resource_size_t bf_len; 1805 int err = 0; 1806 1807 if (!dev->caps.bf_reg_size) 1808 return -ENXIO; 1809 1810 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1811 (dev->caps.num_uars << PAGE_SHIFT); 1812 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1813 (dev->caps.num_uars << PAGE_SHIFT); 1814 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1815 if (!priv->bf_mapping) 1816 err = -ENOMEM; 1817 1818 return err; 1819 } 1820 1821 static void unmap_bf_area(struct mlx4_dev *dev) 1822 { 1823 if (mlx4_priv(dev)->bf_mapping) 1824 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1825 } 1826 1827 s64 mlx4_read_clock(struct mlx4_dev *dev) 1828 { 1829 u32 clockhi, clocklo, clockhi1; 1830 s64 cycles; 1831 int i; 1832 struct mlx4_priv *priv = mlx4_priv(dev); 1833 1834 if (!priv->clock_mapping) 1835 return -ENOTSUPP; 1836 1837 for (i = 0; i < 10; i++) { 1838 clockhi = swab32(readl(priv->clock_mapping)); 1839 clocklo = swab32(readl(priv->clock_mapping + 4)); 1840 clockhi1 = swab32(readl(priv->clock_mapping)); 1841 if (clockhi == clockhi1) 1842 break; 1843 } 1844 1845 cycles = (u64) clockhi << 32 | (u64) clocklo; 1846 1847 return cycles & CORE_CLOCK_MASK; 1848 } 1849 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1850 1851 1852 static int map_internal_clock(struct mlx4_dev *dev) 1853 { 1854 struct mlx4_priv *priv = mlx4_priv(dev); 1855 1856 priv->clock_mapping = 1857 ioremap(pci_resource_start(dev->persist->pdev, 1858 priv->fw.clock_bar) + 1859 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1860 1861 if (!priv->clock_mapping) 1862 return -ENOMEM; 1863 1864 return 0; 1865 } 1866 1867 int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1868 struct mlx4_clock_params *params) 1869 { 1870 struct mlx4_priv *priv = mlx4_priv(dev); 1871 1872 if (mlx4_is_slave(dev)) 1873 return -ENOTSUPP; 1874 1875 if (!params) 1876 return -EINVAL; 1877 1878 params->bar = priv->fw.clock_bar; 1879 params->offset = priv->fw.clock_offset; 1880 params->size = MLX4_CLOCK_SIZE; 1881 1882 return 0; 1883 } 1884 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); 1885 1886 static void unmap_internal_clock(struct mlx4_dev *dev) 1887 { 1888 struct mlx4_priv *priv = mlx4_priv(dev); 1889 1890 if (priv->clock_mapping) 1891 iounmap(priv->clock_mapping); 1892 } 1893 1894 static void mlx4_close_hca(struct mlx4_dev *dev) 1895 { 1896 unmap_internal_clock(dev); 1897 unmap_bf_area(dev); 1898 if (mlx4_is_slave(dev)) 1899 mlx4_slave_exit(dev); 1900 else { 1901 mlx4_CLOSE_HCA(dev, 0); 1902 mlx4_free_icms(dev); 1903 } 1904 } 1905 1906 static void mlx4_close_fw(struct mlx4_dev *dev) 1907 { 1908 if (!mlx4_is_slave(dev)) { 1909 mlx4_UNMAP_FA(dev); 1910 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1911 } 1912 } 1913 1914 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 1915 { 1916 #define COMM_CHAN_OFFLINE_OFFSET 0x09 1917 1918 u32 comm_flags; 1919 u32 offline_bit; 1920 unsigned long end; 1921 struct mlx4_priv *priv = mlx4_priv(dev); 1922 1923 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 1924 while (time_before(jiffies, end)) { 1925 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 1926 MLX4_COMM_CHAN_FLAGS)); 1927 offline_bit = (comm_flags & 1928 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1929 if (!offline_bit) 1930 return 0; 1931 /* There are cases as part of AER/Reset flow that PF needs 1932 
* around 100 msec to load. We therefore sleep for 100 msec 1933 * to allow other tasks to make use of that CPU during this 1934 * time interval. 1935 */ 1936 msleep(100); 1937 } 1938 mlx4_err(dev, "Communication channel is offline.\n"); 1939 return -EIO; 1940 } 1941 1942 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 1943 { 1944 #define COMM_CHAN_RST_OFFSET 0x1e 1945 1946 struct mlx4_priv *priv = mlx4_priv(dev); 1947 u32 comm_rst; 1948 u32 comm_caps; 1949 1950 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 1951 MLX4_COMM_CHAN_CAPS)); 1952 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 1953 1954 if (comm_rst) 1955 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 1956 } 1957 1958 static int mlx4_init_slave(struct mlx4_dev *dev) 1959 { 1960 struct mlx4_priv *priv = mlx4_priv(dev); 1961 u64 dma = (u64) priv->mfunc.vhcr_dma; 1962 int ret_from_reset = 0; 1963 u32 slave_read; 1964 u32 cmd_channel_ver; 1965 1966 if (atomic_read(&pf_loading)) { 1967 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 1968 return -EAGAIN; 1969 } 1970 1971 mutex_lock(&priv->cmd.slave_cmd_mutex); 1972 priv->cmd.max_cmds = 1; 1973 if (mlx4_comm_check_offline(dev)) { 1974 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 1975 goto err_offline; 1976 } 1977 1978 mlx4_reset_vf_support(dev); 1979 mlx4_warn(dev, "Sending reset\n"); 1980 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1981 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 1982 /* if we are in the middle of flr the slave will try 1983 * NUM_OF_RESET_RETRIES times before leaving.*/ 1984 if (ret_from_reset) { 1985 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1986 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 1987 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1988 return -EAGAIN; 1989 } else 1990 goto err; 1991 } 1992 1993 /* check the driver version - the slave I/F revision 1994 * must match the master's */ 1995 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 1996 cmd_channel_ver = mlx4_comm_get_version(); 1997 1998 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1999 MLX4_COMM_GET_IF_REV(slave_read)) { 2000 mlx4_err(dev, "slave driver version is not supported by the master\n"); 2001 goto err; 2002 } 2003 2004 mlx4_warn(dev, "Sending vhcr0\n"); 2005 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 2006 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2007 goto err; 2008 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 2009 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2010 goto err; 2011 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 2012 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2013 goto err; 2014 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 2015 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2016 goto err; 2017 2018 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2019 return 0; 2020 2021 err: 2022 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 2023 err_offline: 2024 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2025 return -EIO; 2026 } 2027 2028 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 2029 { 2030 int i; 2031 2032 for (i = 1; i <= dev->caps.num_ports; i++) { 2033 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 2034 dev->caps.gid_table_len[i] = 2035 mlx4_get_slave_num_gids(dev, 0, i); 2036 else 2037 dev->caps.gid_table_len[i] = 1; 2038 dev->caps.pkey_table_len[i] = 2039 dev->phys_caps.pkey_phys_table_len[i] - 1; 2040 } 2041 } 2042 2043 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 2044 { 2045 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 2046 
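/* Pick the smallest log entry size whose MGM entry can hold qp_per_entry QPs. An entry of 2^i bytes spans (1 << i) / 16 lines of 16 bytes; assuming the usual mlx4 MGM layout (two header lines, then four 32-bit QPNs per line), the capacity is 4 * ((1 << i) / 16 - 2), which is the bound tested below. */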
2047 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 2048 i++) { 2049 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 2050 break; 2051 } 2052 2053 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 2054 } 2055 2056 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 2057 { 2058 switch (dmfs_high_steer_mode) { 2059 case MLX4_STEERING_DMFS_A0_DEFAULT: 2060 return "default performance"; 2061 2062 case MLX4_STEERING_DMFS_A0_DYNAMIC: 2063 return "dynamic hybrid mode"; 2064 2065 case MLX4_STEERING_DMFS_A0_STATIC: 2066 return "performance optimized for limited rule configuration (static)"; 2067 2068 case MLX4_STEERING_DMFS_A0_DISABLE: 2069 return "disabled performance optimized steering"; 2070 2071 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 2072 return "performance optimized steering not supported"; 2073 2074 default: 2075 return "Unrecognized mode"; 2076 } 2077 } 2078 2079 #define MLX4_DMFS_A0_STEERING (1UL << 2) 2080 2081 static void choose_steering_mode(struct mlx4_dev *dev, 2082 struct mlx4_dev_cap *dev_cap) 2083 { 2084 if (mlx4_log_num_mgm_entry_size <= 0) { 2085 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 2086 if (dev->caps.dmfs_high_steer_mode == 2087 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2088 mlx4_err(dev, "DMFS high rate mode not supported\n"); 2089 else 2090 dev->caps.dmfs_high_steer_mode = 2091 MLX4_STEERING_DMFS_A0_STATIC; 2092 } 2093 } 2094 2095 if (mlx4_log_num_mgm_entry_size <= 0 && 2096 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 2097 (!mlx4_is_mfunc(dev) || 2098 (dev_cap->fs_max_num_qp_per_entry >= 2099 (dev->persist->num_vfs + 1))) && 2100 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 2101 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 2102 dev->oper_log_mgm_entry_size = 2103 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 2104 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2105 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 2106 dev->caps.fs_log_max_ucast_qp_range_size = 2107 dev_cap->fs_log_max_ucast_qp_range_size; 2108 } else { 2109 if (dev->caps.dmfs_high_steer_mode != 2110 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2111 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 2112 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 2113 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2114 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 2115 else { 2116 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 2117 2118 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 2119 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2120 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 2121 } 2122 dev->oper_log_mgm_entry_size = 2123 mlx4_log_num_mgm_entry_size > 0 ? 
2124 mlx4_log_num_mgm_entry_size : 2125 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 2126 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 2127 } 2128 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 2129 mlx4_steering_mode_str(dev->caps.steering_mode), 2130 dev->oper_log_mgm_entry_size, 2131 mlx4_log_num_mgm_entry_size); 2132 } 2133 2134 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 2135 struct mlx4_dev_cap *dev_cap) 2136 { 2137 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 2138 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 2139 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 2140 else 2141 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 2142 2143 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 2144 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 2145 } 2146 2147 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 2148 { 2149 int i; 2150 struct mlx4_port_cap port_cap; 2151 2152 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2153 return -EINVAL; 2154 2155 for (i = 1; i <= dev->caps.num_ports; i++) { 2156 if (mlx4_dev_port(dev, i, &port_cap)) { 2157 mlx4_err(dev, 2158 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n"); 2159 } else if ((dev->caps.dmfs_high_steer_mode != 2160 MLX4_STEERING_DMFS_A0_DEFAULT) && 2161 (port_cap.dmfs_optimized_state == 2162 !!(dev->caps.dmfs_high_steer_mode == 2163 MLX4_STEERING_DMFS_A0_DISABLE))) { 2164 mlx4_err(dev, 2165 "DMFS high rate steer mode differs, driver requested %s but %s in FW.\n", 2166 dmfs_high_rate_steering_mode_str( 2167 dev->caps.dmfs_high_steer_mode), 2168 (port_cap.dmfs_optimized_state ? 
2169 "enabled" : "disabled")); 2170 } 2171 } 2172 2173 return 0; 2174 } 2175 2176 static int mlx4_init_fw(struct mlx4_dev *dev) 2177 { 2178 struct mlx4_mod_stat_cfg mlx4_cfg; 2179 int err = 0; 2180 2181 if (!mlx4_is_slave(dev)) { 2182 err = mlx4_QUERY_FW(dev); 2183 if (err) { 2184 if (err == -EACCES) 2185 mlx4_info(dev, "non-primary physical function, skipping\n"); 2186 else 2187 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 2188 return err; 2189 } 2190 2191 err = mlx4_load_fw(dev); 2192 if (err) { 2193 mlx4_err(dev, "Failed to start FW, aborting\n"); 2194 return err; 2195 } 2196 2197 mlx4_cfg.log_pg_sz_m = 1; 2198 mlx4_cfg.log_pg_sz = 0; 2199 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 2200 if (err) 2201 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 2202 } 2203 2204 return err; 2205 } 2206 2207 static int mlx4_init_hca(struct mlx4_dev *dev) 2208 { 2209 struct mlx4_priv *priv = mlx4_priv(dev); 2210 struct mlx4_adapter adapter; 2211 struct mlx4_dev_cap dev_cap; 2212 struct mlx4_profile profile; 2213 struct mlx4_init_hca_param init_hca; 2214 u64 icm_size; 2215 struct mlx4_config_dev_params params; 2216 int err; 2217 2218 if (!mlx4_is_slave(dev)) { 2219 err = mlx4_dev_cap(dev, &dev_cap); 2220 if (err) { 2221 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 2222 return err; 2223 } 2224 2225 choose_steering_mode(dev, &dev_cap); 2226 choose_tunnel_offload_mode(dev, &dev_cap); 2227 2228 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 2229 mlx4_is_master(dev)) 2230 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 2231 2232 err = mlx4_get_phys_port_id(dev); 2233 if (err) 2234 mlx4_err(dev, "Fail to get physical port id\n"); 2235 2236 if (mlx4_is_master(dev)) 2237 mlx4_parav_master_pf_caps(dev); 2238 2239 if (mlx4_low_memory_profile()) { 2240 mlx4_info(dev, "Running from within kdump kernel. 
Using low memory profile\n"); 2241 profile = low_mem_profile; 2242 } else { 2243 profile = default_profile; 2244 } 2245 if (dev->caps.steering_mode == 2246 MLX4_STEERING_MODE_DEVICE_MANAGED) 2247 profile.num_mcg = MLX4_FS_NUM_MCG; 2248 2249 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 2250 &init_hca); 2251 if ((long long) icm_size < 0) { 2252 err = icm_size; 2253 return err; 2254 } 2255 2256 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2257 2258 if (enable_4k_uar) { 2259 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + 2260 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; 2261 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; 2262 } else { 2263 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2264 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2265 } 2266 2267 init_hca.mw_enabled = 0; 2268 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2269 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2270 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2271 2272 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 2273 if (err) 2274 return err; 2275 2276 err = mlx4_INIT_HCA(dev, &init_hca); 2277 if (err) { 2278 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2279 goto err_free_icm; 2280 } 2281 2282 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2283 err = mlx4_query_func(dev, &dev_cap); 2284 if (err < 0) { 2285 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2286 goto err_close; 2287 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2288 dev->caps.num_eqs = dev_cap.max_eqs; 2289 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 2290 dev->caps.reserved_uars = dev_cap.reserved_uars; 2291 } 2292 } 2293 2294 /* 2295 * If TS is supported by FW 2296 * read HCA frequency by QUERY_HCA command 2297 */ 2298 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2299 memset(&init_hca, 0, sizeof(init_hca)); 2300 err = mlx4_QUERY_HCA(dev, &init_hca); 2301 if (err) { 2302 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2303 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2304 } else { 2305 dev->caps.hca_core_clock = 2306 init_hca.hca_core_clock; 2307 } 2308 2309 /* In case we got HCA frequency 0 - disable timestamping 2310 * to avoid dividing by zero 2311 */ 2312 if (!dev->caps.hca_core_clock) { 2313 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2314 mlx4_err(dev, 2315 "HCA frequency is 0 - timestamping is not supported\n"); 2316 } else if (map_internal_clock(dev)) { 2317 /* 2318 * Map internal clock, 2319 * in case of failure disable timestamping 2320 */ 2321 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2322 mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); 2323 } 2324 } 2325 2326 if (dev->caps.dmfs_high_steer_mode != 2327 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2328 if (mlx4_validate_optimized_steering(dev)) 2329 mlx4_warn(dev, "Optimized steering validation failed\n"); 2330 2331 if (dev->caps.dmfs_high_steer_mode == 2332 MLX4_STEERING_DMFS_A0_DISABLE) { 2333 dev->caps.dmfs_high_rate_qpn_base = 2334 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2335 dev->caps.dmfs_high_rate_qpn_range = 2336 MLX4_A0_STEERING_TABLE_SIZE; 2337 } 2338 2339 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 2340 dmfs_high_rate_steering_mode_str( 2341 dev->caps.dmfs_high_steer_mode)); 2342 } 2343 } else { 2344 err = mlx4_init_slave(dev); 2345 if (err) { 2346 if (err != -EAGAIN) 2347 mlx4_err(dev, "Failed to initialize slave\n"); 2348 return err; 2349 } 2350 2351 err = mlx4_slave_cap(dev); 2352 if (err) { 2353 mlx4_err(dev, "Failed to obtain slave caps\n"); 2354 goto err_close; 2355 } 2356 } 2357 2358 if (map_bf_area(dev)) 2359 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2360 2361 /* Only the master sets the ports; all the rest get them from it. */ 2362 if (!mlx4_is_slave(dev)) 2363 mlx4_set_port_mask(dev); 2364 2365 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2366 if (err) { 2367 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2368 goto unmap_bf; 2369 } 2370 2371 /* Query CONFIG_DEV parameters */ 2372 err = mlx4_config_dev_retrieval(dev, &params); 2373 if (err && err != -ENOTSUPP) { 2374 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2375 } else if (!err) { 2376 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2377 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2378 } 2379 priv->eq_table.inta_pin = adapter.inta_pin; 2380 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 2381 2382 return 0; 2383 2384 unmap_bf: 2385 unmap_internal_clock(dev); 2386 unmap_bf_area(dev); 2387 2388 if (mlx4_is_slave(dev)) { 2389 kfree(dev->caps.qp0_qkey); 2390 kfree(dev->caps.qp0_tunnel); 2391 kfree(dev->caps.qp0_proxy); 2392 kfree(dev->caps.qp1_tunnel); 2393 kfree(dev->caps.qp1_proxy); 2394 } 2395 2396 err_close: 2397 if (mlx4_is_slave(dev)) 2398 mlx4_slave_exit(dev); 2399 else 2400 mlx4_CLOSE_HCA(dev, 0); 2401 2402 err_free_icm: 2403 if (!mlx4_is_slave(dev)) 2404 mlx4_free_icms(dev); 2405 2406 return err; 2407 } 2408 2409 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2410 { 2411 struct mlx4_priv *priv = mlx4_priv(dev); 2412 int nent_pow2; 2413 2414 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2415 return -ENOENT; 2416 2417 if (!dev->caps.max_counters) 2418 return -ENOSPC; 2419 2420 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); 2421 /* reserve last counter index for sink counter */ 2422 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2, 2423 nent_pow2 - 1, 0, 2424 nent_pow2 - dev->caps.max_counters + 1); 2425 } 2426 2427 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2428 { 2429 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2430 return; 2431 2432 if (!dev->caps.max_counters) 2433 return; 2434 2435 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2436 } 2437 2438 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) 2439 { 2440 struct mlx4_priv *priv = mlx4_priv(dev); 2441 int port; 2442 2443 for (port = 0; port < dev->caps.num_ports; port++) 2444 if (priv->def_counter[port] != -1) 2445 mlx4_counter_free(dev, priv->def_counter[port]); 2446 } 2447 2448 static int mlx4_allocate_default_counters(struct mlx4_dev 
*dev) 2449 { 2450 struct mlx4_priv *priv = mlx4_priv(dev); 2451 int port, err = 0; 2452 u32 idx; 2453 2454 for (port = 0; port < dev->caps.num_ports; port++) 2455 priv->def_counter[port] = -1; 2456 2457 for (port = 0; port < dev->caps.num_ports; port++) { 2458 err = mlx4_counter_alloc(dev, &idx); 2459 2460 if (!err || err == -ENOSPC) { 2461 priv->def_counter[port] = idx; 2462 } else if (err == -ENOENT) { 2463 err = 0; 2464 continue; 2465 } else if (mlx4_is_slave(dev) && err == -EINVAL) { 2466 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); 2467 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", 2468 MLX4_SINK_COUNTER_INDEX(dev)); 2469 err = 0; 2470 } else { 2471 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2472 __func__, port + 1, err); 2473 mlx4_cleanup_default_counters(dev); 2474 return err; 2475 } 2476 2477 mlx4_dbg(dev, "%s: default counter index %d for port %d\n", 2478 __func__, priv->def_counter[port], port + 1); 2479 } 2480 2481 return err; 2482 } 2483 2484 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2485 { 2486 struct mlx4_priv *priv = mlx4_priv(dev); 2487 2488 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2489 return -ENOENT; 2490 2491 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2492 if (*idx == -1) { 2493 *idx = MLX4_SINK_COUNTER_INDEX(dev); 2494 return -ENOSPC; 2495 } 2496 2497 return 0; 2498 } 2499 2500 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2501 { 2502 u64 out_param; 2503 int err; 2504 2505 if (mlx4_is_mfunc(dev)) { 2506 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2507 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2508 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2509 if (!err) 2510 *idx = get_param_l(&out_param); 2511 2512 return err; 2513 } 2514 return __mlx4_counter_alloc(dev, idx); 2515 } 2516 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2517 2518 static int __mlx4_clear_if_stat(struct mlx4_dev *dev, 2519 u8 counter_index) 2520 { 2521 struct mlx4_cmd_mailbox *if_stat_mailbox; 2522 int err; 2523 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET; 2524 2525 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); 2526 if (IS_ERR(if_stat_mailbox)) 2527 return PTR_ERR(if_stat_mailbox); 2528 2529 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, 2530 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, 2531 MLX4_CMD_NATIVE); 2532 2533 mlx4_free_cmd_mailbox(dev, if_stat_mailbox); 2534 return err; 2535 } 2536 2537 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2538 { 2539 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2540 return; 2541 2542 if (idx == MLX4_SINK_COUNTER_INDEX(dev)) 2543 return; 2544 2545 __mlx4_clear_if_stat(dev, idx); 2546 2547 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2548 return; 2549 } 2550 2551 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2552 { 2553 u64 in_param = 0; 2554 2555 if (mlx4_is_mfunc(dev)) { 2556 set_param_l(&in_param, idx); 2557 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2558 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2559 MLX4_CMD_WRAPPED); 2560 return; 2561 } 2562 __mlx4_counter_free(dev, idx); 2563 } 2564 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2565 2566 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) 2567 { 2568 struct mlx4_priv *priv = mlx4_priv(dev); 2569 2570 return priv->def_counter[port - 1]; 2571 } 2572 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index); 2573 2574 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) 
2575 { 2576 struct mlx4_priv *priv = mlx4_priv(dev); 2577 2578 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2579 } 2580 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); 2581 2582 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) 2583 { 2584 struct mlx4_priv *priv = mlx4_priv(dev); 2585 2586 return priv->mfunc.master.vf_admin[entry].vport[port].guid; 2587 } 2588 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); 2589 2590 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) 2591 { 2592 struct mlx4_priv *priv = mlx4_priv(dev); 2593 __be64 guid; 2594 2595 /* hw GUID */ 2596 if (entry == 0) 2597 return; 2598 2599 get_random_bytes((char *)&guid, sizeof(guid)); 2600 guid &= ~(cpu_to_be64(1ULL << 56)); 2601 guid |= cpu_to_be64(1ULL << 57); 2602 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2603 } 2604 2605 static int mlx4_setup_hca(struct mlx4_dev *dev) 2606 { 2607 struct mlx4_priv *priv = mlx4_priv(dev); 2608 int err; 2609 int port; 2610 __be32 ib_port_default_caps; 2611 2612 err = mlx4_init_uar_table(dev); 2613 if (err) { 2614 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2615 return err; 2616 } 2617 2618 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2619 if (err) { 2620 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2621 goto err_uar_table_free; 2622 } 2623 2624 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2625 if (!priv->kar) { 2626 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2627 err = -ENOMEM; 2628 goto err_uar_free; 2629 } 2630 2631 err = mlx4_init_pd_table(dev); 2632 if (err) { 2633 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2634 goto err_kar_unmap; 2635 } 2636 2637 err = mlx4_init_xrcd_table(dev); 2638 if (err) { 2639 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2640 goto err_pd_table_free; 2641 } 2642 2643 err = mlx4_init_mr_table(dev); 2644 if (err) { 2645 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2646 goto err_xrcd_table_free; 2647 } 2648 2649 if (!mlx4_is_slave(dev)) { 2650 err = mlx4_init_mcg_table(dev); 2651 if (err) { 2652 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2653 goto err_mr_table_free; 2654 } 2655 err = mlx4_config_mad_demux(dev); 2656 if (err) { 2657 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2658 goto err_mcg_table_free; 2659 } 2660 } 2661 2662 err = mlx4_init_eq_table(dev); 2663 if (err) { 2664 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2665 goto err_mcg_table_free; 2666 } 2667 2668 err = mlx4_cmd_use_events(dev); 2669 if (err) { 2670 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2671 goto err_eq_table_free; 2672 } 2673 2674 err = mlx4_NOP(dev); 2675 if (err) { 2676 if (dev->flags & MLX4_FLAG_MSI_X) { 2677 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n", 2678 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2679 mlx4_warn(dev, "Trying again without MSI-X\n"); 2680 } else { 2681 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2682 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2683 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2684 } 2685 2686 goto err_cmd_poll; 2687 } 2688 2689 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2690 2691 err = mlx4_init_cq_table(dev); 2692 if (err) { 2693 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 
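/* Commands are event-driven at this point; the err_cmd_poll unwind path below reverts the command interface to polling before the EQ table is torn down. */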
2694 goto err_cmd_poll; 2695 } 2696 2697 err = mlx4_init_srq_table(dev); 2698 if (err) { 2699 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2700 goto err_cq_table_free; 2701 } 2702 2703 err = mlx4_init_qp_table(dev); 2704 if (err) { 2705 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2706 goto err_srq_table_free; 2707 } 2708 2709 if (!mlx4_is_slave(dev)) { 2710 err = mlx4_init_counters_table(dev); 2711 if (err && err != -ENOENT) { 2712 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2713 goto err_qp_table_free; 2714 } 2715 } 2716 2717 err = mlx4_allocate_default_counters(dev); 2718 if (err) { 2719 mlx4_err(dev, "Failed to allocate default counters, aborting\n"); 2720 goto err_counters_table_free; 2721 } 2722 2723 if (!mlx4_is_slave(dev)) { 2724 for (port = 1; port <= dev->caps.num_ports; port++) { 2725 ib_port_default_caps = 0; 2726 err = mlx4_get_port_ib_caps(dev, port, 2727 &ib_port_default_caps); 2728 if (err) 2729 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2730 port, err); 2731 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2732 2733 /* initialize per-slave default ib port capabilities */ 2734 if (mlx4_is_master(dev)) { 2735 int i; 2736 for (i = 0; i < dev->num_slaves; i++) { 2737 if (i == mlx4_master_func_num(dev)) 2738 continue; 2739 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2740 ib_port_default_caps; 2741 } 2742 } 2743 2744 if (mlx4_is_mfunc(dev)) 2745 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2746 else 2747 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2748 2749 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2750 dev->caps.pkey_table_len[port] : -1); 2751 if (err) { 2752 mlx4_err(dev, "Failed to set port %d, aborting\n", 2753 port); 2754 goto err_default_counters_free; 2755 } 2756 } 2757 } 2758 2759 return 0; 2760 2761 err_default_counters_free: 2762 mlx4_cleanup_default_counters(dev); 2763 2764 err_counters_table_free: 2765 if (!mlx4_is_slave(dev)) 2766 mlx4_cleanup_counters_table(dev); 2767 2768 err_qp_table_free: 2769 mlx4_cleanup_qp_table(dev); 2770 2771 err_srq_table_free: 2772 mlx4_cleanup_srq_table(dev); 2773 2774 err_cq_table_free: 2775 mlx4_cleanup_cq_table(dev); 2776 2777 err_cmd_poll: 2778 mlx4_cmd_use_polling(dev); 2779 2780 err_eq_table_free: 2781 mlx4_cleanup_eq_table(dev); 2782 2783 err_mcg_table_free: 2784 if (!mlx4_is_slave(dev)) 2785 mlx4_cleanup_mcg_table(dev); 2786 2787 err_mr_table_free: 2788 mlx4_cleanup_mr_table(dev); 2789 2790 err_xrcd_table_free: 2791 mlx4_cleanup_xrcd_table(dev); 2792 2793 err_pd_table_free: 2794 mlx4_cleanup_pd_table(dev); 2795 2796 err_kar_unmap: 2797 iounmap(priv->kar); 2798 2799 err_uar_free: 2800 mlx4_uar_free(dev, &priv->driver_uar); 2801 2802 err_uar_table_free: 2803 mlx4_cleanup_uar_table(dev); 2804 return err; 2805 } 2806 2807 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) 2808 { 2809 int requested_cpu = 0; 2810 struct mlx4_priv *priv = mlx4_priv(dev); 2811 struct mlx4_eq *eq; 2812 int off = 0; 2813 int i; 2814 2815 if (eqn > dev->caps.num_comp_vectors) 2816 return -EINVAL; 2817 2818 for (i = 1; i < port; i++) 2819 off += mlx4_get_eqs_per_port(dev, i); 2820 2821 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); 2822 2823 /* Meaning EQs are shared, and this call comes from the second port */ 2824 if (requested_cpu < 0) 2825 return 0; 2826 2827 eq = &priv->eq_table.eq[eqn]; 2828 2829 eq->affinity_cpu_id = requested_cpu % num_online_cpus(); 2830 2831 return 
0; 2832 } 2833 2834 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2835 { 2836 struct mlx4_priv *priv = mlx4_priv(dev); 2837 struct msix_entry *entries; 2838 int i; 2839 int port = 0; 2840 2841 if (msi_x) { 2842 int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2843 2844 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2845 nreq); 2846 if (nreq > MAX_MSIX) 2847 nreq = MAX_MSIX; 2848 2849 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2850 if (!entries) 2851 goto no_msi; 2852 2853 for (i = 0; i < nreq; ++i) 2854 entries[i].entry = i; 2855 2856 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2857 nreq); 2858 2859 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { 2860 kfree(entries); 2861 goto no_msi; 2862 } 2863 /* 1 is reserved for events (asynchronous EQ) */ 2864 dev->caps.num_comp_vectors = nreq - 1; 2865 2866 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; 2867 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, 2868 dev->caps.num_ports); 2869 2870 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { 2871 if (i == MLX4_EQ_ASYNC) 2872 continue; 2873 2874 priv->eq_table.eq[i].irq = 2875 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; 2876 2877 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { 2878 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2879 dev->caps.num_ports); 2880 /* We don't set affinity hint when there 2881 * aren't enough EQs 2882 */ 2883 } else { 2884 set_bit(port, 2885 priv->eq_table.eq[i].actv_ports.ports); 2886 if (mlx4_init_affinity_hint(dev, port + 1, i)) 2887 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", 2888 i); 2889 } 2890 /* We divide the EQs evenly between the two ports. 2891 * (dev->caps.num_comp_vectors / dev->caps.num_ports) 2892 * refers to the number of EQs per port 2893 * (i.e. eqs_per_port). Theoretically, we would like to 2894 * write something like (i + 1) % eqs_per_port == 0. 2895 * However, since there's an asynchronous EQ, we have 2896 * to skip over it by comparing this condition to 2897 * !!((i + 1) > MLX4_EQ_ASYNC). 2898 */ 2899 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && 2900 ((i + 1) % 2901 (dev->caps.num_comp_vectors / dev->caps.num_ports)) == 2902 !!((i + 1) > MLX4_EQ_ASYNC)) 2903 /* If dev->caps.num_comp_vectors < dev->caps.num_ports, 2904 * everything is shared anyway. 
2905 */ 2906 port++; 2907 } 2908 2909 dev->flags |= MLX4_FLAG_MSI_X; 2910 2911 kfree(entries); 2912 return; 2913 } 2914 2915 no_msi: 2916 dev->caps.num_comp_vectors = 1; 2917 2918 BUG_ON(MLX4_EQ_ASYNC >= 2); 2919 for (i = 0; i < 2; ++i) { 2920 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2921 if (i != MLX4_EQ_ASYNC) { 2922 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2923 dev->caps.num_ports); 2924 } 2925 } 2926 } 2927 2928 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2929 { 2930 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2931 int err = 0; 2932 2933 info->dev = dev; 2934 info->port = port; 2935 if (!mlx4_is_slave(dev)) { 2936 mlx4_init_mac_table(dev, &info->mac_table); 2937 mlx4_init_vlan_table(dev, &info->vlan_table); 2938 mlx4_init_roce_gid_table(dev, &info->gid_table); 2939 info->base_qpn = mlx4_get_base_qpn(dev, port); 2940 } 2941 2942 sprintf(info->dev_name, "mlx4_port%d", port); 2943 info->port_attr.attr.name = info->dev_name; 2944 if (mlx4_is_mfunc(dev)) 2945 info->port_attr.attr.mode = S_IRUGO; 2946 else { 2947 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2948 info->port_attr.store = set_port_type; 2949 } 2950 info->port_attr.show = show_port_type; 2951 sysfs_attr_init(&info->port_attr.attr); 2952 2953 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 2954 if (err) { 2955 mlx4_err(dev, "Failed to create file for port %d\n", port); 2956 info->port = -1; 2957 } 2958 2959 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2960 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2961 if (mlx4_is_mfunc(dev)) 2962 info->port_mtu_attr.attr.mode = S_IRUGO; 2963 else { 2964 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2965 info->port_mtu_attr.store = set_port_ib_mtu; 2966 } 2967 info->port_mtu_attr.show = show_port_ib_mtu; 2968 sysfs_attr_init(&info->port_mtu_attr.attr); 2969 2970 err = device_create_file(&dev->persist->pdev->dev, 2971 &info->port_mtu_attr); 2972 if (err) { 2973 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2974 device_remove_file(&info->dev->persist->pdev->dev, 2975 &info->port_attr); 2976 info->port = -1; 2977 } 2978 2979 return err; 2980 } 2981 2982 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2983 { 2984 if (info->port < 0) 2985 return; 2986 2987 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2988 device_remove_file(&info->dev->persist->pdev->dev, 2989 &info->port_mtu_attr); 2990 #ifdef CONFIG_RFS_ACCEL 2991 free_irq_cpu_rmap(info->rmap); 2992 info->rmap = NULL; 2993 #endif 2994 } 2995 2996 static int mlx4_init_steering(struct mlx4_dev *dev) 2997 { 2998 struct mlx4_priv *priv = mlx4_priv(dev); 2999 int num_entries = dev->caps.num_ports; 3000 int i, j; 3001 3002 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 3003 if (!priv->steer) 3004 return -ENOMEM; 3005 3006 for (i = 0; i < num_entries; i++) 3007 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3008 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 3009 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 3010 } 3011 return 0; 3012 } 3013 3014 static void mlx4_clear_steering(struct mlx4_dev *dev) 3015 { 3016 struct mlx4_priv *priv = mlx4_priv(dev); 3017 struct mlx4_steer_index *entry, *tmp_entry; 3018 struct mlx4_promisc_qp *pqp, *tmp_pqp; 3019 int num_entries = dev->caps.num_ports; 3020 int i, j; 3021 3022 for (i = 0; i < num_entries; i++) { 3023 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3024 list_for_each_entry_safe(pqp, tmp_pqp, 3025 &priv->steer[i].promisc_qps[j], 
3026 list) { 3027 list_del(&pqp->list); 3028 kfree(pqp); 3029 } 3030 list_for_each_entry_safe(entry, tmp_entry, 3031 &priv->steer[i].steer_entries[j], 3032 list) { 3033 list_del(&entry->list); 3034 list_for_each_entry_safe(pqp, tmp_pqp, 3035 &entry->duplicates, 3036 list) { 3037 list_del(&pqp->list); 3038 kfree(pqp); 3039 } 3040 kfree(entry); 3041 } 3042 } 3043 } 3044 kfree(priv->steer); 3045 } 3046 3047 static int extended_func_num(struct pci_dev *pdev) 3048 { 3049 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 3050 } 3051 3052 #define MLX4_OWNER_BASE 0x8069c 3053 #define MLX4_OWNER_SIZE 4 3054 3055 static int mlx4_get_ownership(struct mlx4_dev *dev) 3056 { 3057 void __iomem *owner; 3058 u32 ret; 3059 3060 if (pci_channel_offline(dev->persist->pdev)) 3061 return -EIO; 3062 3063 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3064 MLX4_OWNER_BASE, 3065 MLX4_OWNER_SIZE); 3066 if (!owner) { 3067 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3068 return -ENOMEM; 3069 } 3070 3071 ret = readl(owner); 3072 iounmap(owner); 3073 return (int) !!ret; 3074 } 3075 3076 static void mlx4_free_ownership(struct mlx4_dev *dev) 3077 { 3078 void __iomem *owner; 3079 3080 if (pci_channel_offline(dev->persist->pdev)) 3081 return; 3082 3083 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3084 MLX4_OWNER_BASE, 3085 MLX4_OWNER_SIZE); 3086 if (!owner) { 3087 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3088 return; 3089 } 3090 writel(0, owner); 3091 msleep(1000); 3092 iounmap(owner); 3093 } 3094 3095 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 3096 !!((flags) & MLX4_FLAG_MASTER)) 3097 3098 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 3099 u8 total_vfs, int existing_vfs, int reset_flow) 3100 { 3101 u64 dev_flags = dev->flags; 3102 int err = 0; 3103 3104 if (reset_flow) { 3105 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 3106 GFP_KERNEL); 3107 if (!dev->dev_vfs) 3108 goto free_mem; 3109 return dev_flags; 3110 } 3111 3112 atomic_inc(&pf_loading); 3113 if (dev->flags & MLX4_FLAG_SRIOV) { 3114 if (existing_vfs != total_vfs) { 3115 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 3116 existing_vfs, total_vfs); 3117 total_vfs = existing_vfs; 3118 } 3119 } 3120 3121 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 3122 if (NULL == dev->dev_vfs) { 3123 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 3124 goto disable_sriov; 3125 } 3126 3127 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 3128 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 3129 err = pci_enable_sriov(pdev, total_vfs); 3130 } 3131 if (err) { 3132 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 3133 err); 3134 goto disable_sriov; 3135 } else { 3136 mlx4_warn(dev, "Running in master mode\n"); 3137 dev_flags |= MLX4_FLAG_SRIOV | 3138 MLX4_FLAG_MASTER; 3139 dev_flags &= ~MLX4_FLAG_SLAVE; 3140 dev->persist->num_vfs = total_vfs; 3141 } 3142 return dev_flags; 3143 3144 disable_sriov: 3145 atomic_dec(&pf_loading); 3146 free_mem: 3147 dev->persist->num_vfs = 0; 3148 kfree(dev->dev_vfs); 3149 dev->dev_vfs = NULL; 3150 return dev_flags & ~MLX4_FLAG_MASTER; 3151 } 3152 3153 enum { 3154 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 3155 }; 3156 3157 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 3158 int *nvfs) 3159 { 3160 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 3161 /* Checking for 64 VFs as a limitation of 
CX2 */ 3162 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 3163 requested_vfs >= 64) { 3164 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 3165 requested_vfs); 3166 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 3167 } 3168 return 0; 3169 } 3170 3171 static int mlx4_pci_enable_device(struct mlx4_dev *dev) 3172 { 3173 struct pci_dev *pdev = dev->persist->pdev; 3174 int err = 0; 3175 3176 mutex_lock(&dev->persist->pci_status_mutex); 3177 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { 3178 err = pci_enable_device(pdev); 3179 if (!err) 3180 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; 3181 } 3182 mutex_unlock(&dev->persist->pci_status_mutex); 3183 3184 return err; 3185 } 3186 3187 static void mlx4_pci_disable_device(struct mlx4_dev *dev) 3188 { 3189 struct pci_dev *pdev = dev->persist->pdev; 3190 3191 mutex_lock(&dev->persist->pci_status_mutex); 3192 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { 3193 pci_disable_device(pdev); 3194 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; 3195 } 3196 mutex_unlock(&dev->persist->pci_status_mutex); 3197 } 3198 3199 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3200 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3201 int reset_flow) 3202 { 3203 struct mlx4_dev *dev; 3204 unsigned sum = 0; 3205 int err; 3206 int port; 3207 int i; 3208 struct mlx4_dev_cap *dev_cap = NULL; 3209 int existing_vfs = 0; 3210 3211 dev = &priv->dev; 3212 3213 INIT_LIST_HEAD(&priv->ctx_list); 3214 spin_lock_init(&priv->ctx_lock); 3215 3216 mutex_init(&priv->port_mutex); 3217 mutex_init(&priv->bond_mutex); 3218 3219 INIT_LIST_HEAD(&priv->pgdir_list); 3220 mutex_init(&priv->pgdir_mutex); 3221 spin_lock_init(&priv->cmd.context_lock); 3222 3223 INIT_LIST_HEAD(&priv->bf_list); 3224 mutex_init(&priv->bf_mutex); 3225 3226 dev->rev_id = pdev->revision; 3227 dev->numa_node = dev_to_node(&pdev->dev); 3228 3229 /* Detect if this device is a virtual function */ 3230 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3231 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 3232 dev->flags |= MLX4_FLAG_SLAVE; 3233 } else { 3234 /* We reset the device and enable SRIOV only for physical 3235 * devices. Try to claim ownership on the device; 3236 * if already taken, skip -- do not allow multiple PFs */ 3237 err = mlx4_get_ownership(dev); 3238 if (err) { 3239 if (err < 0) 3240 return err; 3241 else { 3242 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 3243 return -EINVAL; 3244 } 3245 } 3246 3247 atomic_set(&priv->opreq_count, 0); 3248 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 3249 3250 /* 3251 * Now reset the HCA before we touch the PCI capabilities or 3252 * attempt a firmware command, since a boot ROM may have left 3253 * the HCA in an undefined state. 3254 */ 3255 err = mlx4_reset(dev); 3256 if (err) { 3257 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 3258 goto err_sriov; 3259 } 3260 3261 if (total_vfs) { 3262 dev->flags = MLX4_FLAG_MASTER; 3263 existing_vfs = pci_num_vf(pdev); 3264 if (existing_vfs) 3265 dev->flags |= MLX4_FLAG_SRIOV; 3266 dev->persist->num_vfs = total_vfs; 3267 } 3268 } 3269 3270 /* on load remove any previous indication of internal error, 3271 * device is up. 
3272 */ 3273 dev->persist->state = MLX4_DEVICE_STATE_UP; 3274 3275 slave_start: 3276 err = mlx4_cmd_init(dev); 3277 if (err) { 3278 mlx4_err(dev, "Failed to init command interface, aborting\n"); 3279 goto err_sriov; 3280 } 3281 3282 /* In slave functions, the communication channel must be initialized 3283 * before posting commands. Also, init num_slaves before calling 3284 * mlx4_init_hca */ 3285 if (mlx4_is_mfunc(dev)) { 3286 if (mlx4_is_master(dev)) { 3287 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 3288 3289 } else { 3290 dev->num_slaves = 0; 3291 err = mlx4_multi_func_init(dev); 3292 if (err) { 3293 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 3294 goto err_cmd; 3295 } 3296 } 3297 } 3298 3299 err = mlx4_init_fw(dev); 3300 if (err) { 3301 mlx4_err(dev, "Failed to init fw, aborting.\n"); 3302 goto err_mfunc; 3303 } 3304 3305 if (mlx4_is_master(dev)) { 3306 /* when we hit the goto slave_start below, dev_cap already initialized */ 3307 if (!dev_cap) { 3308 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 3309 3310 if (!dev_cap) { 3311 err = -ENOMEM; 3312 goto err_fw; 3313 } 3314 3315 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3316 if (err) { 3317 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3318 goto err_fw; 3319 } 3320 3321 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3322 goto err_fw; 3323 3324 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3325 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 3326 total_vfs, 3327 existing_vfs, 3328 reset_flow); 3329 3330 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3331 dev->flags = dev_flags; 3332 if (!SRIOV_VALID_STATE(dev->flags)) { 3333 mlx4_err(dev, "Invalid SRIOV state\n"); 3334 goto err_sriov; 3335 } 3336 err = mlx4_reset(dev); 3337 if (err) { 3338 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 3339 goto err_sriov; 3340 } 3341 goto slave_start; 3342 } 3343 } else { 3344 /* Legacy mode FW requires SRIOV to be enabled before 3345 * doing QUERY_DEV_CAP, since max_eq's value is different if 3346 * SRIOV is enabled. 
3347 */ 3348 memset(dev_cap, 0, sizeof(*dev_cap)); 3349 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3350 if (err) { 3351 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3352 goto err_fw; 3353 } 3354 3355 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3356 goto err_fw; 3357 } 3358 } 3359 3360 err = mlx4_init_hca(dev); 3361 if (err) { 3362 if (err == -EACCES) { 3363 /* Not primary Physical function 3364 * Running in slave mode */ 3365 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3366 /* We're not a PF */ 3367 if (dev->flags & MLX4_FLAG_SRIOV) { 3368 if (!existing_vfs) 3369 pci_disable_sriov(pdev); 3370 if (mlx4_is_master(dev) && !reset_flow) 3371 atomic_dec(&pf_loading); 3372 dev->flags &= ~MLX4_FLAG_SRIOV; 3373 } 3374 if (!mlx4_is_slave(dev)) 3375 mlx4_free_ownership(dev); 3376 dev->flags |= MLX4_FLAG_SLAVE; 3377 dev->flags &= ~MLX4_FLAG_MASTER; 3378 goto slave_start; 3379 } else 3380 goto err_fw; 3381 } 3382 3383 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3384 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 3385 existing_vfs, reset_flow); 3386 3387 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 3388 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 3389 dev->flags = dev_flags; 3390 err = mlx4_cmd_init(dev); 3391 if (err) { 3392 /* Only VHCR is cleaned up, so could still 3393 * send FW commands 3394 */ 3395 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 3396 goto err_close; 3397 } 3398 } else { 3399 dev->flags = dev_flags; 3400 } 3401 3402 if (!SRIOV_VALID_STATE(dev->flags)) { 3403 mlx4_err(dev, "Invalid SRIOV state\n"); 3404 goto err_close; 3405 } 3406 } 3407 3408 /* Check if the device is functioning at its maximum possible speed. 3409 * There is no return code for this call; just warn the user if the 3410 * PCI Express device capabilities are under-satisfied by the bus. 3411 */ 3412 if (!mlx4_is_slave(dev)) 3413 mlx4_check_pcie_caps(dev); 3414 3415 /* In master functions, the communication channel must be initialized 3416 * after obtaining its address from fw */ 3417 if (mlx4_is_master(dev)) { 3418 if (dev->caps.num_ports < 2 && 3419 num_vfs_argc > 1) { 3420 err = -EINVAL; 3421 mlx4_err(dev, 3422 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 3423 dev->caps.num_ports); 3424 goto err_close; 3425 } 3426 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 3427 3428 for (i = 0; 3429 i < sizeof(dev->persist->nvfs)/ 3430 sizeof(dev->persist->nvfs[0]); i++) { 3431 unsigned j; 3432 3433 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 3434 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 3435 dev->dev_vfs[sum].n_ports = i < 2 ? 
1 : 3436 dev->caps.num_ports; 3437 } 3438 } 3439 3440 /* In master functions, the communication channel 3441 * must be initialized after obtaining its address from fw 3442 */ 3443 err = mlx4_multi_func_init(dev); 3444 if (err) { 3445 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 3446 goto err_close; 3447 } 3448 } 3449 3450 err = mlx4_alloc_eq_table(dev); 3451 if (err) 3452 goto err_master_mfunc; 3453 3454 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); 3455 mutex_init(&priv->msix_ctl.pool_lock); 3456 3457 mlx4_enable_msi_x(dev); 3458 if ((mlx4_is_mfunc(dev)) && 3459 !(dev->flags & MLX4_FLAG_MSI_X)) { 3460 err = -ENOSYS; 3461 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 3462 goto err_free_eq; 3463 } 3464 3465 if (!mlx4_is_slave(dev)) { 3466 err = mlx4_init_steering(dev); 3467 if (err) 3468 goto err_disable_msix; 3469 } 3470 3471 mlx4_init_quotas(dev); 3472 3473 err = mlx4_setup_hca(dev); 3474 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 3475 !mlx4_is_mfunc(dev)) { 3476 dev->flags &= ~MLX4_FLAG_MSI_X; 3477 dev->caps.num_comp_vectors = 1; 3478 pci_disable_msix(pdev); 3479 err = mlx4_setup_hca(dev); 3480 } 3481 3482 if (err) 3483 goto err_steer; 3484 3485 /* When PF resources are ready arm its comm channel to enable 3486 * getting commands 3487 */ 3488 if (mlx4_is_master(dev)) { 3489 err = mlx4_ARM_COMM_CHANNEL(dev); 3490 if (err) { 3491 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3492 err); 3493 goto err_steer; 3494 } 3495 } 3496 3497 for (port = 1; port <= dev->caps.num_ports; port++) { 3498 err = mlx4_init_port_info(dev, port); 3499 if (err) 3500 goto err_port; 3501 } 3502 3503 priv->v2p.port1 = 1; 3504 priv->v2p.port2 = 2; 3505 3506 err = mlx4_register_device(dev); 3507 if (err) 3508 goto err_port; 3509 3510 mlx4_request_modules(dev); 3511 3512 mlx4_sense_init(dev); 3513 mlx4_start_sense(dev); 3514 3515 priv->removed = 0; 3516 3517 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3518 atomic_dec(&pf_loading); 3519 3520 kfree(dev_cap); 3521 return 0; 3522 3523 err_port: 3524 for (--port; port >= 1; --port) 3525 mlx4_cleanup_port_info(&priv->port[port]); 3526 3527 mlx4_cleanup_counters_table(dev); 3528 mlx4_cleanup_qp_table(dev); 3529 mlx4_cleanup_srq_table(dev); 3530 mlx4_cleanup_cq_table(dev); 3531 mlx4_cmd_use_polling(dev); 3532 mlx4_cleanup_eq_table(dev); 3533 mlx4_cleanup_mcg_table(dev); 3534 mlx4_cleanup_mr_table(dev); 3535 mlx4_cleanup_xrcd_table(dev); 3536 mlx4_cleanup_pd_table(dev); 3537 mlx4_cleanup_uar_table(dev); 3538 3539 err_steer: 3540 if (!mlx4_is_slave(dev)) 3541 mlx4_clear_steering(dev); 3542 3543 err_disable_msix: 3544 if (dev->flags & MLX4_FLAG_MSI_X) 3545 pci_disable_msix(pdev); 3546 3547 err_free_eq: 3548 mlx4_free_eq_table(dev); 3549 3550 err_master_mfunc: 3551 if (mlx4_is_master(dev)) { 3552 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3553 mlx4_multi_func_cleanup(dev); 3554 } 3555 3556 if (mlx4_is_slave(dev)) { 3557 kfree(dev->caps.qp0_qkey); 3558 kfree(dev->caps.qp0_tunnel); 3559 kfree(dev->caps.qp0_proxy); 3560 kfree(dev->caps.qp1_tunnel); 3561 kfree(dev->caps.qp1_proxy); 3562 } 3563 3564 err_close: 3565 mlx4_close_hca(dev); 3566 3567 err_fw: 3568 mlx4_close_fw(dev); 3569 3570 err_mfunc: 3571 if (mlx4_is_slave(dev)) 3572 mlx4_multi_func_cleanup(dev); 3573 3574 err_cmd: 3575 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3576 3577 err_sriov: 3578 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3579 pci_disable_sriov(pdev); 3580 dev->flags &= ~MLX4_FLAG_SRIOV; 
3581 } 3582 3583 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3584 atomic_dec(&pf_loading); 3585 3586 kfree(priv->dev.dev_vfs); 3587 3588 if (!mlx4_is_slave(dev)) 3589 mlx4_free_ownership(dev); 3590 3591 kfree(dev_cap); 3592 return err; 3593 } 3594 3595 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3596 struct mlx4_priv *priv) 3597 { 3598 int err; 3599 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3600 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3601 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3602 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3603 unsigned total_vfs = 0; 3604 unsigned int i; 3605 3606 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3607 3608 err = mlx4_pci_enable_device(&priv->dev); 3609 if (err) { 3610 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3611 return err; 3612 } 3613 3614 /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs 3615 * per port, we must limit the number of VFs to 63 (since there are 3616 * 128 MACs) 3617 */ 3618 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3619 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3620 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3621 if (nvfs[i] < 0) { 3622 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3623 err = -EINVAL; 3624 goto err_disable_pdev; 3625 } 3626 } 3627 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3628 i++) { 3629 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3630 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3631 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3632 err = -EINVAL; 3633 goto err_disable_pdev; 3634 } 3635 } 3636 if (total_vfs > MLX4_MAX_NUM_VF) { 3637 dev_err(&pdev->dev, 3638 "Requested more VFs (%d) than allowed by hw (%d)\n", 3639 total_vfs, MLX4_MAX_NUM_VF); 3640 err = -EINVAL; 3641 goto err_disable_pdev; 3642 } 3643 3644 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3645 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { 3646 dev_err(&pdev->dev, 3647 "Requested more VFs (%d) for port (%d) than allowed by driver (%d)\n", 3648 nvfs[i] + nvfs[2], i + 1, 3649 MLX4_MAX_NUM_VF_P_PORT); 3650 err = -EINVAL; 3651 goto err_disable_pdev; 3652 } 3653 } 3654 3655 /* Check for BARs. 
*/ 3656 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3657 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3658 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3659 pci_dev_data, (long)pci_resource_flags(pdev, 0)); 3660 err = -ENODEV; 3661 goto err_disable_pdev; 3662 } 3663 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3664 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3665 err = -ENODEV; 3666 goto err_disable_pdev; 3667 } 3668 3669 err = pci_request_regions(pdev, DRV_NAME); 3670 if (err) { 3671 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3672 goto err_disable_pdev; 3673 } 3674 3675 pci_set_master(pdev); 3676 3677 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3678 if (err) { 3679 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3680 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3681 if (err) { 3682 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3683 goto err_release_regions; 3684 } 3685 } 3686 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3687 if (err) { 3688 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3689 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3690 if (err) { 3691 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3692 goto err_release_regions; 3693 } 3694 } 3695 3696 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3697 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3698 /* Detect if this device is a virtual function */ 3699 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3700 /* When acting as pf, we normally skip vfs unless explicitly 3701 * requested to probe them. 3702 */ 3703 if (total_vfs) { 3704 unsigned vfs_offset = 0; 3705 3706 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3707 vfs_offset + nvfs[i] < extended_func_num(pdev); 3708 vfs_offset += nvfs[i], i++) 3709 ; 3710 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3711 err = -ENODEV; 3712 goto err_release_regions; 3713 } 3714 if ((extended_func_num(pdev) - vfs_offset) 3715 > prb_vf[i]) { 3716 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3717 extended_func_num(pdev)); 3718 err = -ENODEV; 3719 goto err_release_regions; 3720 } 3721 } 3722 } 3723 3724 err = mlx4_catas_init(&priv->dev); 3725 if (err) 3726 goto err_release_regions; 3727 3728 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3729 if (err) 3730 goto err_catas; 3731 3732 return 0; 3733 3734 err_catas: 3735 mlx4_catas_end(&priv->dev); 3736 3737 err_release_regions: 3738 pci_release_regions(pdev); 3739 3740 err_disable_pdev: 3741 mlx4_pci_disable_device(&priv->dev); 3742 pci_set_drvdata(pdev, NULL); 3743 return err; 3744 } 3745 3746 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3747 { 3748 struct mlx4_priv *priv; 3749 struct mlx4_dev *dev; 3750 int ret; 3751 3752 printk_once(KERN_INFO "%s", mlx4_version); 3753 3754 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3755 if (!priv) 3756 return -ENOMEM; 3757 3758 dev = &priv->dev; 3759 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3760 if (!dev->persist) { 3761 kfree(priv); 3762 return -ENOMEM; 3763 } 3764 dev->persist->pdev = pdev; 3765 dev->persist->dev = dev; 3766 pci_set_drvdata(pdev, dev->persist); 3767 priv->pci_dev_data = id->driver_data; 3768 mutex_init(&dev->persist->device_state_mutex); 3769 mutex_init(&dev->persist->interface_state_mutex); 3770 mutex_init(&dev->persist->pci_status_mutex); 3771 3772 ret = __mlx4_init_one(pdev, 
	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_catas_init(&priv->dev);
	if (err)
		goto err_release_regions;

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
	if (err)
		goto err_catas;

	return 0;

err_catas:
	mlx4_catas_end(&priv->dev);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	mlx4_pci_disable_device(&priv->dev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev = &priv->dev;
	dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
	if (!dev->persist) {
		kfree(priv);
		return -ENOMEM;
	}
	dev->persist->pdev = pdev;
	dev->persist->dev = dev;
	pci_set_drvdata(pdev, dev->persist);
	priv->pci_dev_data = id->driver_data;
	mutex_init(&dev->persist->device_state_mutex);
	mutex_init(&dev->persist->interface_state_mutex);
	mutex_init(&dev->persist->pci_status_mutex);

	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret) {
		kfree(dev->persist);
		kfree(priv);
	} else {
		pci_save_state(pdev->dev.bsddev);
	}

	return ret;
}

static void mlx4_clean_dev(struct mlx4_dev *dev)
{
	struct mlx4_dev_persistent *persist = dev->persist;
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);

	memset(priv, 0, sizeof(*priv));
	priv->dev.persist = persist;
	priv->dev.flags = flags;
}
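
/*
 * mlx4_unload_one() tears down everything mlx4_load_one() set up, in
 * roughly the reverse order.  The private structure is then wiped by
 * mlx4_clean_dev() above; only dev->persist and the flag bits covered by
 * RESET_PERSIST_MASK_FLAGS survive, which lets mlx4_restart_one() and the
 * PCI error handlers reload the device later without a fresh probe.
 */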
static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data;
	int p, i;

	if (priv->removed)
		return;

	/* Save the current port types for later use. */
	for (i = 0; i < dev->caps.num_ports; i++) {
		dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
		dev->persist->curr_port_poss_type[i] =
			dev->caps.possible_type[i + 1];
	}

	pci_dev_data = priv->pci_dev_data;

	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	mlx4_clean_dev(dev);
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int active_vfs = 0;

	mutex_lock(&persist->interface_state_mutex);
	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
	mutex_unlock(&persist->interface_state_mutex);

	/* Disabling SR-IOV is not allowed while there are active VFs */
	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VFs!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}

	/* The device is now marked for deletion, so we can run without the
	 * lock and let other tasks terminate.
	 */
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	else
		mlx4_info(dev, "%s: interface is down\n", __func__);
	mlx4_catas_end(dev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(dev->persist);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}

static int restore_current_port_types(struct mlx4_dev *dev,
				      enum mlx4_port_type *types,
				      enum mlx4_port_type *poss_types)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err, i;

	mlx4_stop_sense(dev);

	mutex_lock(&priv->port_mutex);
	for (i = 0; i < dev->caps.num_ports; i++)
		dev->caps.possible_type[i + 1] = poss_types[i];
	err = mlx4_change_port_types(dev, types);
	mutex_unlock(&priv->port_mutex);

	mlx4_start_sense(dev);

	return err;
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	err = restore_current_port_types(dev, dev->persist->curr_port_type,
					 dev->persist->curr_port_poss_type);
	if (err)
		mlx4_err(dev, "could not restore original port types (%d)\n",
			 err);

	return err;
}
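
/*
 * PCI IDs of the supported ConnectX family devices.  driver_data carries
 * MLX4_PCI_DEV_FORCE_SENSE_PORT on the older ConnectX/ConnectX-2 parts
 * and MLX4_PCI_DEV_IS_VF on virtual functions; the ConnectX-3 physical
 * functions need no flags.
 */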
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002),
	  .driver_data = MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003) },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004),
	  .driver_data = MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
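
/*
 * PCI error recovery: error_detected() moves the device into the error
 * state and unloads it, asking for a slot reset unless the failure is
 * permanent; slot_reset() re-enables the device after the reset; and
 * resume() reloads the driver state and restores the saved port types.
 */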
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
	mlx4_enter_error_state(persist);

	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);

	mutex_unlock(&persist->interface_state_mutex);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	mlx4_pci_disable_device(persist->dev);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	int err;

	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
	err = mlx4_pci_enable_device(dev);
	if (err) {
		mlx4_err(dev, "Cannot re-enable device, err=%d\n", err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void mlx4_pci_resume(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int total_vfs;
	int err;

	mlx4_err(dev, "%s was called\n", __func__);
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
				    priv, 1);
		if (err) {
			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
				 __func__, err);
			goto end;
		}

		err = restore_current_port_types(dev,
						 dev->persist->curr_port_type,
						 dev->persist->curr_port_poss_type);
		if (err)
			mlx4_err(dev, "could not restore original port types (%d)\n",
				 err);
	}
end:
	mutex_unlock(&persist->interface_state_mutex);
}

static void mlx4_shutdown(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);
}

static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset = mlx4_pci_slot_reset,
	.resume = mlx4_pci_resume,
};

static struct pci_driver mlx4_driver = {
	.name = DRV_NAME,
	.id_table = mlx4_pci_table,
	.probe = mlx4_init_one,
	.shutdown = mlx4_shutdown,
	.remove = mlx4_remove_one,
	.err_handler = &mlx4_err_handler,
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size < -7 ||
	    (mlx4_log_num_mgm_entry_size > 0 &&
	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);

static int
mlx4_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t mlx4_mod = {
	.name = "mlx4",
	.evhand = mlx4_evhand,
};
MODULE_VERSION(mlx4, 1);
DECLARE_MODULE(mlx4, mlx4_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY);
MODULE_DEPEND(mlx4, linuxkpi, 1, 1, 1);