/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/cache.h>
#include <linux/random.h>

#include <dev/mlx4/device.h>
#include <dev/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
#include <dev/mlx4/stats.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

static bool enable_4k_uar;
module_param(enable_4k_uar, bool, 0444);
MODULE_PARM_DESC(enable_4k_uar,
		 "Enable using 4K UAR. Should not be enabled if there are VFs which do not support 4K UARs (default: false)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_description[] = "Mellanox driver"
				 " (" DRV_VERSION ")";

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
					      struct mlx4_dev_cap *dev_cap)
{
	/* The reserved_uars value is calculated in units of the system page
	 * size. Therefore, an adjustment is applied when the UAR page size
	 * is smaller than the system page size.
	 */
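	/* For example (illustrative numbers only): with uar_page_shift = 12
	 * (4K UARs) on a system where PAGE_SHIFT = 13 (8K pages), two UAR
	 * pages fit in one system page, so the firmware's reserved_uars
	 * count is divided by (1 << (13 - 12)) = 2 before being compared
	 * against mlx4_get_num_reserved_uar(dev).
	 */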
	dev->caps.reserved_uars =
		max_t(int,
		      mlx4_get_num_reserved_uar(dev),
		      dev_cap->reserved_uars /
		      (1 << (PAGE_SHIFT - dev->uar_page_shift)));
}

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use the bigger stride.
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs.
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride, cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}

static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port]			= port_cap->max_vl;
	dev->caps.ib_mtu_cap[port]		= port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port]	= port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port]		= port_cap->max_gids;
	dev->caps.pkey_table_len[port]		= port_cap->max_pkeys;
	dev->caps.port_width_cap[port]		= port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port]		= port_cap->eth_mtu;
	dev->caps.max_tc_eth			= port_cap->max_tc_eth;
	dev->caps.def_mac[port]			= port_cap->def_mac;
	dev->caps.supported_type[port]		= port_cap->supported_port_types;
	dev->caps.suggested_type[port]		= port_cap->suggested_type;
	dev->caps.default_sense[port]		= port_cap->default_sense;
	dev->caps.trans_type[port]		= port_cap->trans_type;
	dev->caps.vendor_oui[port]		= port_cap->vendor_oui;
	dev->caps.wavelength[port]		= port_cap->wavelength;
	dev->caps.trans_code[port]		= port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
		return;

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}
}

#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, (long)PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
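	/* When the firmware exposes per-function system EQs
	 * (MLX4_DEV_CAP_FLAG2_SYS_EQS), the number of physical EQs comes
	 * from the value just queried; otherwise fall back to the legacy
	 * fixed maximum, MLX4_MAX_EQ_NUM.
	 */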
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.map_clock_to_user  = dev_cap->map_clock_to_user;
	dev->caps.uar_page_size	     = PAGE_SIZE;
	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;

	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds	     = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz	     = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.flags2	     = dev_cap->flags2;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

	/* Save uar page shift */
	if (!mlx4_is_slave(dev)) {
		/* A virtual PCI function needs to determine the UAR page size
		 * from firmware. Only the master PCI function can set the UAR
		 * page size.
		 */
		if (enable_4k_uar)
			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
		else
			dev->uar_page_shift = PAGE_SHIFT;

		mlx4_set_num_reserved_uars(dev, dev_cap);
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
		struct mlx4_init_hca_param hca_param;

		memset(&hca_param, 0, sizeof(hca_param));
		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off the PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and
		 * verifies that the phv bit was reported correctly in the
		 * wqe. To allow QinQ, the PHV_EN flag should be set and
		 * phv_check_en must be cleared, otherwise QinQ packets
		 * will be dropped by the HW.
		 */
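		/* In short: QinQ acceleration needs PHV_EN set while
		 * phv_check_en is clear, so the capability is masked off
		 * below whenever the check is enabled or the query failed.
		 */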
		if (err || hca_param.phv_check_en)
			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
	}

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs  = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs  = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port.
		 * 3. FW declared that it supports link sensing.
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If the "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform the sense_port FW command to try and set the correct
		 * port type from the beginning.
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
		mlx4_warn(dev,
			  "Granular QoS per VF not supported with IB/Eth configuration\n");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
	}

	dev->caps.max_counters = dev_cap->max_counters;

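	/* The ETH_ADDR and FC_ADDR regions below reserve one QP per possible
	 * (MAC, VLAN, port) combination. For example, with the non-low-memory
	 * defaults above (log_num_macs = 7, log_num_vlans = 7) on a 2-port
	 * device: 128 * 128 * 2 = 32768 QPs per region.
	 */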
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.rl_caps = dev_cap->rl_caps;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			mlx4_warn(dev, "Old device ETS support detected\n");
			mlx4_warn(dev, "Consider upgrading device FW.\n");
			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
		}

	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	mlx4_enable_ignore_fcs(dev);

	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}

static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")
"2.5GT/s" : \ 678 "Unknown") 679 680 err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); 681 if (err) { 682 mlx4_warn(dev, 683 "Unable to determine PCIe device BW capabilities\n"); 684 return; 685 } 686 687 err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width); 688 if (err || speed == PCI_SPEED_UNKNOWN || 689 width == PCIE_LNK_WIDTH_UNKNOWN) { 690 mlx4_warn(dev, 691 "Unable to determine PCI device chain minimum BW\n"); 692 return; 693 } 694 695 if (width != width_cap || speed != speed_cap) 696 mlx4_warn(dev, 697 "PCIe BW is different than device's capability\n"); 698 699 mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", 700 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); 701 mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", 702 width, width_cap); 703 return; 704 } 705 706 /*The function checks if there are live vf, return the num of them*/ 707 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 708 { 709 struct mlx4_priv *priv = mlx4_priv(dev); 710 struct mlx4_slave_state *s_state; 711 int i; 712 int ret = 0; 713 714 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 715 s_state = &priv->mfunc.master.slave_state[i]; 716 if (s_state->active && s_state->last_cmd != 717 MLX4_COMM_CMD_RESET) { 718 mlx4_warn(dev, "%s: slave: %d is still active\n", 719 __func__, i); 720 ret++; 721 } 722 } 723 return ret; 724 } 725 726 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) 727 { 728 u32 qk = MLX4_RESERVED_QKEY_BASE; 729 730 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || 731 qpn < dev->phys_caps.base_proxy_sqpn) 732 return -EINVAL; 733 734 if (qpn >= dev->phys_caps.base_tunnel_sqpn) 735 /* tunnel qp */ 736 qk += qpn - dev->phys_caps.base_tunnel_sqpn; 737 else 738 qk += qpn - dev->phys_caps.base_proxy_sqpn; 739 *qkey = qk; 740 return 0; 741 } 742 EXPORT_SYMBOL(mlx4_get_parav_qkey); 743 744 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) 745 { 746 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 747 748 if (!mlx4_is_master(dev)) 749 return; 750 751 priv->virt2phys_pkey[slave][port - 1][i] = val; 752 } 753 EXPORT_SYMBOL(mlx4_sync_pkey_table); 754 755 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) 756 { 757 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 758 759 if (!mlx4_is_master(dev)) 760 return; 761 762 priv->slave_node_guids[slave] = guid; 763 } 764 EXPORT_SYMBOL(mlx4_put_slave_node_guid); 765 766 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) 767 { 768 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 769 770 if (!mlx4_is_master(dev)) 771 return 0; 772 773 return priv->slave_node_guids[slave]; 774 } 775 EXPORT_SYMBOL(mlx4_get_slave_node_guid); 776 777 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 778 { 779 struct mlx4_priv *priv = mlx4_priv(dev); 780 struct mlx4_slave_state *s_slave; 781 782 if (!mlx4_is_master(dev)) 783 return 0; 784 785 s_slave = &priv->mfunc.master.slave_state[slave]; 786 return !!s_slave->active; 787 } 788 EXPORT_SYMBOL(mlx4_is_slave_active); 789 790 static void slave_adjust_steering_mode(struct mlx4_dev *dev, 791 struct mlx4_dev_cap *dev_cap, 792 struct mlx4_init_hca_param *hca_param) 793 { 794 dev->caps.steering_mode = hca_param->steering_mode; 795 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 796 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 797 dev->caps.fs_log_max_ucast_qp_range_size = 798 
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	u8 i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* Fail if the HCA has an unknown global capability; at this time
	 * global_caps should always be zeroed.
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, (long)PAGE_SIZE);
		return -ENODEV;
	}

	/* Set uar_page_shift for VF */
	dev->uar_page_shift = hca_param.uar_page_sz + 12;

	/* Make sure the master uar page size is valid */
	if (dev->uar_page_shift > PAGE_SHIFT) {
		mlx4_err(dev,
			 "Invalid configuration: uar page size is larger than system page size\n");
		return -ENODEV;
	}

	/* Set reserved_uars based on the uar_page_shift */
	mlx4_set_num_reserved_uars(dev, &dev_cap);

	/* Although the uar page size in FW differs from the system page size,
	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
	 * still work with the assumption that uar page size == system page size.
	 */
	dev->caps.uar_page_size = PAGE_SIZE;

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap.pf_context_behaviour,
			 PF_CONTEXT_BEHAVIOUR_MASK);
		return -ENOSYS;
	}

	dev->caps.num_ports		= func_cap.num_ports;
	dev->quotas.qp			= func_cap.qp_quota;
	dev->quotas.srq			= func_cap.srq_quota;
	dev->quotas.cq			= func_cap.cq_quota;
	dev->quotas.mpt			= func_cap.mpt_quota;
	dev->quotas.mtt			= func_cap.mtt_quota;
	dev->caps.num_qps		= 1 << hca_param.log_num_qps;
	dev->caps.num_srqs		= 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs		= 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts		= 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs		= func_cap.max_eq;
	dev->caps.reserved_eqs		= func_cap.reserved_eq;
	dev->caps.reserved_lkey		= func_cap.reserved_lkey;
	dev->caps.num_pds		= MLX4_NUM_PDS;
	dev->caps.num_mgms		= 0;
	dev->caps.num_amgms		= 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	mlx4_replace_zero_macs(dev);

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						      &dev->caps.gid_table_len[i],
						      &dev->caps.pkey_table_len[i]);
		if (err)
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
				       pci_resource_len(dev->persist->pdev,
							2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		err = -ENOMEM;
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still needs to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
		 hca_param.rss_ip_frags ? "on" : "off");
"on" : "off"); 992 993 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP && 994 dev->caps.bf_reg_size) 995 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; 996 997 if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP) 998 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; 999 1000 return 0; 1001 1002 err_mem: 1003 kfree(dev->caps.qp0_qkey); 1004 kfree(dev->caps.qp0_tunnel); 1005 kfree(dev->caps.qp0_proxy); 1006 kfree(dev->caps.qp1_tunnel); 1007 kfree(dev->caps.qp1_proxy); 1008 dev->caps.qp0_qkey = NULL; 1009 dev->caps.qp0_tunnel = NULL; 1010 dev->caps.qp0_proxy = NULL; 1011 dev->caps.qp1_tunnel = NULL; 1012 dev->caps.qp1_proxy = NULL; 1013 1014 return err; 1015 } 1016 1017 static void mlx4_request_modules(struct mlx4_dev *dev) 1018 { 1019 int port; 1020 int has_ib_port = false; 1021 int has_eth_port = false; 1022 #define EN_DRV_NAME "mlx4_en" 1023 #define IB_DRV_NAME "mlx4_ib" 1024 1025 for (port = 1; port <= dev->caps.num_ports; port++) { 1026 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) 1027 has_ib_port = true; 1028 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 1029 has_eth_port = true; 1030 } 1031 1032 if (has_eth_port) 1033 request_module_nowait(EN_DRV_NAME); 1034 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) 1035 request_module_nowait(IB_DRV_NAME); 1036 } 1037 1038 /* 1039 * Change the port configuration of the device. 1040 * Every user of this function must hold the port mutex. 1041 */ 1042 int mlx4_change_port_types(struct mlx4_dev *dev, 1043 enum mlx4_port_type *port_types) 1044 { 1045 int err = 0; 1046 int change = 0; 1047 int port; 1048 1049 for (port = 0; port < dev->caps.num_ports; port++) { 1050 /* Change the port type only if the new type is different 1051 * from the current, and not set to Auto */ 1052 if (port_types[port] != dev->caps.port_type[port + 1]) 1053 change = 1; 1054 } 1055 if (change) { 1056 mlx4_unregister_device(dev); 1057 for (port = 1; port <= dev->caps.num_ports; port++) { 1058 mlx4_CLOSE_PORT(dev, port); 1059 dev->caps.port_type[port] = port_types[port - 1]; 1060 err = mlx4_SET_PORT(dev, port, -1); 1061 if (err) { 1062 mlx4_err(dev, "Failed to set port %d, aborting\n", 1063 port); 1064 goto out; 1065 } 1066 } 1067 mlx4_set_port_mask(dev); 1068 err = mlx4_register_device(dev); 1069 if (err) { 1070 mlx4_err(dev, "Failed to register device\n"); 1071 goto out; 1072 } 1073 mlx4_request_modules(dev); 1074 } 1075 1076 out: 1077 return err; 1078 } 1079 1080 static ssize_t show_port_type(struct device *dev, 1081 struct device_attribute *attr, 1082 char *buf) 1083 { 1084 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 1085 port_attr); 1086 struct mlx4_dev *mdev = info->dev; 1087 char type[8]; 1088 1089 sprintf(type, "%s", 1090 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static int __set_port_type(struct mlx4_port_info *info,
			   enum mlx4_port_type port_type)
{
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
		mlx4_err(mdev,
			 "Requested port type for port %d is not supported on this HCA\n",
			 info->port);
		err = -EINVAL;
		goto err_sup;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	info->tmp_type = port_type;

	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
err_sup:
	return err;
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	enum mlx4_port_type port_type;
	static DEFINE_MUTEX(set_port_type_mutex);
	int err;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n")) {
		port_type = MLX4_PORT_TYPE_IB;
	} else if (!strcmp(buf, "eth\n")) {
		port_type = MLX4_PORT_TYPE_ETH;
	} else if (!strcmp(buf, "auto\n")) {
		port_type = MLX4_PORT_TYPE_AUTO;
	} else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	err = __set_port_type(info, port_type);

err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}
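/* IBTA encodes an IB MTU as a small enum rather than a byte count
 * (1 -> 256 bytes up to 5 -> 4096 bytes). The helpers below translate
 * between the IBTA encoding and byte values; e.g. int_to_ibta_mtu(2048)
 * returns IB_MTU_2048 (4), and unsupported sizes map to -1.
 */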
enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
	int err = 0;
	int nvfs;
	struct mlx4_slaves_pport slaves_port1;
	struct mlx4_slaves_pport slaves_port2;
	DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);

	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
	bitmap_and(slaves_port_1_2,
		   slaves_port1.slaves, slaves_port2.slaves,
		   dev->persist->num_vfs + 1);

	/* only single port vfs are allowed */
	if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
		return -EINVAL;
	}

	/* number of virtual functions is number of total functions minus one
	 * physical function for each port.
	 */
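	/* For illustration: if each port's slave bitmap holds its PF plus
	 * N single-port VFs, the two bitmap weights sum to 2 * N + 2 and
	 * nvfs below evaluates to 2 * N.
	 */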
	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;

	/* limit on maximum allowed VFs */
	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
		mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
		return -EINVAL;
	}

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
		return -EINVAL;
	}

	err = mlx4_bond_mac_table(dev);
	if (err)
		return err;
	err = mlx4_bond_vlan_table(dev);
	if (err)
		goto err1;
	err = mlx4_bond_fs_rules(dev);
	if (err)
		goto err2;

	return 0;
err2:
	(void)mlx4_unbond_vlan_table(dev);
err1:
	(void)mlx4_unbond_mac_table(dev);
	return err;
}

static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
	int ret, ret1;

	ret = mlx4_unbond_fs_rules(dev);
	if (ret)
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
	ret1 = mlx4_unbond_mac_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
		ret = ret1;
	}
	ret1 = mlx4_unbond_vlan_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
		ret = ret1;
	}
	return ret;
}

int mlx4_bond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev)) {
		ret = mlx4_do_bond(dev, true);
		if (ret)
			mlx4_err(dev, "Failed to bond device: %d\n", ret);
		if (!ret && mlx4_is_master(dev)) {
			ret = mlx4_mf_bond(dev);
			if (ret) {
				mlx4_err(dev, "bond for multifunction failed\n");
				mlx4_do_bond(dev, false);
			}
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is bonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_bond);

int mlx4_unbond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev)) {
		int ret2 = 0;

		ret = mlx4_do_bond(dev, false);
		if (ret)
			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
		if (mlx4_is_master(dev))
			ret2 = mlx4_mf_unbond(dev);
		if (ret2) {
			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
			ret = ret2;
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is unbonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_unbond);


int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -ENOTSUPP;

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
	if (port1 == 0)
		port1 = priv->v2p.port1;
	if (port2 == 0)
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides the boundary checks, cross mapping makes
		 * no sense and is therefore not allowed */
		err = -EINVAL;
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = 0;
	} else {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
		if (!err) {
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
				 port1, port2);
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
		}
	}

	mutex_unlock(&priv->bond_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_port_map_set);

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}


	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now. The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
			  MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
to close slave function\n"); 1798 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1799 } 1800 1801 static int map_bf_area(struct mlx4_dev *dev) 1802 { 1803 struct mlx4_priv *priv = mlx4_priv(dev); 1804 resource_size_t bf_start; 1805 resource_size_t bf_len; 1806 int err = 0; 1807 1808 if (!dev->caps.bf_reg_size) 1809 return -ENXIO; 1810 1811 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1812 (dev->caps.num_uars << PAGE_SHIFT); 1813 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1814 (dev->caps.num_uars << PAGE_SHIFT); 1815 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1816 if (!priv->bf_mapping) 1817 err = -ENOMEM; 1818 1819 return err; 1820 } 1821 1822 static void unmap_bf_area(struct mlx4_dev *dev) 1823 { 1824 if (mlx4_priv(dev)->bf_mapping) 1825 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1826 } 1827 1828 s64 mlx4_read_clock(struct mlx4_dev *dev) 1829 { 1830 u32 clockhi, clocklo, clockhi1; 1831 s64 cycles; 1832 int i; 1833 struct mlx4_priv *priv = mlx4_priv(dev); 1834 1835 if (!priv->clock_mapping) 1836 return -ENOTSUPP; 1837 1838 for (i = 0; i < 10; i++) { 1839 clockhi = swab32(readl(priv->clock_mapping)); 1840 clocklo = swab32(readl(priv->clock_mapping + 4)); 1841 clockhi1 = swab32(readl(priv->clock_mapping)); 1842 if (clockhi == clockhi1) 1843 break; 1844 } 1845 1846 cycles = (u64) clockhi << 32 | (u64) clocklo; 1847 1848 return cycles & CORE_CLOCK_MASK; 1849 } 1850 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1851 1852 1853 static int map_internal_clock(struct mlx4_dev *dev) 1854 { 1855 struct mlx4_priv *priv = mlx4_priv(dev); 1856 1857 priv->clock_mapping = 1858 ioremap(pci_resource_start(dev->persist->pdev, 1859 priv->fw.clock_bar) + 1860 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1861 1862 if (!priv->clock_mapping) 1863 return -ENOMEM; 1864 1865 return 0; 1866 } 1867 1868 int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1869 struct mlx4_clock_params *params) 1870 { 1871 struct mlx4_priv *priv = mlx4_priv(dev); 1872 1873 if (mlx4_is_slave(dev)) 1874 return -ENOTSUPP; 1875 1876 if (!dev->caps.map_clock_to_user) { 1877 mlx4_dbg(dev, "Map clock to user is not supported.\n"); 1878 return -EOPNOTSUPP; 1879 } 1880 1881 if (!params) 1882 return -EINVAL; 1883 1884 params->bar = priv->fw.clock_bar; 1885 params->offset = priv->fw.clock_offset; 1886 params->size = MLX4_CLOCK_SIZE; 1887 1888 return 0; 1889 } 1890 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); 1891 1892 static void unmap_internal_clock(struct mlx4_dev *dev) 1893 { 1894 struct mlx4_priv *priv = mlx4_priv(dev); 1895 1896 if (priv->clock_mapping) 1897 iounmap(priv->clock_mapping); 1898 } 1899 1900 static void mlx4_close_hca(struct mlx4_dev *dev) 1901 { 1902 sysctl_ctx_free(&dev->hw_ctx); 1903 unmap_internal_clock(dev); 1904 unmap_bf_area(dev); 1905 if (mlx4_is_slave(dev)) 1906 mlx4_slave_exit(dev); 1907 else { 1908 mlx4_CLOSE_HCA(dev, 0); 1909 mlx4_free_icms(dev); 1910 } 1911 } 1912 1913 static void mlx4_close_fw(struct mlx4_dev *dev) 1914 { 1915 if (!mlx4_is_slave(dev)) { 1916 mlx4_UNMAP_FA(dev); 1917 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1918 } 1919 } 1920 1921 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 1922 { 1923 #define COMM_CHAN_OFFLINE_OFFSET 0x09 1924 1925 u32 comm_flags; 1926 u32 offline_bit; 1927 unsigned long end; 1928 struct mlx4_priv *priv = mlx4_priv(dev); 1929 1930 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 1931 while (time_before(jiffies, end)) { 1932 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 1933 MLX4_COMM_CHAN_FLAGS)); 1934 
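/*
 * The comm channel is big-endian, hence the swab32() on each read.
 * The loop keeps re-reading the offline bit until the PF reports
 * online or the MLX4_COMM_OFFLINE_TIME_OUT deadline computed above
 * expires.
 */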
offline_bit = (comm_flags & 1935 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1936 if (!offline_bit) 1937 return 0; 1938 /* There are cases, as part of the AER/Reset flow, where the PF needs 1939 * around 100 msec to load. We therefore sleep for 100 msec 1940 * to allow other tasks to make use of that CPU during this 1941 * time interval. 1942 */ 1943 msleep(100); 1944 } 1945 mlx4_err(dev, "Communication channel is offline.\n"); 1946 return -EIO; 1947 } 1948 1949 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 1950 { 1951 #define COMM_CHAN_RST_OFFSET 0x1e 1952 1953 struct mlx4_priv *priv = mlx4_priv(dev); 1954 u32 comm_rst; 1955 u32 comm_caps; 1956 1957 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 1958 MLX4_COMM_CHAN_CAPS)); 1959 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 1960 1961 if (comm_rst) 1962 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 1963 } 1964 1965 static int mlx4_init_slave(struct mlx4_dev *dev) 1966 { 1967 struct mlx4_priv *priv = mlx4_priv(dev); 1968 u64 dma = (u64) priv->mfunc.vhcr_dma; 1969 int ret_from_reset = 0; 1970 u32 slave_read; 1971 u32 cmd_channel_ver; 1972 1973 if (atomic_read(&pf_loading)) { 1974 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 1975 return -EAGAIN; 1976 } 1977 1978 mutex_lock(&priv->cmd.slave_cmd_mutex); 1979 priv->cmd.max_cmds = 1; 1980 if (mlx4_comm_check_offline(dev)) { 1981 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 1982 goto err_offline; 1983 } 1984 1985 mlx4_reset_vf_support(dev); 1986 mlx4_warn(dev, "Sending reset\n"); 1987 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1988 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 1989 /* if we are in the middle of FLR, the slave will try 1990 * NUM_OF_RESET_RETRIES times before leaving. */ 1991 if (ret_from_reset) { 1992 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1993 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 1994 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1995 return -EAGAIN; 1996 } else 1997 goto err; 1998 } 1999 2000 /* check the driver version - the slave I/F revision 2001 * must match the master's */ 2002 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 2003 cmd_channel_ver = mlx4_comm_get_version(); 2004 2005 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 2006 MLX4_COMM_GET_IF_REV(slave_read)) { 2007 mlx4_err(dev, "slave driver version is not supported by the master\n"); 2008 goto err; 2009 } 2010 2011 mlx4_warn(dev, "Sending vhcr0\n"); 2012 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 2013 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2014 goto err; 2015 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 2016 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2017 goto err; 2018 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 2019 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2020 goto err; 2021 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 2022 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 2023 goto err; 2024 2025 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2026 return 0; 2027 2028 err: 2029 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 2030 err_offline: 2031 mutex_unlock(&priv->cmd.slave_cmd_mutex); 2032 return -EIO; 2033 } 2034 2035 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 2036 { 2037 int i; 2038 2039 for (i = 1; i <= dev->caps.num_ports; i++) { 2040 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 2041 dev->caps.gid_table_len[i] = 2042 mlx4_get_slave_num_gids(dev, 0, i); 2043 else 2044 dev->caps.gid_table_len[i] = 1; 2045 dev->caps.pkey_table_len[i] = 2046
dev->phys_caps.pkey_phys_table_len[i] - 1; 2047 } 2048 } 2049 2050 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 2051 { 2052 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 2053 2054 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 2055 i++) { 2056 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 2057 break; 2058 } 2059 2060 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 2061 } 2062 2063 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 2064 { 2065 switch (dmfs_high_steer_mode) { 2066 case MLX4_STEERING_DMFS_A0_DEFAULT: 2067 return "default performance"; 2068 2069 case MLX4_STEERING_DMFS_A0_DYNAMIC: 2070 return "dynamic hybrid mode"; 2071 2072 case MLX4_STEERING_DMFS_A0_STATIC: 2073 return "performance optimized for limited rule configuration (static)"; 2074 2075 case MLX4_STEERING_DMFS_A0_DISABLE: 2076 return "disabled performance optimized steering"; 2077 2078 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 2079 return "performance optimized steering not supported"; 2080 2081 default: 2082 return "Unrecognized mode"; 2083 } 2084 } 2085 2086 #define MLX4_DMFS_A0_STEERING (1UL << 2) 2087 2088 static void choose_steering_mode(struct mlx4_dev *dev, 2089 struct mlx4_dev_cap *dev_cap) 2090 { 2091 if (mlx4_log_num_mgm_entry_size <= 0) { 2092 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 2093 if (dev->caps.dmfs_high_steer_mode == 2094 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2095 mlx4_err(dev, "DMFS high rate mode not supported\n"); 2096 else 2097 dev->caps.dmfs_high_steer_mode = 2098 MLX4_STEERING_DMFS_A0_STATIC; 2099 } 2100 } 2101 2102 if (mlx4_log_num_mgm_entry_size <= 0 && 2103 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 2104 (!mlx4_is_mfunc(dev) || 2105 (dev_cap->fs_max_num_qp_per_entry >= 2106 (dev->persist->num_vfs + 1))) && 2107 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 2108 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 2109 dev->oper_log_mgm_entry_size = 2110 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 2111 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2112 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 2113 dev->caps.fs_log_max_ucast_qp_range_size = 2114 dev_cap->fs_log_max_ucast_qp_range_size; 2115 } else { 2116 if (dev->caps.dmfs_high_steer_mode != 2117 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2118 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 2119 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 2120 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2121 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 2122 else { 2123 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 2124 2125 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 2126 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2127 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 2128 } 2129 dev->oper_log_mgm_entry_size = 2130 mlx4_log_num_mgm_entry_size > 0 ? 
2131 mlx4_log_num_mgm_entry_size : 2132 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 2133 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 2134 } 2135 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 2136 mlx4_steering_mode_str(dev->caps.steering_mode), 2137 dev->oper_log_mgm_entry_size, 2138 mlx4_log_num_mgm_entry_size); 2139 } 2140 2141 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 2142 struct mlx4_dev_cap *dev_cap) 2143 { 2144 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 2145 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 2146 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 2147 else 2148 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 2149 2150 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 2151 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 2152 } 2153 2154 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 2155 { 2156 int i; 2157 struct mlx4_port_cap port_cap; 2158 2159 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2160 return -EINVAL; 2161 2162 for (i = 1; i <= dev->caps.num_ports; i++) { 2163 if (mlx4_dev_port(dev, i, &port_cap)) { 2164 mlx4_err(dev, 2165 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n"); 2166 } else if ((dev->caps.dmfs_high_steer_mode != 2167 MLX4_STEERING_DMFS_A0_DEFAULT) && 2168 (port_cap.dmfs_optimized_state == 2169 !!(dev->caps.dmfs_high_steer_mode == 2170 MLX4_STEERING_DMFS_A0_DISABLE))) { 2171 mlx4_err(dev, 2172 "DMFS high rate steer mode differs: driver requested %s but %s in FW.\n", 2173 dmfs_high_rate_steering_mode_str( 2174 dev->caps.dmfs_high_steer_mode), 2175 (port_cap.dmfs_optimized_state ? 
2176 "enabled" : "disabled")); 2177 } 2178 } 2179 2180 return 0; 2181 } 2182 2183 static int mlx4_init_fw(struct mlx4_dev *dev) 2184 { 2185 struct mlx4_mod_stat_cfg mlx4_cfg; 2186 int err = 0; 2187 2188 if (!mlx4_is_slave(dev)) { 2189 err = mlx4_QUERY_FW(dev); 2190 if (err) { 2191 if (err == -EACCES) 2192 mlx4_info(dev, "non-primary physical function, skipping\n"); 2193 else 2194 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 2195 return err; 2196 } 2197 2198 err = mlx4_load_fw(dev); 2199 if (err) { 2200 mlx4_err(dev, "Failed to start FW, aborting\n"); 2201 return err; 2202 } 2203 2204 mlx4_cfg.log_pg_sz_m = 1; 2205 mlx4_cfg.log_pg_sz = 0; 2206 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 2207 if (err) 2208 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 2209 } 2210 2211 return err; 2212 } 2213 2214 static int mlx4_init_hca(struct mlx4_dev *dev) 2215 { 2216 struct mlx4_priv *priv = mlx4_priv(dev); 2217 struct mlx4_adapter adapter; 2218 struct mlx4_dev_cap dev_cap = {}; 2219 struct mlx4_profile profile; 2220 struct mlx4_init_hca_param init_hca; 2221 u64 icm_size; 2222 struct mlx4_config_dev_params params; 2223 int err; 2224 2225 if (!mlx4_is_slave(dev)) { 2226 err = mlx4_dev_cap(dev, &dev_cap); 2227 if (err) { 2228 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 2229 return err; 2230 } 2231 2232 choose_steering_mode(dev, &dev_cap); 2233 choose_tunnel_offload_mode(dev, &dev_cap); 2234 2235 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 2236 mlx4_is_master(dev)) 2237 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 2238 2239 err = mlx4_get_phys_port_id(dev); 2240 if (err) 2241 mlx4_err(dev, "Failed to get physical port id\n"); 2242 2243 if (mlx4_is_master(dev)) 2244 mlx4_parav_master_pf_caps(dev); 2245 2246 if (mlx4_low_memory_profile()) { 2247 mlx4_info(dev, "Running from within kdump kernel. 
Using low memory profile\n"); 2248 profile = low_mem_profile; 2249 } else { 2250 profile = default_profile; 2251 } 2252 if (dev->caps.steering_mode == 2253 MLX4_STEERING_MODE_DEVICE_MANAGED) 2254 profile.num_mcg = MLX4_FS_NUM_MCG; 2255 2256 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 2257 &init_hca); 2258 if ((long long) icm_size < 0) { 2259 err = icm_size; 2260 return err; 2261 } 2262 2263 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2264 2265 if (enable_4k_uar) { 2266 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + 2267 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; 2268 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; 2269 } else { 2270 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2271 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2272 } 2273 2274 init_hca.mw_enabled = 0; 2275 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2276 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2277 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2278 2279 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 2280 if (err) 2281 return err; 2282 2283 err = mlx4_INIT_HCA(dev, &init_hca); 2284 if (err) { 2285 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2286 goto err_free_icm; 2287 } 2288 2289 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2290 err = mlx4_query_func(dev, &dev_cap); 2291 if (err < 0) { 2292 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2293 goto err_close; 2294 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2295 dev->caps.num_eqs = dev_cap.max_eqs; 2296 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 2297 dev->caps.reserved_uars = dev_cap.reserved_uars; 2298 } 2299 } 2300 2301 /* 2302 * If TS is supported by FW 2303 * read HCA frequency by QUERY_HCA command 2304 */ 2305 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2306 memset(&init_hca, 0, sizeof(init_hca)); 2307 err = mlx4_QUERY_HCA(dev, &init_hca); 2308 if (err) { 2309 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2310 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2311 } else { 2312 dev->caps.hca_core_clock = 2313 init_hca.hca_core_clock; 2314 } 2315 2316 /* In case we got HCA frequency 0 - disable timestamping 2317 * to avoid dividing by zero 2318 */ 2319 if (!dev->caps.hca_core_clock) { 2320 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2321 mlx4_err(dev, 2322 "HCA frequency is 0 - timestamping is not supported\n"); 2323 } else if (map_internal_clock(dev)) { 2324 /* 2325 * Map internal clock, 2326 * in case of failure disable timestamping 2327 */ 2328 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2329 mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); 2330 } 2331 } 2332 2333 if (dev->caps.dmfs_high_steer_mode != 2334 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2335 if (mlx4_validate_optimized_steering(dev)) 2336 mlx4_warn(dev, "Optimized steering validation failed\n"); 2337 2338 if (dev->caps.dmfs_high_steer_mode == 2339 MLX4_STEERING_DMFS_A0_DISABLE) { 2340 dev->caps.dmfs_high_rate_qpn_base = 2341 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2342 dev->caps.dmfs_high_rate_qpn_range = 2343 MLX4_A0_STEERING_TABLE_SIZE; 2344 } 2345 2346 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 2347 dmfs_high_rate_steering_mode_str( 2348 dev->caps.dmfs_high_steer_mode)); 2349 } 2350 } else { 2351 err = mlx4_init_slave(dev); 2352 if (err) { 2353 if (err != -EAGAIN) 2354 mlx4_err(dev, "Failed to initialize slave\n"); 2355 return err; 2356 } 2357 2358 err = mlx4_slave_cap(dev); 2359 if (err) { 2360 mlx4_err(dev, "Failed to obtain slave caps\n"); 2361 goto err_close; 2362 } 2363 } 2364 2365 if (map_bf_area(dev)) 2366 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2367 2368 /* Only the master sets the ports; all the rest get it from it. */ 2369 if (!mlx4_is_slave(dev)) 2370 mlx4_set_port_mask(dev); 2371 2372 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2373 if (err) { 2374 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2375 goto unmap_bf; 2376 } 2377 2378 /* Query CONFIG_DEV parameters */ 2379 err = mlx4_config_dev_retrieval(dev, &params); 2380 if (err && err != -ENOTSUPP) { 2381 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2382 } else if (!err) { 2383 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2384 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2385 } 2386 priv->eq_table.inta_pin = adapter.inta_pin; 2387 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 2388 2389 return 0; 2390 2391 unmap_bf: 2392 unmap_internal_clock(dev); 2393 unmap_bf_area(dev); 2394 2395 if (mlx4_is_slave(dev)) { 2396 kfree(dev->caps.qp0_qkey); 2397 kfree(dev->caps.qp0_tunnel); 2398 kfree(dev->caps.qp0_proxy); 2399 kfree(dev->caps.qp1_tunnel); 2400 kfree(dev->caps.qp1_proxy); 2401 } 2402 2403 err_close: 2404 if (mlx4_is_slave(dev)) 2405 mlx4_slave_exit(dev); 2406 else 2407 mlx4_CLOSE_HCA(dev, 0); 2408 2409 err_free_icm: 2410 if (!mlx4_is_slave(dev)) 2411 mlx4_free_icms(dev); 2412 2413 return err; 2414 } 2415 2416 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2417 { 2418 struct mlx4_priv *priv = mlx4_priv(dev); 2419 int nent_pow2; 2420 2421 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2422 return -ENOENT; 2423 2424 if (!dev->caps.max_counters) 2425 return -ENOSPC; 2426 2427 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); 2428 /* reserve last counter index for sink counter */ 2429 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2, 2430 nent_pow2 - 1, 0, 2431 nent_pow2 - dev->caps.max_counters + 1); 2432 } 2433 2434 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2435 { 2436 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2437 return; 2438 2439 if (!dev->caps.max_counters) 2440 return; 2441 2442 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2443 } 2444 2445 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) 2446 { 2447 struct mlx4_priv *priv = mlx4_priv(dev); 2448 int port; 2449 2450 for (port = 0; port < dev->caps.num_ports; port++) 2451 if (priv->def_counter[port] != -1) 2452 mlx4_counter_free(dev, priv->def_counter[port]); 2453 } 2454 2455 static int mlx4_allocate_default_counters(struct mlx4_dev 
*dev) 2456 { 2457 struct mlx4_priv *priv = mlx4_priv(dev); 2458 int port, err = 0; 2459 u32 idx; 2460 2461 for (port = 0; port < dev->caps.num_ports; port++) 2462 priv->def_counter[port] = -1; 2463 2464 for (port = 0; port < dev->caps.num_ports; port++) { 2465 err = mlx4_counter_alloc(dev, &idx); 2466 2467 if (!err || err == -ENOSPC) { 2468 priv->def_counter[port] = idx; 2469 } else if (err == -ENOENT) { 2470 err = 0; 2471 continue; 2472 } else if (mlx4_is_slave(dev) && err == -EINVAL) { 2473 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); 2474 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", 2475 MLX4_SINK_COUNTER_INDEX(dev)); 2476 err = 0; 2477 } else { 2478 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2479 __func__, port + 1, err); 2480 mlx4_cleanup_default_counters(dev); 2481 return err; 2482 } 2483 2484 mlx4_dbg(dev, "%s: default counter index %d for port %d\n", 2485 __func__, priv->def_counter[port], port + 1); 2486 } 2487 2488 return err; 2489 } 2490 2491 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2492 { 2493 struct mlx4_priv *priv = mlx4_priv(dev); 2494 2495 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2496 return -ENOENT; 2497 2498 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2499 if (*idx == -1) { 2500 *idx = MLX4_SINK_COUNTER_INDEX(dev); 2501 return -ENOSPC; 2502 } 2503 2504 return 0; 2505 } 2506 2507 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2508 { 2509 u64 out_param; 2510 int err; 2511 2512 if (mlx4_is_mfunc(dev)) { 2513 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2514 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2515 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2516 if (!err) 2517 *idx = get_param_l(&out_param); 2518 2519 return err; 2520 } 2521 return __mlx4_counter_alloc(dev, idx); 2522 } 2523 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2524 2525 static int __mlx4_clear_if_stat(struct mlx4_dev *dev, 2526 u8 counter_index) 2527 { 2528 struct mlx4_cmd_mailbox *if_stat_mailbox; 2529 int err; 2530 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET; 2531 2532 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); 2533 if (IS_ERR(if_stat_mailbox)) 2534 return PTR_ERR(if_stat_mailbox); 2535 2536 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, 2537 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, 2538 MLX4_CMD_NATIVE); 2539 2540 mlx4_free_cmd_mailbox(dev, if_stat_mailbox); 2541 return err; 2542 } 2543 2544 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2545 { 2546 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2547 return; 2548 2549 if (idx == MLX4_SINK_COUNTER_INDEX(dev)) 2550 return; 2551 2552 __mlx4_clear_if_stat(dev, idx); 2553 2554 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2555 return; 2556 } 2557 2558 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2559 { 2560 u64 in_param = 0; 2561 2562 if (mlx4_is_mfunc(dev)) { 2563 set_param_l(&in_param, idx); 2564 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2565 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2566 MLX4_CMD_WRAPPED); 2567 return; 2568 } 2569 __mlx4_counter_free(dev, idx); 2570 } 2571 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2572 2573 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) 2574 { 2575 struct mlx4_priv *priv = mlx4_priv(dev); 2576 2577 return priv->def_counter[port - 1]; 2578 } 2579 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index); 2580 2581 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) 
2582 { 2583 struct mlx4_priv *priv = mlx4_priv(dev); 2584 2585 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2586 } 2587 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); 2588 2589 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) 2590 { 2591 struct mlx4_priv *priv = mlx4_priv(dev); 2592 2593 return priv->mfunc.master.vf_admin[entry].vport[port].guid; 2594 } 2595 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); 2596 2597 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) 2598 { 2599 struct mlx4_priv *priv = mlx4_priv(dev); 2600 __be64 guid; 2601 2602 /* hw GUID */ 2603 if (entry == 0) 2604 return; 2605 2606 get_random_bytes((char *)&guid, sizeof(guid)); 2607 guid &= ~(cpu_to_be64(1ULL << 56)); 2608 guid |= cpu_to_be64(1ULL << 57); 2609 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2610 } 2611 2612 static int mlx4_setup_hca(struct mlx4_dev *dev) 2613 { 2614 struct mlx4_priv *priv = mlx4_priv(dev); 2615 int err; 2616 int port; 2617 __be32 ib_port_default_caps; 2618 2619 err = mlx4_init_uar_table(dev); 2620 if (err) { 2621 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2622 return err; 2623 } 2624 2625 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2626 if (err) { 2627 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2628 goto err_uar_table_free; 2629 } 2630 2631 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2632 if (!priv->kar) { 2633 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2634 err = -ENOMEM; 2635 goto err_uar_free; 2636 } 2637 2638 err = mlx4_init_pd_table(dev); 2639 if (err) { 2640 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2641 goto err_kar_unmap; 2642 } 2643 2644 err = mlx4_init_xrcd_table(dev); 2645 if (err) { 2646 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2647 goto err_pd_table_free; 2648 } 2649 2650 err = mlx4_init_mr_table(dev); 2651 if (err) { 2652 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2653 goto err_xrcd_table_free; 2654 } 2655 2656 if (!mlx4_is_slave(dev)) { 2657 err = mlx4_init_mcg_table(dev); 2658 if (err) { 2659 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2660 goto err_mr_table_free; 2661 } 2662 err = mlx4_config_mad_demux(dev); 2663 if (err) { 2664 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2665 goto err_mcg_table_free; 2666 } 2667 } 2668 2669 err = mlx4_init_eq_table(dev); 2670 if (err) { 2671 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2672 goto err_mcg_table_free; 2673 } 2674 2675 err = mlx4_cmd_use_events(dev); 2676 if (err) { 2677 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2678 goto err_eq_table_free; 2679 } 2680 2681 err = mlx4_NOP(dev); 2682 if (err) { 2683 if (dev->flags & MLX4_FLAG_MSI_X) { 2684 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n", 2685 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2686 mlx4_warn(dev, "Trying again without MSI-X\n"); 2687 } else { 2688 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2689 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2690 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2691 } 2692 2693 goto err_cmd_poll; 2694 } 2695 2696 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2697 2698 err = mlx4_init_cq_table(dev); 2699 if (err) { 2700 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); 
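/*
 * The err_cmd_poll target below first drops the command interface
 * back to polling mode (undoing mlx4_cmd_use_events() above) and
 * then falls through the remaining unwind labels in reverse order
 * of the setup steps.
 */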
2701 goto err_cmd_poll; 2702 } 2703 2704 err = mlx4_init_srq_table(dev); 2705 if (err) { 2706 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2707 goto err_cq_table_free; 2708 } 2709 2710 err = mlx4_init_qp_table(dev); 2711 if (err) { 2712 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2713 goto err_srq_table_free; 2714 } 2715 2716 if (!mlx4_is_slave(dev)) { 2717 err = mlx4_init_counters_table(dev); 2718 if (err && err != -ENOENT) { 2719 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2720 goto err_qp_table_free; 2721 } 2722 } 2723 2724 err = mlx4_allocate_default_counters(dev); 2725 if (err) { 2726 mlx4_err(dev, "Failed to allocate default counters, aborting\n"); 2727 goto err_counters_table_free; 2728 } 2729 2730 if (!mlx4_is_slave(dev)) { 2731 for (port = 1; port <= dev->caps.num_ports; port++) { 2732 ib_port_default_caps = 0; 2733 err = mlx4_get_port_ib_caps(dev, port, 2734 &ib_port_default_caps); 2735 if (err) 2736 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2737 port, err); 2738 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2739 2740 /* initialize per-slave default ib port capabilities */ 2741 if (mlx4_is_master(dev)) { 2742 int i; 2743 for (i = 0; i < dev->num_slaves; i++) { 2744 if (i == mlx4_master_func_num(dev)) 2745 continue; 2746 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2747 ib_port_default_caps; 2748 } 2749 } 2750 2751 if (mlx4_is_mfunc(dev)) 2752 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2753 else 2754 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2755 2756 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2757 dev->caps.pkey_table_len[port] : -1); 2758 if (err) { 2759 mlx4_err(dev, "Failed to set port %d, aborting\n", 2760 port); 2761 goto err_default_counters_free; 2762 } 2763 } 2764 } 2765 2766 return 0; 2767 2768 err_default_counters_free: 2769 mlx4_cleanup_default_counters(dev); 2770 2771 err_counters_table_free: 2772 if (!mlx4_is_slave(dev)) 2773 mlx4_cleanup_counters_table(dev); 2774 2775 err_qp_table_free: 2776 mlx4_cleanup_qp_table(dev); 2777 2778 err_srq_table_free: 2779 mlx4_cleanup_srq_table(dev); 2780 2781 err_cq_table_free: 2782 mlx4_cleanup_cq_table(dev); 2783 2784 err_cmd_poll: 2785 mlx4_cmd_use_polling(dev); 2786 2787 err_eq_table_free: 2788 mlx4_cleanup_eq_table(dev); 2789 2790 err_mcg_table_free: 2791 if (!mlx4_is_slave(dev)) 2792 mlx4_cleanup_mcg_table(dev); 2793 2794 err_mr_table_free: 2795 mlx4_cleanup_mr_table(dev); 2796 2797 err_xrcd_table_free: 2798 mlx4_cleanup_xrcd_table(dev); 2799 2800 err_pd_table_free: 2801 mlx4_cleanup_pd_table(dev); 2802 2803 err_kar_unmap: 2804 iounmap(priv->kar); 2805 2806 err_uar_free: 2807 mlx4_uar_free(dev, &priv->driver_uar); 2808 2809 err_uar_table_free: 2810 mlx4_cleanup_uar_table(dev); 2811 return err; 2812 } 2813 2814 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) 2815 { 2816 int requested_cpu = 0; 2817 struct mlx4_priv *priv = mlx4_priv(dev); 2818 struct mlx4_eq *eq; 2819 int off = 0; 2820 int i; 2821 2822 if (eqn > dev->caps.num_comp_vectors) 2823 return -EINVAL; 2824 2825 for (i = 1; i < port; i++) 2826 off += mlx4_get_eqs_per_port(dev, i); 2827 2828 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); 2829 2830 /* Meaning EQs are shared, and this call comes from the second port */ 2831 if (requested_cpu < 0) 2832 return 0; 2833 2834 eq = &priv->eq_table.eq[eqn]; 2835 2836 eq->affinity_cpu_id = requested_cpu % num_online_cpus(); 2837 2838 return 
0; 2839 } 2840 2841 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2842 { 2843 struct mlx4_priv *priv = mlx4_priv(dev); 2844 struct msix_entry *entries; 2845 int i; 2846 int port = 0; 2847 2848 if (msi_x) { 2849 int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2850 2851 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2852 nreq); 2853 if (nreq > MAX_MSIX) 2854 nreq = MAX_MSIX; 2855 2856 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2857 if (!entries) 2858 goto no_msi; 2859 2860 for (i = 0; i < nreq; ++i) 2861 entries[i].entry = i; 2862 2863 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2864 nreq); 2865 2866 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { 2867 kfree(entries); 2868 goto no_msi; 2869 } 2870 /* 1 is reserved for events (asynchronous EQ) */ 2871 dev->caps.num_comp_vectors = nreq - 1; 2872 2873 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; 2874 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, 2875 dev->caps.num_ports); 2876 2877 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { 2878 if (i == MLX4_EQ_ASYNC) 2879 continue; 2880 2881 priv->eq_table.eq[i].irq = 2882 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; 2883 2884 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { 2885 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2886 dev->caps.num_ports); 2887 /* We don't set affinity hint when there 2888 * aren't enough EQs 2889 */ 2890 } else { 2891 set_bit(port, 2892 priv->eq_table.eq[i].actv_ports.ports); 2893 if (mlx4_init_affinity_hint(dev, port + 1, i)) 2894 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", 2895 i); 2896 } 2897 /* We divide the EQs evenly between the two ports. 2898 * (dev->caps.num_comp_vectors / dev->caps.num_ports) 2899 * refers to the number of EQs per port 2900 * (i.e. eqs_per_port). Theoretically, we would like to 2901 * write something like (i + 1) % eqs_per_port == 0. 2902 * However, since there's an asynchronous EQ, we have 2903 * to skip over it by comparing this condition to 2904 * !!((i + 1) > MLX4_EQ_ASYNC). 2905 */ 2906 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && 2907 ((i + 1) % 2908 (dev->caps.num_comp_vectors / dev->caps.num_ports)) == 2909 !!((i + 1) > MLX4_EQ_ASYNC)) 2910 /* If dev->caps.num_comp_vectors < dev->caps.num_ports, 2911 * everything is shared anyway. 
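* As a worked example (hypothetical numbers, assuming
* MLX4_EQ_ASYNC == 0): with num_comp_vectors = 8 and num_ports = 2,
* eqs_per_port = 4, so completion vectors 1-4 land on port 1 and
* vectors 5-8 on port 2; the port index is advanced after every
* fourth completion vector, with the async EQ at index 0 skipped
* via the !! correction above.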
2912 */ 2913 port++; 2914 } 2915 2916 dev->flags |= MLX4_FLAG_MSI_X; 2917 2918 kfree(entries); 2919 return; 2920 } 2921 2922 no_msi: 2923 dev->caps.num_comp_vectors = 1; 2924 2925 BUG_ON(MLX4_EQ_ASYNC >= 2); 2926 for (i = 0; i < 2; ++i) { 2927 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2928 if (i != MLX4_EQ_ASYNC) { 2929 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2930 dev->caps.num_ports); 2931 } 2932 } 2933 } 2934 2935 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2936 { 2937 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2938 int err = 0; 2939 2940 info->dev = dev; 2941 info->port = port; 2942 if (!mlx4_is_slave(dev)) { 2943 mlx4_init_mac_table(dev, &info->mac_table); 2944 mlx4_init_vlan_table(dev, &info->vlan_table); 2945 mlx4_init_roce_gid_table(dev, &info->gid_table); 2946 info->base_qpn = mlx4_get_base_qpn(dev, port); 2947 } 2948 2949 sprintf(info->dev_name, "mlx4_port%d", port); 2950 info->port_attr.attr.name = info->dev_name; 2951 if (mlx4_is_mfunc(dev)) 2952 info->port_attr.attr.mode = S_IRUGO; 2953 else { 2954 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2955 info->port_attr.store = set_port_type; 2956 } 2957 info->port_attr.show = show_port_type; 2958 sysfs_attr_init(&info->port_attr.attr); 2959 2960 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 2961 if (err) { 2962 mlx4_err(dev, "Failed to create file for port %d\n", port); 2963 info->port = -1; 2964 } 2965 2966 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2967 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2968 if (mlx4_is_mfunc(dev)) 2969 info->port_mtu_attr.attr.mode = S_IRUGO; 2970 else { 2971 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2972 info->port_mtu_attr.store = set_port_ib_mtu; 2973 } 2974 info->port_mtu_attr.show = show_port_ib_mtu; 2975 sysfs_attr_init(&info->port_mtu_attr.attr); 2976 2977 err = device_create_file(&dev->persist->pdev->dev, 2978 &info->port_mtu_attr); 2979 if (err) { 2980 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2981 device_remove_file(&info->dev->persist->pdev->dev, 2982 &info->port_attr); 2983 info->port = -1; 2984 } 2985 2986 return err; 2987 } 2988 2989 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2990 { 2991 if (info->port < 0) 2992 return; 2993 2994 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2995 device_remove_file(&info->dev->persist->pdev->dev, 2996 &info->port_mtu_attr); 2997 #ifdef CONFIG_RFS_ACCEL 2998 free_irq_cpu_rmap(info->rmap); 2999 info->rmap = NULL; 3000 #endif 3001 } 3002 3003 static int mlx4_init_steering(struct mlx4_dev *dev) 3004 { 3005 struct mlx4_priv *priv = mlx4_priv(dev); 3006 int num_entries = dev->caps.num_ports; 3007 int i, j; 3008 3009 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 3010 if (!priv->steer) 3011 return -ENOMEM; 3012 3013 for (i = 0; i < num_entries; i++) 3014 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3015 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 3016 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 3017 } 3018 return 0; 3019 } 3020 3021 static void mlx4_clear_steering(struct mlx4_dev *dev) 3022 { 3023 struct mlx4_priv *priv = mlx4_priv(dev); 3024 struct mlx4_steer_index *entry, *tmp_entry; 3025 struct mlx4_promisc_qp *pqp, *tmp_pqp; 3026 int num_entries = dev->caps.num_ports; 3027 int i, j; 3028 3029 for (i = 0; i < num_entries; i++) { 3030 for (j = 0; j < MLX4_NUM_STEERS; j++) { 3031 list_for_each_entry_safe(pqp, tmp_pqp, 3032 &priv->steer[i].promisc_qps[j], 
3033 list) { 3034 list_del(&pqp->list); 3035 kfree(pqp); 3036 } 3037 list_for_each_entry_safe(entry, tmp_entry, 3038 &priv->steer[i].steer_entries[j], 3039 list) { 3040 list_del(&entry->list); 3041 list_for_each_entry_safe(pqp, tmp_pqp, 3042 &entry->duplicates, 3043 list) { 3044 list_del(&pqp->list); 3045 kfree(pqp); 3046 } 3047 kfree(entry); 3048 } 3049 } 3050 } 3051 kfree(priv->steer); 3052 } 3053 3054 static int extended_func_num(struct pci_dev *pdev) 3055 { 3056 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 3057 } 3058 3059 #define MLX4_OWNER_BASE 0x8069c 3060 #define MLX4_OWNER_SIZE 4 3061 3062 static int mlx4_get_ownership(struct mlx4_dev *dev) 3063 { 3064 void __iomem *owner; 3065 u32 ret; 3066 3067 if (pci_channel_offline(dev->persist->pdev)) 3068 return -EIO; 3069 3070 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3071 MLX4_OWNER_BASE, 3072 MLX4_OWNER_SIZE); 3073 if (!owner) { 3074 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3075 return -ENOMEM; 3076 } 3077 3078 ret = readl(owner); 3079 iounmap(owner); 3080 return (int) !!ret; 3081 } 3082 3083 static void mlx4_free_ownership(struct mlx4_dev *dev) 3084 { 3085 void __iomem *owner; 3086 3087 if (pci_channel_offline(dev->persist->pdev)) 3088 return; 3089 3090 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3091 MLX4_OWNER_BASE, 3092 MLX4_OWNER_SIZE); 3093 if (!owner) { 3094 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3095 return; 3096 } 3097 writel(0, owner); 3098 msleep(1000); 3099 iounmap(owner); 3100 } 3101 3102 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 3103 !!((flags) & MLX4_FLAG_MASTER)) 3104 3105 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 3106 u8 total_vfs, int existing_vfs, int reset_flow) 3107 { 3108 u64 dev_flags = dev->flags; 3109 int err = 0; 3110 3111 if (reset_flow) { 3112 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 3113 GFP_KERNEL); 3114 if (!dev->dev_vfs) 3115 goto free_mem; 3116 return dev_flags; 3117 } 3118 3119 atomic_inc(&pf_loading); 3120 if (dev->flags & MLX4_FLAG_SRIOV) { 3121 if (existing_vfs != total_vfs) { 3122 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 3123 existing_vfs, total_vfs); 3124 total_vfs = existing_vfs; 3125 } 3126 } 3127 3128 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 3129 if (NULL == dev->dev_vfs) { 3130 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 3131 goto disable_sriov; 3132 } 3133 3134 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 3135 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 3136 err = pci_enable_sriov(pdev, total_vfs); 3137 } 3138 if (err) { 3139 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 3140 err); 3141 goto disable_sriov; 3142 } else { 3143 mlx4_warn(dev, "Running in master mode\n"); 3144 dev_flags |= MLX4_FLAG_SRIOV | 3145 MLX4_FLAG_MASTER; 3146 dev_flags &= ~MLX4_FLAG_SLAVE; 3147 dev->persist->num_vfs = total_vfs; 3148 } 3149 return dev_flags; 3150 3151 disable_sriov: 3152 atomic_dec(&pf_loading); 3153 free_mem: 3154 dev->persist->num_vfs = 0; 3155 kfree(dev->dev_vfs); 3156 dev->dev_vfs = NULL; 3157 return dev_flags & ~MLX4_FLAG_MASTER; 3158 } 3159 3160 enum { 3161 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 3162 }; 3163 3164 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 3165 int *nvfs) 3166 { 3167 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 3168 /* Checking for 64 VFs as a limitation of 
CX2 */ 3169 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 3170 requested_vfs >= 64) { 3171 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 3172 requested_vfs); 3173 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 3174 } 3175 return 0; 3176 } 3177 3178 static int mlx4_pci_enable_device(struct mlx4_dev *dev) 3179 { 3180 struct pci_dev *pdev = dev->persist->pdev; 3181 int err = 0; 3182 3183 mutex_lock(&dev->persist->pci_status_mutex); 3184 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { 3185 err = pci_enable_device(pdev); 3186 if (!err) 3187 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; 3188 } 3189 mutex_unlock(&dev->persist->pci_status_mutex); 3190 3191 return err; 3192 } 3193 3194 static void mlx4_pci_disable_device(struct mlx4_dev *dev) 3195 { 3196 struct pci_dev *pdev = dev->persist->pdev; 3197 3198 mutex_lock(&dev->persist->pci_status_mutex); 3199 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { 3200 pci_disable_device(pdev); 3201 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; 3202 } 3203 mutex_unlock(&dev->persist->pci_status_mutex); 3204 } 3205 3206 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3207 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3208 int reset_flow) 3209 { 3210 struct mlx4_dev *dev; 3211 unsigned sum = 0; 3212 int err; 3213 int port; 3214 int i; 3215 struct mlx4_dev_cap *dev_cap = NULL; 3216 int existing_vfs = 0; 3217 3218 dev = &priv->dev; 3219 3220 INIT_LIST_HEAD(&priv->ctx_list); 3221 spin_lock_init(&priv->ctx_lock); 3222 3223 mutex_init(&priv->port_mutex); 3224 mutex_init(&priv->bond_mutex); 3225 3226 INIT_LIST_HEAD(&priv->pgdir_list); 3227 mutex_init(&priv->pgdir_mutex); 3228 spin_lock_init(&priv->cmd.context_lock); 3229 3230 INIT_LIST_HEAD(&priv->bf_list); 3231 mutex_init(&priv->bf_mutex); 3232 3233 dev->rev_id = pdev->revision; 3234 dev->numa_node = dev_to_node(&pdev->dev); 3235 3236 /* Detect if this device is a virtual function */ 3237 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3238 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 3239 dev->flags |= MLX4_FLAG_SLAVE; 3240 } else { 3241 /* We reset the device and enable SRIOV only for physical 3242 * devices. Try to claim ownership on the device; 3243 * if already taken, skip -- do not allow multiple PFs */ 3244 err = mlx4_get_ownership(dev); 3245 if (err) { 3246 if (err < 0) 3247 return err; 3248 else { 3249 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 3250 return -EINVAL; 3251 } 3252 } 3253 3254 atomic_set(&priv->opreq_count, 0); 3255 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 3256 3257 /* 3258 * Now reset the HCA before we touch the PCI capabilities or 3259 * attempt a firmware command, since a boot ROM may have left 3260 * the HCA in an undefined state. 3261 */ 3262 err = mlx4_reset(dev); 3263 if (err) { 3264 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 3265 goto err_sriov; 3266 } 3267 3268 if (total_vfs) { 3269 dev->flags = MLX4_FLAG_MASTER; 3270 existing_vfs = pci_num_vf(pdev); 3271 if (existing_vfs) 3272 dev->flags |= MLX4_FLAG_SRIOV; 3273 dev->persist->num_vfs = total_vfs; 3274 } 3275 } 3276 3277 /* on load remove any previous indication of internal error, 3278 * device is up. 
3279 */ 3280 dev->persist->state = MLX4_DEVICE_STATE_UP; 3281 3282 slave_start: 3283 err = mlx4_cmd_init(dev); 3284 if (err) { 3285 mlx4_err(dev, "Failed to init command interface, aborting\n"); 3286 goto err_sriov; 3287 } 3288 3289 /* In slave functions, the communication channel must be initialized 3290 * before posting commands. Also, init num_slaves before calling 3291 * mlx4_init_hca */ 3292 if (mlx4_is_mfunc(dev)) { 3293 if (mlx4_is_master(dev)) { 3294 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 3295 3296 } else { 3297 dev->num_slaves = 0; 3298 err = mlx4_multi_func_init(dev); 3299 if (err) { 3300 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 3301 goto err_cmd; 3302 } 3303 } 3304 } 3305 3306 err = mlx4_init_fw(dev); 3307 if (err) { 3308 mlx4_err(dev, "Failed to init fw, aborting.\n"); 3309 goto err_mfunc; 3310 } 3311 3312 if (mlx4_is_master(dev)) { 3313 /* when we hit the goto slave_start below, dev_cap already initialized */ 3314 if (!dev_cap) { 3315 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 3316 3317 if (!dev_cap) { 3318 err = -ENOMEM; 3319 goto err_fw; 3320 } 3321 3322 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3323 if (err) { 3324 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3325 goto err_fw; 3326 } 3327 3328 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3329 goto err_fw; 3330 3331 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3332 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 3333 total_vfs, 3334 existing_vfs, 3335 reset_flow); 3336 3337 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3338 dev->flags = dev_flags; 3339 if (!SRIOV_VALID_STATE(dev->flags)) { 3340 mlx4_err(dev, "Invalid SRIOV state\n"); 3341 goto err_sriov; 3342 } 3343 err = mlx4_reset(dev); 3344 if (err) { 3345 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 3346 goto err_sriov; 3347 } 3348 goto slave_start; 3349 } 3350 } else { 3351 /* Legacy mode FW requires SRIOV to be enabled before 3352 * doing QUERY_DEV_CAP, since max_eq's value is different if 3353 * SRIOV is enabled. 
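* (With SYS_EQS-capable firmware, by contrast, QUERY_DEV_CAP is
* issued first and SR-IOV is only enabled afterwards, in the
* MLX4_DEV_CAP_FLAG2_SYS_EQS branch further down.)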
3354 */ 3355 memset(dev_cap, 0, sizeof(*dev_cap)); 3356 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3357 if (err) { 3358 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3359 goto err_fw; 3360 } 3361 3362 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3363 goto err_fw; 3364 } 3365 } 3366 3367 err = mlx4_init_hca(dev); 3368 if (err) { 3369 if (err == -EACCES) { 3370 /* Not primary Physical function 3371 * Running in slave mode */ 3372 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3373 /* We're not a PF */ 3374 if (dev->flags & MLX4_FLAG_SRIOV) { 3375 if (!existing_vfs) 3376 pci_disable_sriov(pdev); 3377 if (mlx4_is_master(dev) && !reset_flow) 3378 atomic_dec(&pf_loading); 3379 dev->flags &= ~MLX4_FLAG_SRIOV; 3380 } 3381 if (!mlx4_is_slave(dev)) 3382 mlx4_free_ownership(dev); 3383 dev->flags |= MLX4_FLAG_SLAVE; 3384 dev->flags &= ~MLX4_FLAG_MASTER; 3385 goto slave_start; 3386 } else 3387 goto err_fw; 3388 } 3389 3390 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3391 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 3392 existing_vfs, reset_flow); 3393 3394 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 3395 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 3396 dev->flags = dev_flags; 3397 err = mlx4_cmd_init(dev); 3398 if (err) { 3399 /* Only VHCR is cleaned up, so could still 3400 * send FW commands 3401 */ 3402 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 3403 goto err_close; 3404 } 3405 } else { 3406 dev->flags = dev_flags; 3407 } 3408 3409 if (!SRIOV_VALID_STATE(dev->flags)) { 3410 mlx4_err(dev, "Invalid SRIOV state\n"); 3411 goto err_close; 3412 } 3413 } 3414 3415 /* Check whether the device is functioning at its maximum possible speed. 3416 * No return code for this call; just warn the user in case the PCI 3417 * Express capabilities of the device are under-satisfied by the bus. 3418 */ 3419 if (!mlx4_is_slave(dev)) 3420 mlx4_check_pcie_caps(dev); 3421 3422 /* In master functions, the communication channel must be initialized 3423 * after obtaining its address from fw */ 3424 if (mlx4_is_master(dev)) { 3425 if (dev->caps.num_ports < 2 && 3426 num_vfs_argc > 1) { 3427 err = -EINVAL; 3428 mlx4_err(dev, 3429 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 3430 dev->caps.num_ports); 3431 goto err_close; 3432 } 3433 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 3434 3435 for (i = 0; 3436 i < sizeof(dev->persist->nvfs)/ 3437 sizeof(dev->persist->nvfs[0]); i++) { 3438 unsigned j; 3439 3440 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 3441 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 3442 dev->dev_vfs[sum].n_ports = i < 2 ? 
1 : 3443 dev->caps.num_ports; 3444 } 3445 } 3446 3447 /* In master functions, the communication channel 3448 * must be initialized after obtaining its address from fw 3449 */ 3450 err = mlx4_multi_func_init(dev); 3451 if (err) { 3452 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 3453 goto err_close; 3454 } 3455 } 3456 3457 err = mlx4_alloc_eq_table(dev); 3458 if (err) 3459 goto err_master_mfunc; 3460 3461 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); 3462 mutex_init(&priv->msix_ctl.pool_lock); 3463 3464 mlx4_enable_msi_x(dev); 3465 if ((mlx4_is_mfunc(dev)) && 3466 !(dev->flags & MLX4_FLAG_MSI_X)) { 3467 err = -ENOSYS; 3468 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 3469 goto err_free_eq; 3470 } 3471 3472 if (!mlx4_is_slave(dev)) { 3473 err = mlx4_init_steering(dev); 3474 if (err) 3475 goto err_disable_msix; 3476 } 3477 3478 mlx4_init_quotas(dev); 3479 3480 err = mlx4_setup_hca(dev); 3481 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 3482 !mlx4_is_mfunc(dev)) { 3483 dev->flags &= ~MLX4_FLAG_MSI_X; 3484 dev->caps.num_comp_vectors = 1; 3485 pci_disable_msix(pdev); 3486 err = mlx4_setup_hca(dev); 3487 } 3488 3489 if (err) 3490 goto err_steer; 3491 3492 /* When PF resources are ready arm its comm channel to enable 3493 * getting commands 3494 */ 3495 if (mlx4_is_master(dev)) { 3496 err = mlx4_ARM_COMM_CHANNEL(dev); 3497 if (err) { 3498 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3499 err); 3500 goto err_steer; 3501 } 3502 } 3503 3504 for (port = 1; port <= dev->caps.num_ports; port++) { 3505 err = mlx4_init_port_info(dev, port); 3506 if (err) 3507 goto err_port; 3508 } 3509 3510 priv->v2p.port1 = 1; 3511 priv->v2p.port2 = 2; 3512 3513 err = mlx4_register_device(dev); 3514 if (err) 3515 goto err_port; 3516 3517 mlx4_request_modules(dev); 3518 3519 mlx4_sense_init(dev); 3520 mlx4_start_sense(dev); 3521 3522 priv->removed = 0; 3523 3524 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3525 atomic_dec(&pf_loading); 3526 3527 kfree(dev_cap); 3528 return 0; 3529 3530 err_port: 3531 for (--port; port >= 1; --port) 3532 mlx4_cleanup_port_info(&priv->port[port]); 3533 3534 mlx4_cleanup_counters_table(dev); 3535 mlx4_cleanup_qp_table(dev); 3536 mlx4_cleanup_srq_table(dev); 3537 mlx4_cleanup_cq_table(dev); 3538 mlx4_cmd_use_polling(dev); 3539 mlx4_cleanup_eq_table(dev); 3540 mlx4_cleanup_mcg_table(dev); 3541 mlx4_cleanup_mr_table(dev); 3542 mlx4_cleanup_xrcd_table(dev); 3543 mlx4_cleanup_pd_table(dev); 3544 mlx4_cleanup_uar_table(dev); 3545 3546 err_steer: 3547 if (!mlx4_is_slave(dev)) 3548 mlx4_clear_steering(dev); 3549 3550 err_disable_msix: 3551 if (dev->flags & MLX4_FLAG_MSI_X) 3552 pci_disable_msix(pdev); 3553 3554 err_free_eq: 3555 mlx4_free_eq_table(dev); 3556 3557 err_master_mfunc: 3558 if (mlx4_is_master(dev)) { 3559 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3560 mlx4_multi_func_cleanup(dev); 3561 } 3562 3563 if (mlx4_is_slave(dev)) { 3564 kfree(dev->caps.qp0_qkey); 3565 kfree(dev->caps.qp0_tunnel); 3566 kfree(dev->caps.qp0_proxy); 3567 kfree(dev->caps.qp1_tunnel); 3568 kfree(dev->caps.qp1_proxy); 3569 } 3570 3571 err_close: 3572 mlx4_close_hca(dev); 3573 3574 err_fw: 3575 mlx4_close_fw(dev); 3576 3577 err_mfunc: 3578 if (mlx4_is_slave(dev)) 3579 mlx4_multi_func_cleanup(dev); 3580 3581 err_cmd: 3582 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3583 3584 err_sriov: 3585 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3586 pci_disable_sriov(pdev); 3587 dev->flags &= ~MLX4_FLAG_SRIOV; 
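/*
 * VFs that existed before this load attempt (existing_vfs != 0)
 * are deliberately left enabled by the condition above.
 */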
3588 } 3589 3590 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3591 atomic_dec(&pf_loading); 3592 3593 kfree(priv->dev.dev_vfs); 3594 3595 if (!mlx4_is_slave(dev)) 3596 mlx4_free_ownership(dev); 3597 3598 kfree(dev_cap); 3599 return err; 3600 } 3601 3602 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3603 struct mlx4_priv *priv) 3604 { 3605 int err; 3606 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3607 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3608 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3609 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3610 unsigned total_vfs = 0; 3611 unsigned int i; 3612 3613 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3614 3615 err = mlx4_pci_enable_device(&priv->dev); 3616 if (err) { 3617 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3618 return err; 3619 } 3620 3621 /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS 3622 * per port, we must limit the number of VFs to 63 (since their are 3623 * 128 MACs) 3624 */ 3625 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3626 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3627 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3628 if (nvfs[i] < 0) { 3629 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3630 err = -EINVAL; 3631 goto err_disable_pdev; 3632 } 3633 } 3634 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3635 i++) { 3636 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3637 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3638 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3639 err = -EINVAL; 3640 goto err_disable_pdev; 3641 } 3642 } 3643 if (total_vfs > MLX4_MAX_NUM_VF) { 3644 dev_err(&pdev->dev, 3645 "Requested more VF's (%d) than allowed by hw (%d)\n", 3646 total_vfs, MLX4_MAX_NUM_VF); 3647 err = -EINVAL; 3648 goto err_disable_pdev; 3649 } 3650 3651 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3652 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { 3653 dev_err(&pdev->dev, 3654 "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n", 3655 nvfs[i] + nvfs[2], i + 1, 3656 MLX4_MAX_NUM_VF_P_PORT); 3657 err = -EINVAL; 3658 goto err_disable_pdev; 3659 } 3660 } 3661 3662 /* Check for BARs. 
*/ 3663 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3664 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3665 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3666 pci_dev_data, (long)pci_resource_flags(pdev, 0)); 3667 err = -ENODEV; 3668 goto err_disable_pdev; 3669 } 3670 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3671 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3672 err = -ENODEV; 3673 goto err_disable_pdev; 3674 } 3675 3676 err = pci_request_regions(pdev, DRV_NAME); 3677 if (err) { 3678 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3679 goto err_disable_pdev; 3680 } 3681 3682 pci_set_master(pdev); 3683 3684 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3685 if (err) { 3686 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3687 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3688 if (err) { 3689 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3690 goto err_release_regions; 3691 } 3692 } 3693 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3694 if (err) { 3695 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3696 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3697 if (err) { 3698 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3699 goto err_release_regions; 3700 } 3701 } 3702 3703 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3704 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3705 /* Detect if this device is a virtual function */ 3706 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3707 /* When acting as pf, we normally skip vfs unless explicitly 3708 * requested to probe them. 3709 */ 3710 if (total_vfs) { 3711 unsigned vfs_offset = 0; 3712 3713 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3714 vfs_offset + nvfs[i] < extended_func_num(pdev); 3715 vfs_offset += nvfs[i], i++) 3716 ; 3717 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3718 err = -ENODEV; 3719 goto err_release_regions; 3720 } 3721 if ((extended_func_num(pdev) - vfs_offset) 3722 > prb_vf[i]) { 3723 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3724 extended_func_num(pdev)); 3725 err = -ENODEV; 3726 goto err_release_regions; 3727 } 3728 } 3729 } 3730 3731 err = mlx4_catas_init(&priv->dev); 3732 if (err) 3733 goto err_release_regions; 3734 3735 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3736 if (err) 3737 goto err_catas; 3738 3739 return 0; 3740 3741 err_catas: 3742 mlx4_catas_end(&priv->dev); 3743 3744 err_release_regions: 3745 pci_release_regions(pdev); 3746 3747 err_disable_pdev: 3748 mlx4_pci_disable_device(&priv->dev); 3749 pci_set_drvdata(pdev, NULL); 3750 return err; 3751 } 3752 3753 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3754 { 3755 3756 struct sysctl_ctx_list *ctx; 3757 struct sysctl_oid *node; 3758 struct sysctl_oid_list *node_list; 3759 struct mlx4_priv *priv; 3760 struct mlx4_dev *dev; 3761 int ret; 3762 3763 printk_once(KERN_INFO "%s", mlx4_version); 3764 3765 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3766 if (!priv) 3767 return -ENOMEM; 3768 3769 dev = &priv->dev; 3770 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3771 if (!dev->persist) { 3772 kfree(priv); 3773 return -ENOMEM; 3774 } 3775 dev->persist->pdev = pdev; 3776 dev->persist->dev = dev; 3777 pci_set_drvdata(pdev, dev->persist); 3778 priv->pci_dev_data = id->driver_data; 3779 mutex_init(&dev->persist->device_state_mutex); 3780 
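/*
 * Three persist-level mutexes are initialized here: device_state_mutex
 * protects dev->persist->state, interface_state_mutex the interface
 * up/deletion state, and pci_status_mutex the PCI enable/disable
 * status.
 */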
	mutex_init(&dev->persist->device_state_mutex);
	mutex_init(&dev->persist->interface_state_mutex);
	mutex_init(&dev->persist->pci_status_mutex);

	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret) {
		kfree(dev->persist);
		kfree(priv);
		return ret;
	} else {
		device_set_desc(pdev->dev.bsddev, mlx4_description);
		pci_save_state(pdev);
	}

	snprintf(dev->fw_str, sizeof(dev->fw_str), "%d.%d.%d",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) (dev->caps.fw_ver & 0xffff));

	ctx = &dev->hw_ctx;
	sysctl_ctx_init(ctx);
	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(pdev->dev.kobj.oidp),
	    OID_AUTO, "hw", CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
	    "mlx4 dev hw information");
	if (node != NULL) {
		node_list = SYSCTL_CHILDREN(node);
		SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO,
		    "fw_version", CTLFLAG_RD, dev->fw_str, 0,
		    "Device firmware version");
		SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO,
		    "board_id", CTLFLAG_RD, dev->board_id, 0,
		    "Device board identifier");
	}

	return ret;
}

static void mlx4_clean_dev(struct mlx4_dev *dev)
{
	struct mlx4_dev_persistent *persist = dev->persist;
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);

	memset(priv, 0, sizeof(*priv));
	priv->dev.persist = persist;
	priv->dev.flags = flags;
}

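/*
 * Undo everything mlx4_load_one() set up, in roughly the reverse order
 * of initialization.  The priv and persist allocations are preserved
 * (see mlx4_clean_dev() above) so that mlx4_restart_one() and the PCI
 * error handlers can bring the device back without a fresh probe.
 */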
static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data;
	int p, i;

	if (priv->removed)
		return;

	/* Save the current port types for later restoration. */
	for (i = 0; i < dev->caps.num_ports; i++) {
		dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
		dev->persist->curr_port_poss_type[i] =
		    dev->caps.possible_type[i + 1];
	}

	pci_dev_data = priv->pci_dev_data;

	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
		    RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
		    RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	mlx4_clean_dev(dev);
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int active_vfs = 0;

	mutex_lock(&persist->interface_state_mutex);
	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
	mutex_unlock(&persist->interface_state_mutex);

	/*
	 * Clear the device description to avoid use after free,
	 * because the bsddev is not destroyed when this module is
	 * unloaded:
	 */
	device_set_desc(pdev->dev.bsddev, NULL);

	/* Disabling SR-IOV is not allowed while there are active VFs. */
	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF while there are active VFs!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}

	/* The device is now marked for deletion; proceed without the
	 * lock so that other tasks can terminate.
	 */
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	else
		mlx4_info(dev, "%s: interface is down\n", __func__);
	mlx4_catas_end(dev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(dev->persist);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}

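/*
 * Re-apply the port types saved by mlx4_unload_one().  This runs on
 * the restart and PCI-resume paths so that each port comes back in the
 * same IB/Ethernet personality it had before the reset.
 */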
static int restore_current_port_types(struct mlx4_dev *dev,
				      enum mlx4_port_type *types,
				      enum mlx4_port_type *poss_types)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err, i;

	mlx4_stop_sense(dev);

	mutex_lock(&priv->port_mutex);
	for (i = 0; i < dev->caps.num_ports; i++)
		dev->caps.possible_type[i + 1] = poss_types[i];
	err = mlx4_change_port_types(dev, types);
	mutex_unlock(&priv->port_mutex);

	mlx4_start_sense(dev);

	return err;
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	err = restore_current_port_types(dev, dev->persist->curr_port_type,
					 dev->persist->curr_port_poss_type);
	if (err)
		mlx4_err(dev, "could not restore original port types (%d)\n",
			 err);

	return err;
}

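/*
 * PCI IDs claimed by this driver.  Older ConnectX/ConnectX-2 parts are
 * tagged MLX4_PCI_DEV_FORCE_SENSE_PORT so that port-type sensing stays
 * active on them, while the 0x1002 and 0x1004 entries are the
 * ConnectX-2 and ConnectX-3 virtual functions and carry
 * MLX4_PCI_DEV_IS_VF so the probe path can treat them accordingly.
 */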
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e),
	  .driver_data = MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002),
	  .driver_data = MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003) },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004),
	  .driver_data = MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
	mlx4_enter_error_state(persist);

	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	mlx4_pci_disable_device(persist->dev);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	int err;

	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
	err = mlx4_pci_enable_device(dev);
	if (err) {
		mlx4_err(dev, "Cannot re-enable device, err=%d\n", err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void mlx4_pci_resume(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int total_vfs;
	int err;

	mlx4_err(dev, "%s was called\n", __func__);
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
				    priv, 1);
		if (err) {
			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
				 __func__, err);
			goto end;
		}

		err = restore_current_port_types(dev,
		    dev->persist->curr_port_type,
		    dev->persist->curr_port_poss_type);
		if (err)
			mlx4_err(dev, "could not restore original port types (%d)\n", err);
	}
end:
	mutex_unlock(&persist->interface_state_mutex);
}

static void mlx4_shutdown(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);
}

static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset = mlx4_pci_slot_reset,
	.resume = mlx4_pci_resume,
};

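/*
 * Driver entry points for the linuxkpi PCI bus code: probe and remove
 * drive normal attach and detach, shutdown quiesces the device at
 * system shutdown, and the error handlers above implement the usual
 * error_detected -> slot_reset -> resume recovery sequence.
 */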
static struct pci_driver mlx4_driver = {
	.name = DRV_NAME,
	.id_table = mlx4_pci_table,
	.probe = mlx4_init_one,
	.shutdown = mlx4_shutdown,
	.remove = mlx4_remove_one,
	.err_handler = &mlx4_err_handler,
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check that the port-type module parameter is a legal combination. */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size < -7 ||
	    (mlx4_log_num_mgm_entry_size > 0 &&
	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init_order(mlx4_init, SI_ORDER_FIRST);
module_exit_order(mlx4_cleanup, SI_ORDER_FIRST);

static int
mlx4_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t mlx4_mod = {
	.name = "mlx4",
	.evhand = mlx4_evhand,
};
MODULE_VERSION(mlx4, 1);
DECLARE_MODULE(mlx4, mlx4_mod, SI_SUB_OFED_PREINIT, SI_ORDER_ANY);
MODULE_DEPEND(mlx4, linuxkpi, 1, 1, 1);