Lines matching refs:dev

202 static void mlx5_set_driver_version(struct mlx5_core_dev *dev)  in mlx5_set_driver_version()  argument
210 if (!MLX5_CAP_GEN(dev, driver_version)) in mlx5_set_driver_version()
223 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); in mlx5_set_driver_version()
234 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); in set_dma_caps() local
239 mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n"); in set_dma_caps()
242 mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n"); in set_dma_caps()
249 mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n"); in set_dma_caps()
252 mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n"); in set_dma_caps()
257 dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); in set_dma_caps()
261 int mlx5_pci_read_power_status(struct mlx5_core_dev *dev, in mlx5_pci_read_power_status() argument
268 err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), in mlx5_pci_read_power_status()
276 static int mlx5_pci_enable_device(struct mlx5_core_dev *dev) in mlx5_pci_enable_device() argument
278 struct pci_dev *pdev = dev->pdev; in mlx5_pci_enable_device()
281 mutex_lock(&dev->pci_status_mutex); in mlx5_pci_enable_device()
282 if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) { in mlx5_pci_enable_device()
285 dev->pci_status = MLX5_PCI_STATUS_ENABLED; in mlx5_pci_enable_device()
287 mutex_unlock(&dev->pci_status_mutex); in mlx5_pci_enable_device()
292 static void mlx5_pci_disable_device(struct mlx5_core_dev *dev) in mlx5_pci_disable_device() argument
294 struct pci_dev *pdev = dev->pdev; in mlx5_pci_disable_device()
296 mutex_lock(&dev->pci_status_mutex); in mlx5_pci_disable_device()
297 if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) { in mlx5_pci_disable_device()
299 dev->pci_status = MLX5_PCI_STATUS_DISABLED; in mlx5_pci_disable_device()
301 mutex_unlock(&dev->pci_status_mutex); in mlx5_pci_disable_device()
306 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); in request_bar() local
310 mlx5_core_err(dev, "Missing registers BAR, aborting\n"); in request_bar()
316 mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n"); in request_bar()
326 static int mlx5_enable_msix(struct mlx5_core_dev *dev) in mlx5_enable_msix() argument
328 struct mlx5_priv *priv = &dev->priv; in mlx5_enable_msix()
330 int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); in mlx5_enable_msix()
331 int limit = dev->msix_eqvec; in mlx5_enable_msix()
338 nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus(); in mlx5_enable_msix()
352 nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr, in mlx5_enable_msix()
361 static void mlx5_disable_msix(struct mlx5_core_dev *dev) in mlx5_disable_msix() argument
363 struct mlx5_priv *priv = &dev->priv; in mlx5_disable_msix()
365 pci_disable_msix(dev->pdev); in mlx5_disable_msix()
383 static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size) in to_fw_pkey_sz() argument
399 mlx5_core_warn(dev, "invalid pkey table size %d\n", size); in to_fw_pkey_sz()
404 static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, in mlx5_core_get_caps_mode() argument
419 err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); in mlx5_core_get_caps_mode()
421 mlx5_core_warn(dev, in mlx5_core_get_caps_mode()
431 memcpy(dev->hca_caps_max[cap_type], hca_caps, in mlx5_core_get_caps_mode()
435 memcpy(dev->hca_caps_cur[cap_type], hca_caps, in mlx5_core_get_caps_mode()
439 mlx5_core_warn(dev, in mlx5_core_get_caps_mode()
450 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type) in mlx5_core_get_caps() argument
454 ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR); in mlx5_core_get_caps()
458 return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX); in mlx5_core_get_caps()
461 static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz) in set_caps() argument
467 return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); in set_caps()
470 static int handle_hca_cap(struct mlx5_core_dev *dev) in handle_hca_cap() argument
473 struct mlx5_profile *prof = dev->profile; in handle_hca_cap()
480 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL); in handle_hca_cap()
486 memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], in handle_hca_cap()
489 mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", in handle_hca_cap()
490 mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), in handle_hca_cap()
494 to_fw_pkey_sz(dev, 128)); in handle_hca_cap()
506 if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096) in handle_hca_cap()
514 err = set_caps(dev, set_ctx, set_sz); in handle_hca_cap()
521 static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) in handle_hca_cap_atomic() argument
529 if (MLX5_CAP_GEN(dev, atomic)) { in handle_hca_cap_atomic()
530 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC); in handle_hca_cap_atomic()
538 MLX5_CAP_ATOMIC(dev, in handle_hca_cap_atomic()
556 err = set_caps(dev, set_ctx, set_sz); in handle_hca_cap_atomic()
562 static int handle_hca_cap_2(struct mlx5_core_dev *dev) in handle_hca_cap_2() argument
566 if (MLX5_CAP_GEN_MAX(dev, hca_cap_2)) { in handle_hca_cap_2()
567 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2); in handle_hca_cap_2()
575 static int set_hca_ctrl(struct mlx5_core_dev *dev) in set_hca_ctrl() argument
581 if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && in set_hca_ctrl()
582 !MLX5_CAP_GEN(dev, roce)) in set_hca_ctrl()
587 err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in), in set_hca_ctrl()
593 static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev) in mlx5_core_set_hca_defaults() argument
598 if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) in mlx5_core_set_hca_defaults()
599 ret = mlx5_nic_vport_update_local_lb(dev, false); in mlx5_core_set_hca_defaults()
604 static int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) in mlx5_core_enable_hca() argument
611 return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); in mlx5_core_enable_hca()
614 static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) in mlx5_core_disable_hca() argument
620 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); in mlx5_core_disable_hca()
623 static int mlx5_core_set_issi(struct mlx5_core_dev *dev) in mlx5_core_set_issi() argument
632 err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out)); in mlx5_core_set_issi()
639 mlx5_core_dbg(dev, "Only ISSI 0 is supported\n"); in mlx5_core_set_issi()
643 mlx5_core_err(dev, "failed to query ISSI\n"); in mlx5_core_set_issi()
656 err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out)); in mlx5_core_set_issi()
658 mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err); in mlx5_core_set_issi()
662 dev->issi = 1; in mlx5_core_set_issi()
673 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) in mlx5_vector2eqn() argument
675 struct mlx5_eq_table *table = &dev->priv.eq_table; in mlx5_vector2eqn()
694 static void free_comp_eqs(struct mlx5_core_dev *dev) in free_comp_eqs() argument
696 struct mlx5_eq_table *table = &dev->priv.eq_table; in free_comp_eqs()
703 if (mlx5_destroy_unmap_eq(dev, eq)) in free_comp_eqs()
704 mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n", in free_comp_eqs()
712 static int alloc_comp_eqs(struct mlx5_core_dev *dev) in alloc_comp_eqs() argument
714 struct mlx5_eq_table *table = &dev->priv.eq_table; in alloc_comp_eqs()
725 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node); in alloc_comp_eqs()
727 err = mlx5_create_map_eq(dev, eq, in alloc_comp_eqs()
733 mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn); in alloc_comp_eqs()
743 free_comp_eqs(dev); in alloc_comp_eqs()
747 static inline int fw_initializing(struct mlx5_core_dev *dev) in fw_initializing() argument
749 return ioread32be(&dev->iseg->initializing) >> 31; in fw_initializing()
752 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, in wait_fw_init() argument
761 while (fw_initializing(dev) == 1) { in wait_fw_init()
767 mlx5_core_warn(dev, in wait_fw_init()
776 mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n", in wait_fw_init()
777 ioread32be(&dev->iseg->initializing)); in wait_fw_init()
785 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); in mlx5_add_device() local
793 dev_ctx->context = intf->add(dev); in mlx5_add_device()
808 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); in mlx5_remove_device() local
816 intf->remove(dev, dev_ctx->context); in mlx5_remove_device()
823 mlx5_register_device(struct mlx5_core_dev *dev) in mlx5_register_device() argument
825 struct mlx5_priv *priv = &dev->priv; in mlx5_register_device()
838 mlx5_unregister_device(struct mlx5_core_dev *dev) in mlx5_unregister_device() argument
840 struct mlx5_priv *priv = &dev->priv; in mlx5_unregister_device()
906 mlx5_firmware_update(struct mlx5_core_dev *dev) in mlx5_firmware_update() argument
916 err = mlx5_firmware_flash(dev, fw); in mlx5_firmware_update()
925 static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) in mlx5_pci_init() argument
927 struct pci_dev *pdev = dev->pdev; in mlx5_pci_init()
930 pdev = dev->pdev; in mlx5_pci_init()
931 pci_set_drvdata(dev->pdev, dev); in mlx5_pci_init()
932 strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN); in mlx5_pci_init()
939 err = mlx5_pci_enable_device(dev); in mlx5_pci_init()
941 mlx5_core_err(dev, "Cannot enable PCI device, aborting\n"); in mlx5_pci_init()
947 mlx5_core_err(dev, "error requesting BARs, aborting\n"); in mlx5_pci_init()
955 mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n"); in mlx5_pci_init()
959 dev->iseg_base = pci_resource_start(dev->pdev, 0); in mlx5_pci_init()
960 dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); in mlx5_pci_init()
961 if (!dev->iseg) { in mlx5_pci_init()
963 mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n"); in mlx5_pci_init()
970 release_bar(dev->pdev); in mlx5_pci_init()
972 mlx5_pci_disable_device(dev); in mlx5_pci_init()
977 static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv) in mlx5_pci_close() argument
980 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) in mlx5_pci_close()
981 pci_iov_detach(dev->pdev->dev.bsddev); in mlx5_pci_close()
983 iounmap(dev->iseg); in mlx5_pci_close()
984 release_bar(dev->pdev); in mlx5_pci_close()
985 mlx5_pci_disable_device(dev); in mlx5_pci_close()
988 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) in mlx5_init_once() argument
992 err = mlx5_vsc_find_cap(dev); in mlx5_init_once()
994 mlx5_core_warn(dev, "Unable to find vendor specific capabilities\n"); in mlx5_init_once()
996 err = mlx5_query_hca_caps(dev); in mlx5_init_once()
998 mlx5_core_err(dev, "query hca failed\n"); in mlx5_init_once()
1002 err = mlx5_query_board_id(dev); in mlx5_init_once()
1004 mlx5_core_err(dev, "query board id failed\n"); in mlx5_init_once()
1008 err = mlx5_eq_init(dev); in mlx5_init_once()
1010 mlx5_core_err(dev, "failed to initialize eq\n"); in mlx5_init_once()
1016 err = mlx5_init_cq_table(dev); in mlx5_init_once()
1018 mlx5_core_err(dev, "failed to initialize cq table\n"); in mlx5_init_once()
1022 mlx5_init_qp_table(dev); in mlx5_init_once()
1023 mlx5_init_srq_table(dev); in mlx5_init_once()
1024 mlx5_init_mr_table(dev); in mlx5_init_once()
1026 mlx5_init_reserved_gids(dev); in mlx5_init_once()
1027 mlx5_fpga_init(dev); in mlx5_init_once()
1030 err = mlx5_init_rl_table(dev); in mlx5_init_once()
1032 mlx5_core_err(dev, "Failed to init rate limiting\n"); in mlx5_init_once()
1040 mlx5_cleanup_mr_table(dev); in mlx5_init_once()
1041 mlx5_cleanup_srq_table(dev); in mlx5_init_once()
1042 mlx5_cleanup_qp_table(dev); in mlx5_init_once()
1043 mlx5_cleanup_cq_table(dev); in mlx5_init_once()
1047 mlx5_eq_cleanup(dev); in mlx5_init_once()
1053 static void mlx5_cleanup_once(struct mlx5_core_dev *dev) in mlx5_cleanup_once() argument
1056 mlx5_cleanup_rl_table(dev); in mlx5_cleanup_once()
1058 mlx5_fpga_cleanup(dev); in mlx5_cleanup_once()
1059 mlx5_cleanup_reserved_gids(dev); in mlx5_cleanup_once()
1060 mlx5_cleanup_mr_table(dev); in mlx5_cleanup_once()
1061 mlx5_cleanup_srq_table(dev); in mlx5_cleanup_once()
1062 mlx5_cleanup_qp_table(dev); in mlx5_cleanup_once()
1063 mlx5_cleanup_cq_table(dev); in mlx5_cleanup_once()
1064 mlx5_eq_cleanup(dev); in mlx5_cleanup_once()
1067 static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, in mlx5_load_one() argument
1072 mutex_lock(&dev->intf_state_mutex); in mlx5_load_one()
1073 if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { in mlx5_load_one()
1074 mlx5_core_warn(dev, "interface is up, NOP\n"); in mlx5_load_one()
1078 mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n", in mlx5_load_one()
1079 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); in mlx5_load_one()
1085 dev->state = MLX5_DEVICE_STATE_UP; in mlx5_load_one()
1089 err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, in mlx5_load_one()
1092 dev_err(&dev->pdev->dev, in mlx5_load_one()
1098 err = mlx5_cmd_init(dev); in mlx5_load_one()
1100 mlx5_core_err(dev, in mlx5_load_one()
1105 err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0); in mlx5_load_one()
1107 mlx5_core_err(dev, in mlx5_load_one()
1113 err = mlx5_core_enable_hca(dev, 0); in mlx5_load_one()
1115 mlx5_core_err(dev, "enable hca failed\n"); in mlx5_load_one()
1119 err = mlx5_core_set_issi(dev); in mlx5_load_one()
1121 mlx5_core_err(dev, "failed to set issi\n"); in mlx5_load_one()
1125 err = mlx5_pagealloc_start(dev); in mlx5_load_one()
1127 mlx5_core_err(dev, "mlx5_pagealloc_start failed\n"); in mlx5_load_one()
1131 err = mlx5_satisfy_startup_pages(dev, 1); in mlx5_load_one()
1133 mlx5_core_err(dev, "failed to allocate boot pages\n"); in mlx5_load_one()
1137 err = set_hca_ctrl(dev); in mlx5_load_one()
1139 mlx5_core_err(dev, "set_hca_ctrl failed\n"); in mlx5_load_one()
1143 err = handle_hca_cap(dev); in mlx5_load_one()
1145 mlx5_core_err(dev, "handle_hca_cap failed\n"); in mlx5_load_one()
1149 err = handle_hca_cap_atomic(dev); in mlx5_load_one()
1151 mlx5_core_err(dev, "handle_hca_cap_atomic failed\n"); in mlx5_load_one()
1155 err = handle_hca_cap_2(dev); in mlx5_load_one()
1157 mlx5_core_err(dev, "handle_hca_cap_2 failed\n"); in mlx5_load_one()
1161 err = mlx5_satisfy_startup_pages(dev, 0); in mlx5_load_one()
1163 mlx5_core_err(dev, "failed to allocate init pages\n"); in mlx5_load_one()
1167 err = mlx5_cmd_init_hca(dev); in mlx5_load_one()
1169 mlx5_core_err(dev, "init hca failed\n"); in mlx5_load_one()
1173 mlx5_set_driver_version(dev); in mlx5_load_one()
1175 mlx5_start_health_poll(dev); in mlx5_load_one()
1177 if (boot && (err = mlx5_init_once(dev, priv))) { in mlx5_load_one()
1178 mlx5_core_err(dev, "sw objs init failed\n"); in mlx5_load_one()
1182 dev->priv.uar = mlx5_get_uars_page(dev); in mlx5_load_one()
1183 if (IS_ERR(dev->priv.uar)) { in mlx5_load_one()
1184 mlx5_core_err(dev, "Failed allocating uar, aborting\n"); in mlx5_load_one()
1185 err = PTR_ERR(dev->priv.uar); in mlx5_load_one()
1189 err = mlx5_enable_msix(dev); in mlx5_load_one()
1191 mlx5_core_err(dev, "enable msix failed\n"); in mlx5_load_one()
1195 err = mlx5_start_eqs(dev); in mlx5_load_one()
1197 mlx5_core_err(dev, "Failed to start pages and async EQs\n"); in mlx5_load_one()
1201 err = alloc_comp_eqs(dev); in mlx5_load_one()
1203 mlx5_core_err(dev, "Failed to alloc completion EQs\n"); in mlx5_load_one()
1207 err = mlx5_init_fs(dev); in mlx5_load_one()
1209 mlx5_core_err(dev, "flow steering init %d\n", err); in mlx5_load_one()
1213 err = mlx5_core_set_hca_defaults(dev); in mlx5_load_one()
1215 mlx5_core_err(dev, "Failed to set HCA defaults %d\n", err); in mlx5_load_one()
1219 err = mlx5_mpfs_init(dev); in mlx5_load_one()
1221 mlx5_core_err(dev, "mpfs init failed %d\n", err); in mlx5_load_one()
1225 err = mlx5_fpga_device_start(dev); in mlx5_load_one()
1227 mlx5_core_err(dev, "fpga device start failed %d\n", err); in mlx5_load_one()
1231 err = mlx5_diag_cnt_init(dev); in mlx5_load_one()
1233 mlx5_core_err(dev, "diag cnt init failed %d\n", err); in mlx5_load_one()
1237 err = mlx5_register_device(dev); in mlx5_load_one()
1239 mlx5_core_err(dev, "mlx5_register_device failed %d\n", err); in mlx5_load_one()
1243 set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); in mlx5_load_one()
1246 mutex_unlock(&dev->intf_state_mutex); in mlx5_load_one()
1250 mlx5_diag_cnt_cleanup(dev); in mlx5_load_one()
1253 mlx5_fpga_device_stop(dev); in mlx5_load_one()
1256 mlx5_mpfs_destroy(dev); in mlx5_load_one()
1259 mlx5_cleanup_fs(dev); in mlx5_load_one()
1262 free_comp_eqs(dev); in mlx5_load_one()
1265 mlx5_stop_eqs(dev); in mlx5_load_one()
1268 mlx5_disable_msix(dev); in mlx5_load_one()
1271 mlx5_put_uars_page(dev, dev->priv.uar); in mlx5_load_one()
1275 mlx5_cleanup_once(dev); in mlx5_load_one()
1278 mlx5_stop_health_poll(dev, boot); in mlx5_load_one()
1279 if (mlx5_cmd_teardown_hca(dev)) { in mlx5_load_one()
1280 mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n"); in mlx5_load_one()
1285 mlx5_reclaim_startup_pages(dev); in mlx5_load_one()
1288 mlx5_pagealloc_stop(dev); in mlx5_load_one()
1291 mlx5_core_disable_hca(dev); in mlx5_load_one()
1294 mlx5_cmd_cleanup(dev); in mlx5_load_one()
1297 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; in mlx5_load_one()
1298 mutex_unlock(&dev->intf_state_mutex); in mlx5_load_one()
1303 static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, in mlx5_unload_one() argument
1309 mlx5_drain_health_recovery(dev); in mlx5_unload_one()
1311 mutex_lock(&dev->intf_state_mutex); in mlx5_unload_one()
1312 if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { in mlx5_unload_one()
1313 mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__); in mlx5_unload_one()
1315 mlx5_cleanup_once(dev); in mlx5_unload_one()
1319 mlx5_unregister_device(dev); in mlx5_unload_one()
1321 mlx5_eswitch_cleanup(dev->priv.eswitch); in mlx5_unload_one()
1322 mlx5_diag_cnt_cleanup(dev); in mlx5_unload_one()
1323 mlx5_fpga_device_stop(dev); in mlx5_unload_one()
1324 mlx5_mpfs_destroy(dev); in mlx5_unload_one()
1325 mlx5_cleanup_fs(dev); in mlx5_unload_one()
1326 mlx5_wait_for_reclaim_vfs_pages(dev); in mlx5_unload_one()
1327 free_comp_eqs(dev); in mlx5_unload_one()
1328 mlx5_stop_eqs(dev); in mlx5_unload_one()
1329 mlx5_disable_msix(dev); in mlx5_unload_one()
1330 mlx5_put_uars_page(dev, dev->priv.uar); in mlx5_unload_one()
1332 mlx5_cleanup_once(dev); in mlx5_unload_one()
1333 mlx5_stop_health_poll(dev, cleanup); in mlx5_unload_one()
1334 err = mlx5_cmd_teardown_hca(dev); in mlx5_unload_one()
1336 mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n"); in mlx5_unload_one()
1339 mlx5_pagealloc_stop(dev); in mlx5_unload_one()
1340 mlx5_reclaim_startup_pages(dev); in mlx5_unload_one()
1341 mlx5_core_disable_hca(dev); in mlx5_unload_one()
1342 mlx5_cmd_cleanup(dev); in mlx5_unload_one()
1345 clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); in mlx5_unload_one()
1346 mutex_unlock(&dev->intf_state_mutex); in mlx5_unload_one()
1350 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, in mlx5_core_event() argument
1353 struct mlx5_priv *priv = &dev->priv; in mlx5_core_event()
1361 dev_ctx->intf->event(dev, dev_ctx->context, event, param); in mlx5_core_event()
1367 void (*event)(struct mlx5_core_dev *dev,
1398 struct mlx5_core_dev *dev; in init_one() local
1400 device_t bsddev = pdev->dev.bsddev; in init_one()
1415 numa_node = dev_to_node(&pdev->dev); in init_one()
1417 dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, numa_node); in init_one()
1419 priv = &dev->priv; in init_one()
1431 dev->profile = &profiles[mlx5_prof_sel]; in init_one()
1432 dev->pdev = pdev; in init_one()
1433 dev->event = mlx5_core_event; in init_one()
1438 sysctl_ctx_init(&dev->sysctl_ctx); in init_one()
1439 SYSCTL_ADD_INT(&dev->sysctl_ctx, in init_one()
1441 OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0, in init_one()
1443 SYSCTL_ADD_INT(&dev->sysctl_ctx, in init_one()
1445 OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0, in init_one()
1447 SYSCTL_ADD_INT(&dev->sysctl_ctx, in init_one()
1449 OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0, in init_one()
1452 pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, in init_one()
1460 pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, in init_one()
1468 SYSCTL_ADD_U64(&dev->sysctl_ctx, in init_one()
1471 &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED], in init_one()
1473 SYSCTL_ADD_U64(&dev->sysctl_ctx, in init_one()
1476 &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED], in init_one()
1479 SYSCTL_ADD_U64(&dev->sysctl_ctx, in init_one()
1482 &dev->priv.pme_stats.error_counters[i], in init_one()
1486 cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, in init_one()
1494 current_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, in init_one()
1502 max_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx, in init_one()
1510 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1513 &dev->hca_caps_cur[MLX5_CAP_GENERAL], in init_one()
1515 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1518 &dev->hca_caps_max[MLX5_CAP_GENERAL], in init_one()
1520 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1523 &dev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], in init_one()
1525 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1528 &dev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], in init_one()
1530 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1533 &dev->hca_caps_cur[MLX5_CAP_ODP], in init_one()
1535 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1538 &dev->hca_caps_max[MLX5_CAP_ODP], in init_one()
1540 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1543 &dev->hca_caps_cur[MLX5_CAP_ATOMIC], in init_one()
1545 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1548 &dev->hca_caps_max[MLX5_CAP_ATOMIC], in init_one()
1550 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1553 &dev->hca_caps_cur[MLX5_CAP_ROCE], in init_one()
1555 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1558 &dev->hca_caps_max[MLX5_CAP_ROCE], in init_one()
1560 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1563 &dev->hca_caps_cur[MLX5_CAP_IPOIB_OFFLOADS], in init_one()
1565 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1568 &dev->hca_caps_max[MLX5_CAP_IPOIB_OFFLOADS], in init_one()
1570 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1573 &dev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], in init_one()
1575 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1578 &dev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], in init_one()
1580 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1583 &dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], in init_one()
1585 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1588 &dev->hca_caps_max[MLX5_CAP_FLOW_TABLE], in init_one()
1590 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1593 &dev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], in init_one()
1595 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1598 &dev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], in init_one()
1600 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1603 &dev->hca_caps_cur[MLX5_CAP_ESWITCH], in init_one()
1605 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1608 &dev->hca_caps_max[MLX5_CAP_ESWITCH], in init_one()
1610 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1613 &dev->hca_caps_cur[MLX5_CAP_SNAPSHOT], in init_one()
1615 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1618 &dev->hca_caps_max[MLX5_CAP_SNAPSHOT], in init_one()
1620 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1623 &dev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], in init_one()
1625 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1628 &dev->hca_caps_max[MLX5_CAP_VECTOR_CALC], in init_one()
1630 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1633 &dev->hca_caps_cur[MLX5_CAP_QOS], in init_one()
1635 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1638 &dev->hca_caps_max[MLX5_CAP_QOS], in init_one()
1640 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1643 &dev->hca_caps_cur[MLX5_CAP_DEBUG], in init_one()
1645 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1648 &dev->hca_caps_max[MLX5_CAP_DEBUG], in init_one()
1650 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1653 &dev->caps.pcam, sizeof(dev->caps.pcam), "IU", ""); in init_one()
1654 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1657 &dev->caps.mcam, sizeof(dev->caps.mcam), "IU", ""); in init_one()
1658 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1661 &dev->caps.qcam, sizeof(dev->caps.qcam), "IU", ""); in init_one()
1662 SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx, in init_one()
1665 &dev->caps.fpga, sizeof(dev->caps.fpga), "IU", ""); in init_one()
1669 mutex_init(&dev->pci_status_mutex); in init_one()
1670 mutex_init(&dev->intf_state_mutex); in init_one()
1677 mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW); in init_one()
1678 err = mlx5_pci_init(dev, priv); in init_one()
1680 mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err); in init_one()
1684 err = mlx5_health_init(dev); in init_one()
1686 mlx5_core_err(dev, "mlx5_health_init failed %d\n", err); in init_one()
1690 mlx5_pagealloc_init(dev); in init_one()
1692 err = mlx5_load_one(dev, priv, true); in init_one()
1694 mlx5_core_err(dev, "mlx5_load_one failed %d\n", err); in init_one()
1698 mlx5_fwdump_prep(dev); in init_one()
1700 mlx5_firmware_update(dev); in init_one()
1703 if (MLX5_CAP_GEN(dev, vport_group_manager)) { in init_one()
1708 mlx5_core_info(dev, "cannot find SR-IOV PCIe cap\n"); in init_one()
1711 err = mlx5_eswitch_init(dev, 1 + num_vfs); in init_one()
1723 dev->iov_pf = true; in init_one()
1730 mlx5_core_err(dev, "eswitch init failed, error %d\n", in init_one()
1740 mlx5_pagealloc_cleanup(dev); in init_one()
1741 mlx5_health_cleanup(dev); in init_one()
1743 mlx5_pci_close(dev, priv); in init_one()
1745 mtx_destroy(&dev->dump_lock); in init_one()
1747 sysctl_ctx_free(&dev->sysctl_ctx); in init_one()
1748 kfree(dev); in init_one()
1754 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); in remove_one() local
1755 struct mlx5_priv *priv = &dev->priv; in remove_one()
1758 if (dev->iov_pf) { in remove_one()
1759 pci_iov_detach(pdev->dev.bsddev); in remove_one()
1761 dev->iov_pf = false; in remove_one()
1765 if (mlx5_unload_one(dev, priv, true)) { in remove_one()
1766 mlx5_core_err(dev, "mlx5_unload_one() failed, leaked %lld bytes\n", in remove_one()
1767 (long long)(dev->priv.fw_pages * MLX5_ADAPTER_PAGE_SIZE)); in remove_one()
1770 mlx5_pagealloc_cleanup(dev); in remove_one()
1771 mlx5_health_cleanup(dev); in remove_one()
1772 mlx5_fwdump_clean(dev); in remove_one()
1773 mlx5_pci_close(dev, priv); in remove_one()
1774 mtx_destroy(&dev->dump_lock); in remove_one()
1776 sysctl_ctx_free(&dev->sysctl_ctx); in remove_one()
1777 kfree(dev); in remove_one()
1783 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); in mlx5_pci_err_detected() local
1784 struct mlx5_priv *priv = &dev->priv; in mlx5_pci_err_detected()
1786 mlx5_core_info(dev, "%s was called\n", __func__); in mlx5_pci_err_detected()
1787 mlx5_enter_error_state(dev, false); in mlx5_pci_err_detected()
1788 mlx5_unload_one(dev, priv, false); in mlx5_pci_err_detected()
1791 mlx5_drain_health_wq(dev); in mlx5_pci_err_detected()
1792 mlx5_pci_disable_device(dev); in mlx5_pci_err_detected()
1801 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); in mlx5_pci_slot_reset() local
1804 mlx5_core_info(dev,"%s was called\n", __func__); in mlx5_pci_slot_reset()
1806 err = mlx5_pci_enable_device(dev); in mlx5_pci_slot_reset()
1808 mlx5_core_err(dev, "mlx5_pci_enable_device failed with error code: %d\n" in mlx5_pci_slot_reset()
1813 pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0); in mlx5_pci_slot_reset()
1826 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); in wait_vital() local
1827 struct mlx5_core_health *health = &dev->priv.health; in wait_vital()
1837 mlx5_core_warn(dev, "failed reading config word\n"); in wait_vital()
1841 mlx5_core_info(dev, in wait_vital()
1848 mlx5_core_warn(dev, "could not read device ID\n"); in wait_vital()
1853 mlx5_core_info(dev, in wait_vital()
1861 mlx5_core_warn(dev, "could not read device ID\n"); in wait_vital()
1866 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); in mlx5_pci_resume() local
1867 struct mlx5_priv *priv = &dev->priv; in mlx5_pci_resume()
1870 mlx5_core_info(dev,"%s was called\n", __func__); in mlx5_pci_resume()
1874 err = mlx5_load_one(dev, priv, false); in mlx5_pci_resume()
1876 mlx5_core_err(dev, in mlx5_pci_resume()
1879 mlx5_core_info(dev,"device recovered\n"); in mlx5_pci_resume()
1890 mlx5_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) in mlx5_iov_init() argument
1897 pdev = device_get_softc(dev); in mlx5_iov_init()
1910 mlx5_iov_uninit(device_t dev) in mlx5_iov_uninit() argument
1916 pdev = device_get_softc(dev); in mlx5_iov_uninit()
1924 mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) in mlx5_iov_add_vf() argument
1934 pdev = device_get_softc(dev); in mlx5_iov_add_vf()
1991 static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) in mlx5_try_fast_unload() argument
1997 mlx5_core_dbg(dev, "fast unload is disabled by user\n"); in mlx5_try_fast_unload()
2001 fast_teardown = MLX5_CAP_GEN(dev, fast_teardown); in mlx5_try_fast_unload()
2002 force_teardown = MLX5_CAP_GEN(dev, force_teardown); in mlx5_try_fast_unload()
2004 mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown); in mlx5_try_fast_unload()
2005 mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown); in mlx5_try_fast_unload()
2010 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { in mlx5_try_fast_unload()
2011 mlx5_core_dbg(dev, "Device in internal error state, giving up\n"); in mlx5_try_fast_unload()
2018 mlx5_drain_health_wq(dev); in mlx5_try_fast_unload()
2019 mlx5_stop_health_poll(dev, false); in mlx5_try_fast_unload()
2021 err = mlx5_cmd_fast_teardown_hca(dev); in mlx5_try_fast_unload()
2025 err = mlx5_cmd_force_teardown_hca(dev); in mlx5_try_fast_unload()
2029 mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", err); in mlx5_try_fast_unload()
2030 mlx5_start_health_poll(dev); in mlx5_try_fast_unload()
2033 mlx5_enter_error_state(dev, true); in mlx5_try_fast_unload()
2051 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); in shutdown_one() local
2052 struct mlx5_priv *priv = &dev->priv; in shutdown_one()
2056 mlx5_cmd_use_polling(dev); in shutdown_one()
2058 set_bit(MLX5_INTERFACE_STATE_TEARDOWN, &dev->intf_state); in shutdown_one()
2061 mlx5_shutdown_disable_interrupts(dev); in shutdown_one()
2063 err = mlx5_try_fast_unload(dev); in shutdown_one()
2065 mlx5_unload_one(dev, priv, false); in shutdown_one()
2066 mlx5_pci_disable_device(dev); in shutdown_one()
2112 void mlx5_disable_device(struct mlx5_core_dev *dev) in mlx5_disable_device() argument
2114 mlx5_pci_err_detected(dev->pdev, 0); in mlx5_disable_device()
2117 void mlx5_recover_device(struct mlx5_core_dev *dev) in mlx5_recover_device() argument
2119 mlx5_pci_disable_device(dev); in mlx5_recover_device()
2120 if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED) in mlx5_recover_device()
2121 mlx5_pci_resume(dev->pdev); in mlx5_recover_device()