xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_main.c (revision 81ad6265)
1 /*-
2  * Copyright (c) 2013-2021, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include "opt_rss.h"
29 #include "opt_ratelimit.h"
30 
31 #include <linux/kmod.h>
32 #include <linux/module.h>
33 #include <linux/errno.h>
34 #include <linux/pci.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/slab.h>
37 #include <linux/io-mapping.h>
38 #include <linux/interrupt.h>
39 #include <linux/hardirq.h>
40 #include <dev/mlx5/driver.h>
41 #include <dev/mlx5/cq.h>
42 #include <dev/mlx5/qp.h>
43 #include <dev/mlx5/srq.h>
44 #include <dev/mlx5/mpfs.h>
45 #include <dev/mlx5/vport.h>
46 #include <linux/delay.h>
47 #include <dev/mlx5/mlx5_ifc.h>
48 #include <dev/mlx5/mlx5_fpga/core.h>
49 #include <dev/mlx5/mlx5_lib/mlx5.h>
50 #include <dev/mlx5/mlx5_core/mlx5_core.h>
51 #include <dev/mlx5/mlx5_core/eswitch.h>
52 #include <dev/mlx5/mlx5_core/fs_core.h>
53 #ifdef PCI_IOV
54 #include <sys/nv.h>
55 #include <dev/pci/pci_iov.h>
56 #include <sys/iov_schema.h>
57 #endif
58 
59 static const char mlx5_version[] = "Mellanox Core driver "
60 	DRIVER_VERSION " (" DRIVER_RELDATE ")";
61 MODULE_DESCRIPTION("Mellanox ConnectX-4 and onwards core driver");
62 MODULE_LICENSE("Dual BSD/GPL");
63 MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
64 MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
65 MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
66 MODULE_VERSION(mlx5, 1);
67 
68 SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
69     "mlx5 hardware controls");
70 
71 int mlx5_core_debug_mask;
72 SYSCTL_INT(_hw_mlx5, OID_AUTO, debug_mask, CTLFLAG_RWTUN,
73     &mlx5_core_debug_mask, 0,
74     "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
75 
76 #define MLX5_DEFAULT_PROF	2
77 static int mlx5_prof_sel = MLX5_DEFAULT_PROF;
78 SYSCTL_INT(_hw_mlx5, OID_AUTO, prof_sel, CTLFLAG_RWTUN,
79     &mlx5_prof_sel, 0,
80     "profile selector. Valid range 0 - 2");
81 
82 static int mlx5_fast_unload_enabled = 1;
83 SYSCTL_INT(_hw_mlx5, OID_AUTO, fast_unload_enabled, CTLFLAG_RWTUN,
84     &mlx5_fast_unload_enabled, 0,
85     "Set to enable fast unload. Clear to disable.");
86 
87 static LIST_HEAD(intf_list);
88 static LIST_HEAD(dev_list);
89 static DEFINE_MUTEX(intf_mutex);
90 
91 struct mlx5_device_context {
92 	struct list_head	list;
93 	struct mlx5_interface  *intf;
94 	void		       *context;
95 };
96 
97 enum {
98 	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
99 	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
100 };
101 
102 static struct mlx5_profile profiles[] = {
103 	[0] = {
104 		.mask           = 0,
105 	},
106 	[1] = {
107 		.mask		= MLX5_PROF_MASK_QP_SIZE,
108 		.log_max_qp	= 12,
109 	},
110 	[2] = {
111 		.mask		= MLX5_PROF_MASK_QP_SIZE |
112 				  MLX5_PROF_MASK_MR_CACHE,
113 		.log_max_qp	= 17,
114 		.mr_cache[0]	= {
115 			.size	= 500,
116 			.limit	= 250
117 		},
118 		.mr_cache[1]	= {
119 			.size	= 500,
120 			.limit	= 250
121 		},
122 		.mr_cache[2]	= {
123 			.size	= 500,
124 			.limit	= 250
125 		},
126 		.mr_cache[3]	= {
127 			.size	= 500,
128 			.limit	= 250
129 		},
130 		.mr_cache[4]	= {
131 			.size	= 500,
132 			.limit	= 250
133 		},
134 		.mr_cache[5]	= {
135 			.size	= 500,
136 			.limit	= 250
137 		},
138 		.mr_cache[6]	= {
139 			.size	= 500,
140 			.limit	= 250
141 		},
142 		.mr_cache[7]	= {
143 			.size	= 500,
144 			.limit	= 250
145 		},
146 		.mr_cache[8]	= {
147 			.size	= 500,
148 			.limit	= 250
149 		},
150 		.mr_cache[9]	= {
151 			.size	= 500,
152 			.limit	= 250
153 		},
154 		.mr_cache[10]	= {
155 			.size	= 500,
156 			.limit	= 250
157 		},
158 		.mr_cache[11]	= {
159 			.size	= 500,
160 			.limit	= 250
161 		},
162 		.mr_cache[12]	= {
163 			.size	= 64,
164 			.limit	= 32
165 		},
166 		.mr_cache[13]	= {
167 			.size	= 32,
168 			.limit	= 16
169 		},
170 		.mr_cache[14]	= {
171 			.size	= 16,
172 			.limit	= 8
173 		},
174 	},
175 	[3] = {
176 		.mask		= MLX5_PROF_MASK_QP_SIZE,
177 		.log_max_qp	= 17,
178 	},
179 };
180 
181 static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
182 {
183 	const size_t driver_ver_sz =
184 	    MLX5_FLD_SZ_BYTES(set_driver_version_in, driver_version);
185 	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
186 	u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {};
187 	char *string;
188 
189 	if (!MLX5_CAP_GEN(dev, driver_version))
190 		return;
191 
192 	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
193 
194 	snprintf(string, driver_ver_sz, "FreeBSD,mlx5_core,%u.%u.%u," DRIVER_VERSION,
195 	    __FreeBSD_version / 100000, (__FreeBSD_version / 1000) % 100,
196 	    __FreeBSD_version % 1000);
197 
198 	/* Send the command */
199 	MLX5_SET(set_driver_version_in, in, opcode,
200 	    MLX5_CMD_OP_SET_DRIVER_VERSION);
201 
202 	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
203 }
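
The driver-version string reported to firmware packs the kernel's __FreeBSD_version into a major.minor.patch triple in front of DRIVER_VERSION. A minimal standalone sketch of that decomposition; the sample value 1400097 and the "x.y.z" stand-in for DRIVER_VERSION are illustrative only, not taken from this file:

#include <stdio.h>

/* Illustrative only: split a __FreeBSD_version-style value the same way
 * mlx5_set_driver_version() does before issuing SET_DRIVER_VERSION. */
int
main(void)
{
	unsigned int osreldate = 1400097;	/* hypothetical __FreeBSD_version */
	char buf[64];

	snprintf(buf, sizeof(buf), "FreeBSD,mlx5_core,%u.%u.%u,x.y.z",
	    osreldate / 100000, (osreldate / 1000) % 100, osreldate % 1000);
	printf("%s\n", buf);	/* -> FreeBSD,mlx5_core,14.0.97,x.y.z */
	return (0);
}
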
204 
205 #ifdef PCI_IOV
206 static const char iov_mac_addr_name[] = "mac-addr";
207 static const char iov_node_guid_name[] = "node-guid";
208 static const char iov_port_guid_name[] = "port-guid";
209 #endif
210 
211 static int set_dma_caps(struct pci_dev *pdev)
212 {
213 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
214 	int err;
215 
216 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
217 	if (err) {
218 		mlx5_core_warn(dev, "couldn't set 64-bit PCI DMA mask\n");
219 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
220 		if (err) {
221 			mlx5_core_err(dev, "Can't set PCI DMA mask, aborting\n");
222 			return err;
223 		}
224 	}
225 
226 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
227 	if (err) {
228 		mlx5_core_warn(dev, "couldn't set 64-bit consistent PCI DMA mask\n");
229 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
230 		if (err) {
231 			mlx5_core_err(dev, "Can't set consistent PCI DMA mask, aborting\n");
232 			return err;
233 		}
234 	}
235 
236 	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
237 	return err;
238 }
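
set_dma_caps() follows the usual widest-first fallback: try a 64-bit mask and only drop to 32 bits if that fails, once for the streaming mask and once for the coherent mask. A condensed, standalone sketch of the pattern; try_mask() is a hypothetical stand-in for pci_set_dma_mask()/pci_set_consistent_dma_mask():

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Hypothetical device: pretend it only supports 32-bit DMA addressing. */
static int
try_mask(uint64_t mask)
{
	return (mask > DMA_BIT_MASK(32)) ? -1 : 0;
}

int
main(void)
{
	int err;

	/* Widest-first fallback, mirroring set_dma_caps(). */
	err = try_mask(DMA_BIT_MASK(64));
	if (err)
		err = try_mask(DMA_BIT_MASK(32));
	printf("DMA mask %s\n", err ? "unavailable" : "configured");
	return (0);
}
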
239 
240 int mlx5_pci_read_power_status(struct mlx5_core_dev *dev,
241 			       u16 *p_power, u8 *p_status)
242 {
243 	u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {};
244 	u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {};
245 	int err;
246 
247 	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
248 	    MLX5_ACCESS_REG_SUMMARY_CTRL_ID_MPEIN, 0, 0);
249 
250 	*p_status = MLX5_GET(mpein_reg, out, pwr_status);
251 	*p_power = MLX5_GET(mpein_reg, out, pci_power);
252 	return err;
253 }
254 
255 static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
256 {
257 	struct pci_dev *pdev = dev->pdev;
258 	int err = 0;
259 
260 	mutex_lock(&dev->pci_status_mutex);
261 	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
262 		err = pci_enable_device(pdev);
263 		if (!err)
264 			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
265 	}
266 	mutex_unlock(&dev->pci_status_mutex);
267 
268 	return err;
269 }
270 
271 static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
272 {
273 	struct pci_dev *pdev = dev->pdev;
274 
275 	mutex_lock(&dev->pci_status_mutex);
276 	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
277 		pci_disable_device(pdev);
278 		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
279 	}
280 	mutex_unlock(&dev->pci_status_mutex);
281 }
282 
283 static int request_bar(struct pci_dev *pdev)
284 {
285 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
286 	int err = 0;
287 
288 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
289 		mlx5_core_err(dev, "Missing registers BAR, aborting\n");
290 		return -ENODEV;
291 	}
292 
293 	err = pci_request_regions(pdev, DRIVER_NAME);
294 	if (err)
295 		mlx5_core_err(dev, "Couldn't get PCI resources, aborting\n");
296 
297 	return err;
298 }
299 
300 static void release_bar(struct pci_dev *pdev)
301 {
302 	pci_release_regions(pdev);
303 }
304 
305 static int mlx5_enable_msix(struct mlx5_core_dev *dev)
306 {
307 	struct mlx5_priv *priv = &dev->priv;
308 	struct mlx5_eq_table *table = &priv->eq_table;
309 	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
310 	int limit = dev->msix_eqvec;
311 	int nvec = MLX5_EQ_VEC_COMP_BASE;
312 	int i;
313 
314 	if (limit > 0)
315 		nvec += limit;
316 	else
317 		nvec += MLX5_CAP_GEN(dev, num_ports) * num_online_cpus();
318 
319 	if (nvec > num_eqs)
320 		nvec = num_eqs;
321 	if (nvec > 256)
322 		nvec = 256;	/* limit of firmware API */
323 	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
324 		return -ENOMEM;
325 
326 	priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);
327 
328 	for (i = 0; i < nvec; i++)
329 		priv->msix_arr[i].entry = i;
330 
331 	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
332 				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
333 	if (nvec < 0)
334 		return nvec;
335 
336 	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
337 	return 0;
338 }
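
The MSI-X budget is the fixed control vectors (MLX5_EQ_VEC_COMP_BASE) plus either the msix_eqvec tunable or one completion vector per port per online CPU, clamped by the device's log_max_eq and by the firmware limit of 256. A small standalone sketch of that arithmetic; the value 3 for the fixed control vectors and the sample inputs are assumptions for illustration:

#include <stdio.h>

#define EQ_VEC_COMP_BASE	3	/* assumed count of fixed control-EQ vectors */

static int
compute_nvec(int limit, int num_ports, int ncpus, int log_max_eq)
{
	int num_eqs = 1 << log_max_eq;
	int nvec = EQ_VEC_COMP_BASE;

	nvec += (limit > 0) ? limit : num_ports * ncpus;
	if (nvec > num_eqs)
		nvec = num_eqs;
	if (nvec > 256)
		nvec = 256;		/* firmware API limit */
	return (nvec > EQ_VEC_COMP_BASE) ? nvec : -1;
}

int
main(void)
{
	/* 1 port, 64 CPUs, log_max_eq = 7 (128 EQs) -> 3 + 64 = 67 vectors. */
	printf("nvec = %d\n", compute_nvec(0, 1, 64, 7));
	return (0);
}
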
339 
340 static void mlx5_disable_msix(struct mlx5_core_dev *dev)
341 {
342 	struct mlx5_priv *priv = &dev->priv;
343 
344 	pci_disable_msix(dev->pdev);
345 	kfree(priv->msix_arr);
346 }
347 
348 struct mlx5_reg_host_endianess {
349 	u8	he;
350 	u8      rsvd[15];
351 };
352 
353 
354 #define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
355 
356 enum {
357 	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
358 				MLX5_DEV_CAP_FLAG_DCT |
359 				MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
360 };
361 
362 static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
363 {
364 	switch (size) {
365 	case 128:
366 		return 0;
367 	case 256:
368 		return 1;
369 	case 512:
370 		return 2;
371 	case 1024:
372 		return 3;
373 	case 2048:
374 		return 4;
375 	case 4096:
376 		return 5;
377 	default:
378 		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
379 		return 0;
380 	}
381 }
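
The firmware encoding returned here is simply log2(size / 128), so table sizes 128..4096 map to 0..5 and the inverse (what mlx5_to_sw_pkey_sz() presumably computes) is 128 << encoding. A tiny sketch of the round trip:

#include <stdio.h>

/* Encode a pkey table size the way to_fw_pkey_sz() does: log2(size / 128). */
static unsigned int
sw_to_fw_pkey_sz(unsigned int size)
{
	unsigned int enc = 0;

	for (unsigned int s = 128; s < size && enc < 5; s <<= 1)
		enc++;
	return (enc);
}

int
main(void)
{
	for (unsigned int size = 128; size <= 4096; size <<= 1) {
		unsigned int enc = sw_to_fw_pkey_sz(size);
		printf("%4u entries -> encoding %u -> %u entries back\n",
		    size, enc, 128u << enc);
	}
	return (0);
}
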
382 
383 static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
384 				   enum mlx5_cap_type cap_type,
385 				   enum mlx5_cap_mode cap_mode)
386 {
387 	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
388 	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
389 	void *out, *hca_caps;
390 	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
391 	int err;
392 
393 	memset(in, 0, sizeof(in));
394 	out = kzalloc(out_sz, GFP_KERNEL);
395 
396 	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
397 	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
398 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
399 	if (err) {
400 		mlx5_core_warn(dev,
401 			       "QUERY_HCA_CAP: type(%x) opmode(%x) failed(%d)\n",
402 			       cap_type, cap_mode, err);
403 		goto query_ex;
404 	}
405 
406 	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
407 
408 	switch (cap_mode) {
409 	case HCA_CAP_OPMOD_GET_MAX:
410 		memcpy(dev->hca_caps_max[cap_type], hca_caps,
411 		       MLX5_UN_SZ_BYTES(hca_cap_union));
412 		break;
413 	case HCA_CAP_OPMOD_GET_CUR:
414 		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
415 		       MLX5_UN_SZ_BYTES(hca_cap_union));
416 		break;
417 	default:
418 		mlx5_core_warn(dev,
419 			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
420 			       cap_type, cap_mode);
421 		err = -EINVAL;
422 		break;
423 	}
424 query_ex:
425 	kfree(out);
426 	return err;
427 }
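
QUERY_HCA_CAP multiplexes the capability group and the current/maximum selector into one op_mod word: the group index is shifted left one bit and the low bit selects current (1) versus maximum (0) values. A short sketch of the encoding; the numeric enum values below are placeholders for illustration, not taken from the headers:

#include <stdio.h>
#include <stdint.h>

/* Placeholder values purely to illustrate the bit layout. */
enum { CAP_GENERAL = 0, CAP_ATOMIC = 3 };
enum { OPMOD_GET_MAX = 0, OPMOD_GET_CUR = 1 };

static uint16_t
query_cap_opmod(int cap_type, int cap_mode)
{
	return ((uint16_t)cap_type << 1) | (cap_mode & 0x01);
}

int
main(void)
{
	printf("general/max -> 0x%x\n", query_cap_opmod(CAP_GENERAL, OPMOD_GET_MAX));
	printf("atomic/cur  -> 0x%x\n", query_cap_opmod(CAP_ATOMIC, OPMOD_GET_CUR));
	return (0);
}
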
428 
429 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
430 {
431 	int ret;
432 
433 	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
434 	if (ret)
435 		return ret;
436 
437 	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
438 }
439 
440 static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
441 {
442 	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
443 
444 	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
445 
446 	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
447 }
448 
449 static int handle_hca_cap(struct mlx5_core_dev *dev)
450 {
451 	void *set_ctx = NULL;
452 	struct mlx5_profile *prof = dev->profile;
453 	int err = -ENOMEM;
454 	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
455 	void *set_hca_cap;
456 
457 	set_ctx = kzalloc(set_sz, GFP_KERNEL);
458 
459 	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
460 	if (err)
461 		goto query_ex;
462 
463 	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
464 				   capability);
465 	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
466 	       MLX5_ST_SZ_BYTES(cmd_hca_cap));
467 
468 	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
469 		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
470 		      128);
471 	/* we limit the size of the pkey table to 128 entries for now */
472 	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
473 		 to_fw_pkey_sz(dev, 128));
474 
475 	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
476 		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
477 			 prof->log_max_qp);
478 
479 	/* disable cmdif checksum */
480 	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
481 
482 	/* Enable 4K UAR only when HCA supports it and page size is bigger
483 	 * than 4K.
484 	 */
485 	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
486 		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
487 
488 	/* enable drain sigerr */
489 	MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);
490 
491 	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
492 
493 	err = set_caps(dev, set_ctx, set_sz);
494 
495 query_ex:
496 	kfree(set_ctx);
497 	return err;
498 }
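
log_uar_page_sz is expressed as a power of two in 4 KB units, hence PAGE_SHIFT - 12: a 4 KB kernel page gives 0, a 64 KB page (PAGE_SHIFT = 16) gives 4. A one-off sketch of the relation:

#include <stdio.h>

int
main(void)
{
	/* UAR page size encoding: log2(PAGE_SIZE) - log2(4096). */
	for (int page_shift = 12; page_shift <= 16; page_shift += 2)
		printf("PAGE_SHIFT %d -> log_uar_page_sz %d (UAR page %lu bytes)\n",
		    page_shift, page_shift - 12, 4096UL << (page_shift - 12));
	return (0);
}
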
499 
500 static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
501 {
502 	void *set_ctx;
503 	void *set_hca_cap;
504 	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
505 	int req_endianness;
506 	int err;
507 
508 	if (MLX5_CAP_GEN(dev, atomic)) {
509 		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
510 		if (err)
511 			return err;
512 	} else {
513 		return 0;
514 	}
515 
516 	req_endianness =
517 		MLX5_CAP_ATOMIC(dev,
518 				supported_atomic_req_8B_endianess_mode_1);
519 
520 	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
521 		return 0;
522 
523 	set_ctx = kzalloc(set_sz, GFP_KERNEL);
524 	if (!set_ctx)
525 		return -ENOMEM;
526 
527 	MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
528 		 MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
529 	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
530 
531 	/* Set requestor to host endianness */
532 	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
533 		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
534 
535 	err = set_caps(dev, set_ctx, set_sz);
536 
537 	kfree(set_ctx);
538 	return err;
539 }
540 
541 static int set_hca_ctrl(struct mlx5_core_dev *dev)
542 {
543 	struct mlx5_reg_host_endianess he_in;
544 	struct mlx5_reg_host_endianess he_out;
545 	int err;
546 
547 	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
548 	    !MLX5_CAP_GEN(dev, roce))
549 		return 0;
550 
551 	memset(&he_in, 0, sizeof(he_in));
552 	he_in.he = MLX5_SET_HOST_ENDIANNESS;
553 	err = mlx5_core_access_reg(dev, &he_in,  sizeof(he_in),
554 					&he_out, sizeof(he_out),
555 					MLX5_REG_HOST_ENDIANNESS, 0, 1);
556 	return err;
557 }
558 
559 static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
560 {
561 	int ret = 0;
562 
563 	/* Disable local_lb by default */
564 	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
565 		ret = mlx5_nic_vport_update_local_lb(dev, false);
566 
567 	return ret;
568 }
569 
570 static int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
571 {
572 	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
573 	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
574 
575 	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
576 	MLX5_SET(enable_hca_in, in, function_id, func_id);
577 	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
578 }
579 
580 static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
581 {
582 	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
583 	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
584 
585 	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
586 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
587 }
588 
589 static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
590 {
591 	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
592 	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
593 	u32 sup_issi;
594 	int err;
595 
596 	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
597 
598 	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in), query_out, sizeof(query_out));
599 	if (err) {
600 		u32 syndrome;
601 		u8 status;
602 
603 		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
604 		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
605 			mlx5_core_dbg(dev, "Only ISSI 0 is supported\n");
606 			return 0;
607 		}
608 
609 		mlx5_core_err(dev, "failed to query ISSI\n");
610 		return err;
611 	}
612 
613 	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
614 
615 	if (sup_issi & (1 << 1)) {
616 		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]	 = {0};
617 		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
618 
619 		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
620 		MLX5_SET(set_issi_in, set_in, current_issi, 1);
621 
622 		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in), set_out, sizeof(set_out));
623 		if (err) {
624 			mlx5_core_err(dev, "failed to set ISSI=1 err(%d)\n", err);
625 			return err;
626 		}
627 
628 		dev->issi = 1;
629 
630 		return 0;
631 	} else if (sup_issi & (1 << 0)) {
632 		return 0;
633 	}
634 
635 	return -ENOTSUPP;
636 }
637 
638 
639 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
640 {
641 	struct mlx5_eq_table *table = &dev->priv.eq_table;
642 	struct mlx5_eq *eq;
643 	int err = -ENOENT;
644 
645 	spin_lock(&table->lock);
646 	list_for_each_entry(eq, &table->comp_eqs_list, list) {
647 		if (eq->index == vector) {
648 			*eqn = eq->eqn;
649 			*irqn = eq->irqn;
650 			err = 0;
651 			break;
652 		}
653 	}
654 	spin_unlock(&table->lock);
655 
656 	return err;
657 }
658 EXPORT_SYMBOL(mlx5_vector2eqn);
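
mlx5_vector2eqn() is the exported lookup that consumers such as the Ethernet and InfiniBand drivers use to translate a completion-vector index into the EQ number and IRQ a CQ should be bound to. A hedged usage sketch; the wrapper below and its names are illustrative, not quoted from the real callers:

/* Sketch of a typical caller, assuming a valid mlx5_core_dev pointer. */
static int
bind_cq_to_vector(struct mlx5_core_dev *mdev, int vector, u32 *out_eqn)
{
	int eqn, irqn, err;

	err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
	if (err != 0)
		return (err);	/* vector not found in comp_eqs_list */
	*out_eqn = eqn;		/* e.g. stored into the CQ create mailbox */
	return (0);
}
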
659 
660 static void free_comp_eqs(struct mlx5_core_dev *dev)
661 {
662 	struct mlx5_eq_table *table = &dev->priv.eq_table;
663 	struct mlx5_eq *eq, *n;
664 
665 	spin_lock(&table->lock);
666 	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
667 		list_del(&eq->list);
668 		spin_unlock(&table->lock);
669 		if (mlx5_destroy_unmap_eq(dev, eq))
670 			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
671 				       eq->eqn);
672 		kfree(eq);
673 		spin_lock(&table->lock);
674 	}
675 	spin_unlock(&table->lock);
676 }
677 
678 static int alloc_comp_eqs(struct mlx5_core_dev *dev)
679 {
680 	struct mlx5_eq_table *table = &dev->priv.eq_table;
681 	struct mlx5_eq *eq;
682 	int ncomp_vec;
683 	int nent;
684 	int err;
685 	int i;
686 
687 	INIT_LIST_HEAD(&table->comp_eqs_list);
688 	ncomp_vec = table->num_comp_vectors;
689 	nent = MLX5_COMP_EQ_SIZE;
690 	for (i = 0; i < ncomp_vec; i++) {
691 		eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
692 
693 		err = mlx5_create_map_eq(dev, eq,
694 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0);
695 		if (err) {
696 			kfree(eq);
697 			goto clean;
698 		}
699 		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
700 		eq->index = i;
701 		spin_lock(&table->lock);
702 		list_add_tail(&eq->list, &table->comp_eqs_list);
703 		spin_unlock(&table->lock);
704 	}
705 
706 	return 0;
707 
708 clean:
709 	free_comp_eqs(dev);
710 	return err;
711 }
712 
713 static inline int fw_initializing(struct mlx5_core_dev *dev)
714 {
715 	return ioread32be(&dev->iseg->initializing) >> 31;
716 }
717 
718 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
719 			u32 warn_time_mili)
720 {
721 	int warn = jiffies + msecs_to_jiffies(warn_time_mili);
722 	int end = jiffies + msecs_to_jiffies(max_wait_mili);
723 	int err = 0;
724 
725 	MPASS(max_wait_mili > warn_time_mili);
726 
727 	while (fw_initializing(dev) == 1) {
728 		if (time_after(jiffies, end)) {
729 			err = -EBUSY;
730 			break;
731 		}
732 		if (warn_time_mili && time_after(jiffies, warn)) {
733 			mlx5_core_warn(dev,
734 			    "Waiting for FW initialization, timeout abort in %u s\n",
735 			    (unsigned)(jiffies_to_msecs(end - warn) / 1000));
736 			warn = jiffies + msecs_to_jiffies(warn_time_mili);
737 		}
738 		msleep(FW_INIT_WAIT_MS);
739 	}
740 
741 	if (err != 0)
742 		mlx5_core_dbg(dev, "Full initializing bit dword = 0x%x\n",
743 		    ioread32be(&dev->iseg->initializing));
744 
745 	return err;
746 }
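
wait_fw_init() is a plain bounded poll: sleep in FW_INIT_WAIT_MS slices, warn every warn_time_mili, and give up with -EBUSY once max_wait_mili has elapsed. A minimal standalone rendering of the same deadline pattern using millisecond counters instead of jiffies; the ready() predicate is a hypothetical stand-in for reading iseg->initializing, and the sleep between probes is omitted:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical readiness probe; the driver reads the initializing bit instead. */
static bool ready(int tick) { return (tick >= 7); }

static int
poll_until_ready(int max_wait_ms, int step_ms)
{
	for (int waited = 0, tick = 0; waited < max_wait_ms; waited += step_ms, tick++)
		if (ready(tick))
			return (0);	/* firmware cleared the busy bit */
	return (-1);			/* deadline hit: caller aborts the load */
}

int
main(void)
{
	printf("result: %d\n", poll_until_ready(2000, 100));
	return (0);
}
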
747 
748 static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
749 {
750 	struct mlx5_device_context *dev_ctx;
751 	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
752 
753 	dev_ctx = kzalloc_node(sizeof(*dev_ctx), GFP_KERNEL, priv->numa_node);
754 	if (!dev_ctx)
755 		return;
756 
757 	dev_ctx->intf    = intf;
758 	CURVNET_SET_QUIET(vnet0);
759 	dev_ctx->context = intf->add(dev);
760 	CURVNET_RESTORE();
761 
762 	if (dev_ctx->context) {
763 		spin_lock_irq(&priv->ctx_lock);
764 		list_add_tail(&dev_ctx->list, &priv->ctx_list);
765 		spin_unlock_irq(&priv->ctx_lock);
766 	} else {
767 		kfree(dev_ctx);
768 	}
769 }
770 
771 static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
772 {
773 	struct mlx5_device_context *dev_ctx;
774 	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
775 
776 	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
777 		if (dev_ctx->intf == intf) {
778 			spin_lock_irq(&priv->ctx_lock);
779 			list_del(&dev_ctx->list);
780 			spin_unlock_irq(&priv->ctx_lock);
781 
782 			intf->remove(dev, dev_ctx->context);
783 			kfree(dev_ctx);
784 			return;
785 		}
786 }
787 
788 int
789 mlx5_register_device(struct mlx5_core_dev *dev)
790 {
791 	struct mlx5_priv *priv = &dev->priv;
792 	struct mlx5_interface *intf;
793 
794 	mutex_lock(&intf_mutex);
795 	list_add_tail(&priv->dev_list, &dev_list);
796 	list_for_each_entry(intf, &intf_list, list)
797 		mlx5_add_device(intf, priv);
798 	mutex_unlock(&intf_mutex);
799 
800 	return 0;
801 }
802 
803 void
804 mlx5_unregister_device(struct mlx5_core_dev *dev)
805 {
806 	struct mlx5_priv *priv = &dev->priv;
807 	struct mlx5_interface *intf;
808 
809 	mutex_lock(&intf_mutex);
810 	list_for_each_entry(intf, &intf_list, list)
811 		mlx5_remove_device(intf, priv);
812 	list_del(&priv->dev_list);
813 	mutex_unlock(&intf_mutex);
814 }
815 
816 int mlx5_register_interface(struct mlx5_interface *intf)
817 {
818 	struct mlx5_priv *priv;
819 
820 	if (!intf->add || !intf->remove)
821 		return -EINVAL;
822 
823 	mutex_lock(&intf_mutex);
824 	list_add_tail(&intf->list, &intf_list);
825 	list_for_each_entry(priv, &dev_list, dev_list)
826 		mlx5_add_device(intf, priv);
827 	mutex_unlock(&intf_mutex);
828 
829 	return 0;
830 }
831 EXPORT_SYMBOL(mlx5_register_interface);
832 
833 void mlx5_unregister_interface(struct mlx5_interface *intf)
834 {
835 	struct mlx5_priv *priv;
836 
837 	mutex_lock(&intf_mutex);
838 	list_for_each_entry(priv, &dev_list, dev_list)
839 		mlx5_remove_device(intf, priv);
840 	list_del(&intf->list);
841 	mutex_unlock(&intf_mutex);
842 }
843 EXPORT_SYMBOL(mlx5_unregister_interface);
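
The add/remove pairing above is the whole client contract: a consumer fills in a struct mlx5_interface, registers it once at module load, and has ->add() invoked for every core device already present plus any probed later; ->remove() runs on unregister or device teardown. A hedged sketch of a minimal consumer; the my_* names and callback bodies are hypothetical:

/* Hypothetical per-device state returned from ->add(). */
struct my_ctx {
	struct mlx5_core_dev *mdev;
};

static void *
my_add(struct mlx5_core_dev *mdev)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx != NULL)
		ctx->mdev = mdev;
	return (ctx);		/* NULL means "skip this device" */
}

static void
my_remove(struct mlx5_core_dev *mdev, void *context)
{
	kfree(context);
}

static struct mlx5_interface my_intf = {
	.add    = my_add,
	.remove = my_remove,
};

/* At module load/unload:
 *	mlx5_register_interface(&my_intf);
 *	...
 *	mlx5_unregister_interface(&my_intf);
 */
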
844 
845 void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
846 {
847 	struct mlx5_priv *priv = &mdev->priv;
848 	struct mlx5_device_context *dev_ctx;
849 	unsigned long flags;
850 	void *result = NULL;
851 
852 	spin_lock_irqsave(&priv->ctx_lock, flags);
853 
854 	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
855 		if ((dev_ctx->intf->protocol == protocol) &&
856 		    dev_ctx->intf->get_dev) {
857 			result = dev_ctx->intf->get_dev(dev_ctx->context);
858 			break;
859 		}
860 
861 	spin_unlock_irqrestore(&priv->ctx_lock, flags);
862 
863 	return result;
864 }
865 EXPORT_SYMBOL(mlx5_get_protocol_dev);
866 
867 static int mlx5_auto_fw_update;
868 SYSCTL_INT(_hw_mlx5, OID_AUTO, auto_fw_update, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
869     &mlx5_auto_fw_update, 0,
870     "Allow automatic firmware update on driver start");
871 static int
872 mlx5_firmware_update(struct mlx5_core_dev *dev)
873 {
874 	const struct firmware *fw;
875 	int err;
876 
877 	TUNABLE_INT_FETCH("hw.mlx5.auto_fw_update", &mlx5_auto_fw_update);
878 	if (!mlx5_auto_fw_update)
879 		return (0);
880 	fw = firmware_get("mlx5fw_mfa");
881 	if (fw) {
882 		err = mlx5_firmware_flash(dev, fw);
883 		firmware_put(fw, FIRMWARE_UNLOAD);
884 	} else {
885 		return (-ENOENT);
886 	}
887 
888 	return err;
889 }
890 
891 static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
892 {
893 	struct pci_dev *pdev = dev->pdev;
894 	int err;
895 
896 	pdev = dev->pdev;
897 	pci_set_drvdata(dev->pdev, dev);
898 	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
899 	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
900 
901 	mutex_init(&priv->pgdir_mutex);
902 	INIT_LIST_HEAD(&priv->pgdir_list);
903 	spin_lock_init(&priv->mkey_lock);
904 
905 	err = mlx5_pci_enable_device(dev);
906 	if (err) {
907 		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
908 		goto err_dbg;
909 	}
910 
911 	err = request_bar(pdev);
912 	if (err) {
913 		mlx5_core_err(dev, "error requesting BARs, aborting\n");
914 		goto err_disable;
915 	}
916 
917 	pci_set_master(pdev);
918 
919 	err = set_dma_caps(pdev);
920 	if (err) {
921 		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
922 		goto err_clr_master;
923 	}
924 
925 	dev->iseg_base = pci_resource_start(dev->pdev, 0);
926 	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
927 	if (!dev->iseg) {
928 		err = -ENOMEM;
929 		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
930 		goto err_clr_master;
931 	}
932 
933 	return 0;
934 
935 err_clr_master:
936 	release_bar(dev->pdev);
937 err_disable:
938 	mlx5_pci_disable_device(dev);
939 err_dbg:
940 	return err;
941 }
942 
943 static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
944 {
945 #ifdef PCI_IOV
946 	if (MLX5_CAP_GEN(dev, eswitch_flow_table))
947 		pci_iov_detach(dev->pdev->dev.bsddev);
948 #endif
949 	iounmap(dev->iseg);
950 	release_bar(dev->pdev);
951 	mlx5_pci_disable_device(dev);
952 }
953 
954 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
955 {
956 	int err;
957 
958 	err = mlx5_vsc_find_cap(dev);
959 	if (err)
960 		mlx5_core_warn(dev, "Unable to find vendor specific capabilities\n");
961 
962 	err = mlx5_query_hca_caps(dev);
963 	if (err) {
964 		mlx5_core_err(dev, "query hca failed\n");
965 		goto out;
966 	}
967 
968 	err = mlx5_query_board_id(dev);
969 	if (err) {
970 		mlx5_core_err(dev, "query board id failed\n");
971 		goto out;
972 	}
973 
974 	err = mlx5_eq_init(dev);
975 	if (err) {
976 		mlx5_core_err(dev, "failed to initialize eq\n");
977 		goto out;
978 	}
979 
980 	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
981 
982 	err = mlx5_init_cq_table(dev);
983 	if (err) {
984 		mlx5_core_err(dev, "failed to initialize cq table\n");
985 		goto err_eq_cleanup;
986 	}
987 
988 	mlx5_init_qp_table(dev);
989 	mlx5_init_srq_table(dev);
990 	mlx5_init_mr_table(dev);
991 
992 	mlx5_init_reserved_gids(dev);
993 	mlx5_fpga_init(dev);
994 
995 #ifdef RATELIMIT
996 	err = mlx5_init_rl_table(dev);
997 	if (err) {
998 		mlx5_core_err(dev, "Failed to init rate limiting\n");
999 		goto err_tables_cleanup;
1000 	}
1001 #endif
1002 	return 0;
1003 
1004 #ifdef RATELIMIT
1005 err_tables_cleanup:
1006 	mlx5_cleanup_mr_table(dev);
1007 	mlx5_cleanup_srq_table(dev);
1008 	mlx5_cleanup_qp_table(dev);
1009 	mlx5_cleanup_cq_table(dev);
1010 #endif
1011 
1012 err_eq_cleanup:
1013 	mlx5_eq_cleanup(dev);
1014 
1015 out:
1016 	return err;
1017 }
1018 
1019 static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
1020 {
1021 #ifdef RATELIMIT
1022 	mlx5_cleanup_rl_table(dev);
1023 #endif
1024 	mlx5_fpga_cleanup(dev);
1025 	mlx5_cleanup_reserved_gids(dev);
1026 	mlx5_cleanup_mr_table(dev);
1027 	mlx5_cleanup_srq_table(dev);
1028 	mlx5_cleanup_qp_table(dev);
1029 	mlx5_cleanup_cq_table(dev);
1030 	mlx5_eq_cleanup(dev);
1031 }
1032 
1033 static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1034 			 bool boot)
1035 {
1036 	int err;
1037 
1038 	mutex_lock(&dev->intf_state_mutex);
1039 	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
1040 		mlx5_core_warn(dev, "interface is up, NOP\n");
1041 		goto out;
1042 	}
1043 
1044 	mlx5_core_dbg(dev, "firmware version: %d.%d.%d\n",
1045 	    fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
1046 
1047 	/*
1048 	 * On load, remove any previous indication of internal error;
1049 	 * the device is up.
1050 	 */
1051 	dev->state = MLX5_DEVICE_STATE_UP;
1052 
1053 	/* Wait for firmware to accept the initialization segment configuration.
1054 	 */
1055 	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI,
1056 	    FW_INIT_WARN_MESSAGE_INTERVAL);
1057 	if (err) {
1058 		dev_err(&dev->pdev->dev,
1059 		    "Firmware over %d ms in pre-initializing state, aborting\n",
1060 		    FW_PRE_INIT_TIMEOUT_MILI);
1061 		goto out_err;
1062 	}
1063 
1064 	err = mlx5_cmd_init(dev);
1065 	if (err) {
1066 		mlx5_core_err(dev,
1067 		    "Failed initializing command interface, aborting\n");
1068 		goto out_err;
1069 	}
1070 
1071 	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
1072 	if (err) {
1073 		mlx5_core_err(dev,
1074 		    "Firmware over %d ms in initializing state, aborting\n",
1075 		    FW_INIT_TIMEOUT_MILI);
1076 		goto err_cmd_cleanup;
1077 	}
1078 
1079 	err = mlx5_core_enable_hca(dev, 0);
1080 	if (err) {
1081 		mlx5_core_err(dev, "enable hca failed\n");
1082 		goto err_cmd_cleanup;
1083 	}
1084 
1085 	err = mlx5_core_set_issi(dev);
1086 	if (err) {
1087 		mlx5_core_err(dev, "failed to set issi\n");
1088 		goto err_disable_hca;
1089 	}
1090 
1091 	err = mlx5_pagealloc_start(dev);
1092 	if (err) {
1093 		mlx5_core_err(dev, "mlx5_pagealloc_start failed\n");
1094 		goto err_disable_hca;
1095 	}
1096 
1097 	err = mlx5_satisfy_startup_pages(dev, 1);
1098 	if (err) {
1099 		mlx5_core_err(dev, "failed to allocate boot pages\n");
1100 		goto err_pagealloc_stop;
1101 	}
1102 
1103 	err = set_hca_ctrl(dev);
1104 	if (err) {
1105 		mlx5_core_err(dev, "set_hca_ctrl failed\n");
1106 		goto reclaim_boot_pages;
1107 	}
1108 
1109 	err = handle_hca_cap(dev);
1110 	if (err) {
1111 		mlx5_core_err(dev, "handle_hca_cap failed\n");
1112 		goto reclaim_boot_pages;
1113 	}
1114 
1115 	err = handle_hca_cap_atomic(dev);
1116 	if (err) {
1117 		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
1118 		goto reclaim_boot_pages;
1119 	}
1120 
1121 	err = mlx5_satisfy_startup_pages(dev, 0);
1122 	if (err) {
1123 		mlx5_core_err(dev, "failed to allocate init pages\n");
1124 		goto reclaim_boot_pages;
1125 	}
1126 
1127 	err = mlx5_cmd_init_hca(dev);
1128 	if (err) {
1129 		mlx5_core_err(dev, "init hca failed\n");
1130 		goto reclaim_boot_pages;
1131 	}
1132 
1133 	mlx5_set_driver_version(dev);
1134 
1135 	mlx5_start_health_poll(dev);
1136 
1137 	if (boot && (err = mlx5_init_once(dev, priv))) {
1138 		mlx5_core_err(dev, "sw objs init failed\n");
1139 		goto err_stop_poll;
1140 	}
1141 
1142 	dev->priv.uar = mlx5_get_uars_page(dev);
1143 	if (IS_ERR(dev->priv.uar)) {
1144 		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
1145 		err = PTR_ERR(dev->priv.uar);
1146 		goto err_cleanup_once;
1147 	}
1148 
1149 	err = mlx5_enable_msix(dev);
1150 	if (err) {
1151 		mlx5_core_err(dev, "enable msix failed\n");
1152 		goto err_cleanup_uar;
1153 	}
1154 
1155 	err = mlx5_start_eqs(dev);
1156 	if (err) {
1157 		mlx5_core_err(dev, "Failed to start pages and async EQs\n");
1158 		goto err_disable_msix;
1159 	}
1160 
1161 	err = alloc_comp_eqs(dev);
1162 	if (err) {
1163 		mlx5_core_err(dev, "Failed to alloc completion EQs\n");
1164 		goto err_stop_eqs;
1165 	}
1166 
1167 	err = mlx5_init_fs(dev);
1168 	if (err) {
1169 		mlx5_core_err(dev, "flow steering init %d\n", err);
1170 		goto err_free_comp_eqs;
1171 	}
1172 
1173 	err = mlx5_core_set_hca_defaults(dev);
1174 	if (err) {
1175 		mlx5_core_err(dev, "Failed to set HCA defaults %d\n", err);
1176 		goto err_free_comp_eqs;
1177 	}
1178 
1179 	err = mlx5_mpfs_init(dev);
1180 	if (err) {
1181 		mlx5_core_err(dev, "mpfs init failed %d\n", err);
1182 		goto err_fs;
1183 	}
1184 
1185 	err = mlx5_fpga_device_start(dev);
1186 	if (err) {
1187 		mlx5_core_err(dev, "fpga device start failed %d\n", err);
1188 		goto err_mpfs;
1189 	}
1190 
1191 	err = mlx5_register_device(dev);
1192 	if (err) {
1193 		mlx5_core_err(dev, "mlx5_register_device failed %d\n", err);
1194 		goto err_fpga;
1195 	}
1196 
1197 	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1198 
1199 out:
1200 	mutex_unlock(&dev->intf_state_mutex);
1201 	return 0;
1202 
1203 err_fpga:
1204 	mlx5_fpga_device_stop(dev);
1205 
1206 err_mpfs:
1207 	mlx5_mpfs_destroy(dev);
1208 
1209 err_fs:
1210 	mlx5_cleanup_fs(dev);
1211 
1212 err_free_comp_eqs:
1213 	free_comp_eqs(dev);
1214 
1215 err_stop_eqs:
1216 	mlx5_stop_eqs(dev);
1217 
1218 err_disable_msix:
1219 	mlx5_disable_msix(dev);
1220 
1221 err_cleanup_uar:
1222 	mlx5_put_uars_page(dev, dev->priv.uar);
1223 
1224 err_cleanup_once:
1225 	if (boot)
1226 		mlx5_cleanup_once(dev);
1227 
1228 err_stop_poll:
1229 	mlx5_stop_health_poll(dev, boot);
1230 	if (mlx5_cmd_teardown_hca(dev)) {
1231 		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
1232 		goto out_err;
1233 	}
1234 
1235 reclaim_boot_pages:
1236 	mlx5_reclaim_startup_pages(dev);
1237 
1238 err_pagealloc_stop:
1239 	mlx5_pagealloc_stop(dev);
1240 
1241 err_disable_hca:
1242 	mlx5_core_disable_hca(dev);
1243 
1244 err_cmd_cleanup:
1245 	mlx5_cmd_cleanup(dev);
1246 
1247 out_err:
1248 	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
1249 	mutex_unlock(&dev->intf_state_mutex);
1250 
1251 	return err;
1252 }
1253 
1254 static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1255 			   bool cleanup)
1256 {
1257 	int err = 0;
1258 
1259 	if (cleanup)
1260 		mlx5_drain_health_recovery(dev);
1261 
1262 	mutex_lock(&dev->intf_state_mutex);
1263 	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
1264 		mlx5_core_warn(dev, "%s: interface is down, NOP\n", __func__);
1265 		if (cleanup)
1266 			mlx5_cleanup_once(dev);
1267 		goto out;
1268 	}
1269 
1270 	mlx5_unregister_device(dev);
1271 
1272 	mlx5_eswitch_cleanup(dev->priv.eswitch);
1273 	mlx5_fpga_device_stop(dev);
1274 	mlx5_mpfs_destroy(dev);
1275 	mlx5_cleanup_fs(dev);
1276 	mlx5_wait_for_reclaim_vfs_pages(dev);
1277 	free_comp_eqs(dev);
1278 	mlx5_stop_eqs(dev);
1279 	mlx5_disable_msix(dev);
1280 	mlx5_put_uars_page(dev, dev->priv.uar);
1281 	if (cleanup)
1282 		mlx5_cleanup_once(dev);
1283 	mlx5_stop_health_poll(dev, cleanup);
1284 	err = mlx5_cmd_teardown_hca(dev);
1285 	if (err) {
1286 		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
1287 		goto out;
1288 	}
1289 	mlx5_pagealloc_stop(dev);
1290 	mlx5_reclaim_startup_pages(dev);
1291 	mlx5_core_disable_hca(dev);
1292 	mlx5_cmd_cleanup(dev);
1293 
1294 out:
1295 	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1296 	mutex_unlock(&dev->intf_state_mutex);
1297 	return err;
1298 }
1299 
1300 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
1301 		     unsigned long param)
1302 {
1303 	struct mlx5_priv *priv = &dev->priv;
1304 	struct mlx5_device_context *dev_ctx;
1305 	unsigned long flags;
1306 
1307 	spin_lock_irqsave(&priv->ctx_lock, flags);
1308 
1309 	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
1310 		if (dev_ctx->intf->event)
1311 			dev_ctx->intf->event(dev, dev_ctx->context, event, param);
1312 
1313 	spin_unlock_irqrestore(&priv->ctx_lock, flags);
1314 }
1315 
1316 struct mlx5_core_event_handler {
1317 	void (*event)(struct mlx5_core_dev *dev,
1318 		      enum mlx5_dev_event event,
1319 		      void *data);
1320 };
1321 
1322 #define	MLX5_STATS_DESC(a, b, c, d, e, ...) d, e,
1323 
1324 #define	MLX5_PORT_MODULE_ERROR_STATS(m)				\
1325 m(+1, u64, power_budget_exceeded, "power_budget", "Module Power Budget Exceeded") \
1326 m(+1, u64, long_range, "long_range", "Module Long Range for non MLNX cable/module") \
1327 m(+1, u64, bus_stuck, "bus_stuck", "Module Bus stuck (I2C or data shorted)") \
1328 m(+1, u64, no_eeprom, "no_eeprom", "No EEPROM/retry timeout") \
1329 m(+1, u64, enforce_part_number, "enforce_part_number", "Module Enforce part number list") \
1330 m(+1, u64, unknown_id, "unknown_id", "Module Unknown identifier") \
1331 m(+1, u64, high_temp, "high_temp", "Module High Temperature") \
1332 m(+1, u64, cable_shorted, "cable_shorted", "Module Cable is shorted") \
1333 m(+1, u64, pmd_type_not_enabled, "pmd_type_not_enabled", "PMD type is not enabled") \
1334 m(+1, u64, laster_tec_failure, "laster_tec_failure", "Laser TEC failure") \
1335 m(+1, u64, high_current, "high_current", "High current") \
1336 m(+1, u64, high_voltage, "high_voltage", "High voltage") \
1337 m(+1, u64, pcie_sys_power_slot_exceeded, "pcie_sys_power_slot_exceeded", "PCIe system power slot Exceeded") \
1338 m(+1, u64, high_power, "high_power", "High power")			\
1339 m(+1, u64, module_state_machine_fault, "module_state_machine_fault", "Module State Machine fault")
1340 
1341 static const char *mlx5_pme_err_desc[] = {
1342 	MLX5_PORT_MODULE_ERROR_STATS(MLX5_STATS_DESC)
1343 };
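
MLX5_PORT_MODULE_ERROR_STATS() is an X-macro: each m(...) row carries a field name, a sysctl name, and a human-readable description, and MLX5_STATS_DESC keeps only the last two strings. Expanding it into mlx5_pme_err_desc therefore yields a flat array of name/description pairs, which is why the loop in init_one() indexes mlx5_pme_err_desc[2 * i] and [2 * i + 1]. A stripped-down sketch of the same trick with two made-up rows:

#include <stdio.h>

/* Keep only the string pair from each row. */
#define STATS_DESC(a, b, c, d, e, ...)	d, e,

#define DEMO_STATS(m)						\
m(+1, u64, first_err,  "first_err",  "First demo error")	\
m(+1, u64, second_err, "second_err", "Second demo error")

static const char *demo_desc[] = {
	DEMO_STATS(STATS_DESC)
};

int
main(void)
{
	for (int i = 0; i < 2; i++)
		printf("%s: %s\n", demo_desc[2 * i], demo_desc[2 * i + 1]);
	return (0);
}
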
1344 
1345 static int init_one(struct pci_dev *pdev,
1346 		    const struct pci_device_id *id)
1347 {
1348 	struct mlx5_core_dev *dev;
1349 	struct mlx5_priv *priv;
1350 	device_t bsddev = pdev->dev.bsddev;
1351 #ifdef PCI_IOV
1352 	nvlist_t *pf_schema, *vf_schema;
1353 	int num_vfs, sriov_pos;
1354 #endif
1355 	int i,err;
1356 	int numa_node;
1357 	struct sysctl_oid *pme_sysctl_node;
1358 	struct sysctl_oid *pme_err_sysctl_node;
1359 	struct sysctl_oid *cap_sysctl_node;
1360 	struct sysctl_oid *current_cap_sysctl_node;
1361 	struct sysctl_oid *max_cap_sysctl_node;
1362 
1363 	printk_once("mlx5: %s", mlx5_version);
1364 
1365 	numa_node = dev_to_node(&pdev->dev);
1366 
1367 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, numa_node);
1368 
1369 	priv = &dev->priv;
1370 	priv->numa_node = numa_node;
1371 
1372 	if (id)
1373 		priv->pci_dev_data = id->driver_data;
1374 
1375 	if (mlx5_prof_sel < 0 || mlx5_prof_sel >= ARRAY_SIZE(profiles)) {
1376 		device_printf(bsddev,
1377 		    "WARN: selected profile out of range, selecting default (%d)\n",
1378 		    MLX5_DEFAULT_PROF);
1379 		mlx5_prof_sel = MLX5_DEFAULT_PROF;
1380 	}
1381 	dev->profile = &profiles[mlx5_prof_sel];
1382 	dev->pdev = pdev;
1383 	dev->event = mlx5_core_event;
1384 
1385 	/* Set desc */
1386 	device_set_desc(bsddev, mlx5_version);
1387 
1388 	sysctl_ctx_init(&dev->sysctl_ctx);
1389 	SYSCTL_ADD_INT(&dev->sysctl_ctx,
1390 	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
1391 	    OID_AUTO, "msix_eqvec", CTLFLAG_RDTUN, &dev->msix_eqvec, 0,
1392 	    "Maximum number of MSIX event queue vectors, if set");
1393 	SYSCTL_ADD_INT(&dev->sysctl_ctx,
1394 	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
1395 	    OID_AUTO, "power_status", CTLFLAG_RD, &dev->pwr_status, 0,
1396 	    "0:Invalid 1:Sufficient 2:Insufficient");
1397 	SYSCTL_ADD_INT(&dev->sysctl_ctx,
1398 	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
1399 	    OID_AUTO, "power_value", CTLFLAG_RD, &dev->pwr_value, 0,
1400 	    "Current power value in Watts");
1401 
1402 	pme_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
1403 	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
1404 	    OID_AUTO, "pme_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1405 	    "Port module event statistics");
1406 	if (pme_sysctl_node == NULL) {
1407 		err = -ENOMEM;
1408 		goto clean_sysctl_ctx;
1409 	}
1410 	pme_err_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
1411 	    SYSCTL_CHILDREN(pme_sysctl_node),
1412 	    OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1413 	    "Port module event error statistics");
1414 	if (pme_err_sysctl_node == NULL) {
1415 		err = -ENOMEM;
1416 		goto clean_sysctl_ctx;
1417 	}
1418 	SYSCTL_ADD_U64(&dev->sysctl_ctx,
1419 	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
1420 	    "module_plug", CTLFLAG_RD | CTLFLAG_MPSAFE,
1421 	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_PLUGGED_ENABLED],
1422 	    0, "Number of times module plugged");
1423 	SYSCTL_ADD_U64(&dev->sysctl_ctx,
1424 	    SYSCTL_CHILDREN(pme_sysctl_node), OID_AUTO,
1425 	    "module_unplug", CTLFLAG_RD | CTLFLAG_MPSAFE,
1426 	    &dev->priv.pme_stats.status_counters[MLX5_MODULE_STATUS_UNPLUGGED],
1427 	    0, "Number of times module unplugged");
1428 	for (i = 0 ; i < MLX5_MODULE_EVENT_ERROR_NUM; i++) {
1429 		SYSCTL_ADD_U64(&dev->sysctl_ctx,
1430 		    SYSCTL_CHILDREN(pme_err_sysctl_node), OID_AUTO,
1431 		    mlx5_pme_err_desc[2 * i], CTLFLAG_RD | CTLFLAG_MPSAFE,
1432 		    &dev->priv.pme_stats.error_counters[i],
1433 		    0, mlx5_pme_err_desc[2 * i + 1]);
1434 	}
1435 
1436 	cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
1437 	    SYSCTL_CHILDREN(device_get_sysctl_tree(bsddev)),
1438 	    OID_AUTO, "caps", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1439 	    "hardware capabilities raw bitstrings");
1440 	if (cap_sysctl_node == NULL) {
1441 		err = -ENOMEM;
1442 		goto clean_sysctl_ctx;
1443 	}
1444 	current_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
1445 	    SYSCTL_CHILDREN(cap_sysctl_node),
1446 	    OID_AUTO, "current", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1447 	    "");
1448 	if (current_cap_sysctl_node == NULL) {
1449 		err = -ENOMEM;
1450 		goto clean_sysctl_ctx;
1451 	}
1452 	max_cap_sysctl_node = SYSCTL_ADD_NODE(&dev->sysctl_ctx,
1453 	    SYSCTL_CHILDREN(cap_sysctl_node),
1454 	    OID_AUTO, "max", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
1455 	    "");
1456 	if (max_cap_sysctl_node == NULL) {
1457 		err = -ENOMEM;
1458 		goto clean_sysctl_ctx;
1459 	}
1460 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1461 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1462 	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
1463 	    &dev->hca_caps_cur[MLX5_CAP_GENERAL],
1464 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1465 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1466 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1467 	    OID_AUTO, "general", CTLFLAG_RD | CTLFLAG_MPSAFE,
1468 	    &dev->hca_caps_max[MLX5_CAP_GENERAL],
1469 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1470 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1471 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1472 	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
1473 	    &dev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS],
1474 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1475 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1476 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1477 	    OID_AUTO, "ether", CTLFLAG_RD | CTLFLAG_MPSAFE,
1478 	    &dev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS],
1479 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1480 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1481 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1482 	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
1483 	    &dev->hca_caps_cur[MLX5_CAP_ODP],
1484 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1485 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1486 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1487 	    OID_AUTO, "odp", CTLFLAG_RD | CTLFLAG_MPSAFE,
1488 	    &dev->hca_caps_max[MLX5_CAP_ODP],
1489 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1490 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1491 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1492 	    OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
1493 	    &dev->hca_caps_cur[MLX5_CAP_ATOMIC],
1494 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1495 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1496 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1497 	    OID_AUTO, "atomic", CTLFLAG_RD | CTLFLAG_MPSAFE,
1498 	    &dev->hca_caps_max[MLX5_CAP_ATOMIC],
1499 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1500 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1501 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1502 	    OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
1503 	    &dev->hca_caps_cur[MLX5_CAP_ROCE],
1504 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1505 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1506 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1507 	    OID_AUTO, "roce", CTLFLAG_RD | CTLFLAG_MPSAFE,
1508 	    &dev->hca_caps_max[MLX5_CAP_ROCE],
1509 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1510 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1511 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1512 	    OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
1513 	    &dev->hca_caps_cur[MLX5_CAP_IPOIB_OFFLOADS],
1514 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1515 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1516 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1517 	    OID_AUTO, "ipoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
1518 	    &dev->hca_caps_max[MLX5_CAP_IPOIB_OFFLOADS],
1519 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1520 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1521 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1522 	    OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
1523 	    &dev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS],
1524 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1525 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1526 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1527 	    OID_AUTO, "eoib", CTLFLAG_RD | CTLFLAG_MPSAFE,
1528 	    &dev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS],
1529 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1530 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1531 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1532 	    OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
1533 	    &dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE],
1534 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1535 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1536 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1537 	    OID_AUTO, "flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
1538 	    &dev->hca_caps_max[MLX5_CAP_FLOW_TABLE],
1539 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1540 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1541 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1542 	    OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
1543 	    &dev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE],
1544 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1545 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1546 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1547 	    OID_AUTO, "eswitch_flow_table", CTLFLAG_RD | CTLFLAG_MPSAFE,
1548 	    &dev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE],
1549 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1550 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1551 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1552 	    OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
1553 	    &dev->hca_caps_cur[MLX5_CAP_ESWITCH],
1554 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1555 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1556 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1557 	    OID_AUTO, "eswitch", CTLFLAG_RD | CTLFLAG_MPSAFE,
1558 	    &dev->hca_caps_max[MLX5_CAP_ESWITCH],
1559 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1560 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1561 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1562 	    OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
1563 	    &dev->hca_caps_cur[MLX5_CAP_SNAPSHOT],
1564 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1565 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1566 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1567 	    OID_AUTO, "snapshot", CTLFLAG_RD | CTLFLAG_MPSAFE,
1568 	    &dev->hca_caps_max[MLX5_CAP_SNAPSHOT],
1569 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1570 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1571 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1572 	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
1573 	    &dev->hca_caps_cur[MLX5_CAP_VECTOR_CALC],
1574 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1575 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1576 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1577 	    OID_AUTO, "vector_calc", CTLFLAG_RD | CTLFLAG_MPSAFE,
1578 	    &dev->hca_caps_max[MLX5_CAP_VECTOR_CALC],
1579 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1580 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1581 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1582 	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
1583 	    &dev->hca_caps_cur[MLX5_CAP_QOS],
1584 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1585 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1586 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1587 	    OID_AUTO, "qos", CTLFLAG_RD | CTLFLAG_MPSAFE,
1588 	    &dev->hca_caps_max[MLX5_CAP_QOS],
1589 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1590 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1591 	    SYSCTL_CHILDREN(current_cap_sysctl_node),
1592 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
1593 	    &dev->hca_caps_cur[MLX5_CAP_DEBUG],
1594 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1595 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1596 	    SYSCTL_CHILDREN(max_cap_sysctl_node),
1597 	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_MPSAFE,
1598 	    &dev->hca_caps_max[MLX5_CAP_DEBUG],
1599 	    MLX5_UN_SZ_DW(hca_cap_union) * sizeof(u32), "IU", "");
1600 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1601 	    SYSCTL_CHILDREN(cap_sysctl_node),
1602 	    OID_AUTO, "pcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
1603 	    &dev->caps.pcam, sizeof(dev->caps.pcam), "IU", "");
1604 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1605 	    SYSCTL_CHILDREN(cap_sysctl_node),
1606 	    OID_AUTO, "mcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
1607 	    &dev->caps.mcam, sizeof(dev->caps.mcam), "IU", "");
1608 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1609 	    SYSCTL_CHILDREN(cap_sysctl_node),
1610 	    OID_AUTO, "qcam", CTLFLAG_RD | CTLFLAG_MPSAFE,
1611 	    &dev->caps.qcam, sizeof(dev->caps.qcam), "IU", "");
1612 	SYSCTL_ADD_OPAQUE(&dev->sysctl_ctx,
1613 	    SYSCTL_CHILDREN(cap_sysctl_node),
1614 	    OID_AUTO, "fpga", CTLFLAG_RD | CTLFLAG_MPSAFE,
1615 	    &dev->caps.fpga, sizeof(dev->caps.fpga), "IU", "");
1616 
1617 	INIT_LIST_HEAD(&priv->ctx_list);
1618 	spin_lock_init(&priv->ctx_lock);
1619 	mutex_init(&dev->pci_status_mutex);
1620 	mutex_init(&dev->intf_state_mutex);
1621 
1622 	mutex_init(&priv->bfregs.reg_head.lock);
1623 	mutex_init(&priv->bfregs.wc_head.lock);
1624 	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
1625 	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
1626 
1627 	mtx_init(&dev->dump_lock, "mlx5dmp", NULL, MTX_DEF | MTX_NEW);
1628 	err = mlx5_pci_init(dev, priv);
1629 	if (err) {
1630 		mlx5_core_err(dev, "mlx5_pci_init failed %d\n", err);
1631 		goto clean_dev;
1632 	}
1633 
1634 	err = mlx5_health_init(dev);
1635 	if (err) {
1636 		mlx5_core_err(dev, "mlx5_health_init failed %d\n", err);
1637 		goto close_pci;
1638 	}
1639 
1640 	mlx5_pagealloc_init(dev);
1641 
1642 	err = mlx5_load_one(dev, priv, true);
1643 	if (err) {
1644 		mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);
1645 		goto clean_health;
1646 	}
1647 
1648 	mlx5_fwdump_prep(dev);
1649 
1650 	mlx5_firmware_update(dev);
1651 
1652 #ifdef PCI_IOV
1653 	if (MLX5_CAP_GEN(dev, vport_group_manager)) {
1654 		if (pci_find_extcap(bsddev, PCIZ_SRIOV, &sriov_pos) == 0) {
1655 			num_vfs = pci_read_config(bsddev, sriov_pos +
1656 			    PCIR_SRIOV_TOTAL_VFS, 2);
1657 		} else {
1658 			mlx5_core_info(dev, "cannot find SR-IOV PCIe cap\n");
1659 			num_vfs = 0;
1660 		}
1661 		err = mlx5_eswitch_init(dev, 1 + num_vfs);
1662 		if (err == 0) {
1663 			pf_schema = pci_iov_schema_alloc_node();
1664 			vf_schema = pci_iov_schema_alloc_node();
1665 			pci_iov_schema_add_unicast_mac(vf_schema,
1666 			    iov_mac_addr_name, 0, NULL);
1667 			pci_iov_schema_add_uint64(vf_schema, iov_node_guid_name,
1668 			    0, 0);
1669 			pci_iov_schema_add_uint64(vf_schema, iov_port_guid_name,
1670 			    0, 0);
1671 			err = pci_iov_attach(bsddev, pf_schema, vf_schema);
1672 			if (err != 0) {
1673 				device_printf(bsddev,
1674 			    "Failed to initialize SR-IOV support, error %d\n",
1675 				    err);
1676 			}
1677 		} else {
1678 			mlx5_core_err(dev, "eswitch init failed, error %d\n",
1679 			    err);
1680 		}
1681 	}
1682 #endif
1683 
1684 	pci_save_state(pdev);
1685 	return 0;
1686 
1687 clean_health:
1688 	mlx5_pagealloc_cleanup(dev);
1689 	mlx5_health_cleanup(dev);
1690 close_pci:
1691 	mlx5_pci_close(dev, priv);
1692 clean_dev:
1693 	mtx_destroy(&dev->dump_lock);
1694 clean_sysctl_ctx:
1695 	sysctl_ctx_free(&dev->sysctl_ctx);
1696 	kfree(dev);
1697 	return err;
1698 }
1699 
1700 static void remove_one(struct pci_dev *pdev)
1701 {
1702 	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
1703 	struct mlx5_priv *priv = &dev->priv;
1704 
1705 #ifdef PCI_IOV
1706 	pci_iov_detach(pdev->dev.bsddev);
1707 	mlx5_eswitch_disable_sriov(priv->eswitch);
1708 #endif
1709 
1710 	if (mlx5_unload_one(dev, priv, true)) {
1711 		mlx5_core_err(dev, "mlx5_unload_one() failed, leaked %lld bytes\n",
1712 		    (long long)(dev->priv.fw_pages * MLX5_ADAPTER_PAGE_SIZE));
1713 	}
1714 
1715 	mlx5_pagealloc_cleanup(dev);
1716 	mlx5_health_cleanup(dev);
1717 	mlx5_fwdump_clean(dev);
1718 	mlx5_pci_close(dev, priv);
1719 	mtx_destroy(&dev->dump_lock);
1720 	pci_set_drvdata(pdev, NULL);
1721 	sysctl_ctx_free(&dev->sysctl_ctx);
1722 	kfree(dev);
1723 }
1724 
1725 static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
1726 					      pci_channel_state_t state)
1727 {
1728 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1729 	struct mlx5_priv *priv = &dev->priv;
1730 
1731 	mlx5_core_info(dev, "%s was called\n", __func__);
1732 	mlx5_enter_error_state(dev, false);
1733 	mlx5_unload_one(dev, priv, false);
1734 
1735 	if (state) {
1736 		mlx5_drain_health_wq(dev);
1737 		mlx5_pci_disable_device(dev);
1738 	}
1739 
1740 	return state == pci_channel_io_perm_failure ?
1741 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
1742 }
1743 
1744 static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
1745 {
1746 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1747 	int err = 0;
1748 
1749 	mlx5_core_info(dev, "%s was called\n", __func__);
1750 
1751 	err = mlx5_pci_enable_device(dev);
1752 	if (err) {
1753 		mlx5_core_err(dev,
1754 		    "mlx5_pci_enable_device failed with error code: %d\n", err);
1755 		return PCI_ERS_RESULT_DISCONNECT;
1756 	}
1757 	pci_set_master(pdev);
1758 	pci_set_powerstate(pdev->dev.bsddev, PCI_POWERSTATE_D0);
1759 	pci_restore_state(pdev);
1760 	pci_save_state(pdev);
1761 
1762 	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
1763 }
1764 
1765 /* Wait for the device to show vital signs. For now we check
1766  * that we can read the device ID and that the health counter
1767  * shows a non-zero value different from 0xffffffff.
1768  */
1769 static void wait_vital(struct pci_dev *pdev)
1770 {
1771 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1772 	struct mlx5_core_health *health = &dev->priv.health;
1773 	const int niter = 100;
1774 	u32 count;
1775 	u16 did;
1776 	int i;
1777 
1778 	/* Wait for firmware to be ready after reset */
1779 	msleep(1000);
1780 	for (i = 0; i < niter; i++) {
1781 		if (pci_read_config_word(pdev, 2, &did)) {
1782 			mlx5_core_warn(dev, "failed reading config word\n");
1783 			break;
1784 		}
1785 		if (did == pdev->device) {
1786 			mlx5_core_info(dev,
1787 			    "device ID correctly read after %d iterations\n", i);
1788 			break;
1789 		}
1790 		msleep(50);
1791 	}
1792 	if (i == niter)
1793 		mlx5_core_warn(dev, "could not read device ID\n");
1794 
1795 	for (i = 0; i < niter; i++) {
1796 		count = ioread32be(health->health_counter);
1797 		if (count && count != 0xffffffff) {
1798 			mlx5_core_info(dev,
1799 			    "Counter value 0x%x after %d iterations\n", count, i);
1800 			break;
1801 		}
1802 		msleep(50);
1803 	}
1804 
1805 	if (i == niter)
1806 		mlx5_core_warn(dev, "could not read health counter\n");
1807 }
1808 
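/* PCI error handler "resume" callback: wait for the firmware to show
 * vital signs again and then reload the device.
 */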
1809 static void mlx5_pci_resume(struct pci_dev *pdev)
1810 {
1811 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1812 	struct mlx5_priv *priv = &dev->priv;
1813 	int err;
1814 
1815 	mlx5_core_info(dev, "%s was called\n", __func__);
1816 
1817 	wait_vital(pdev);
1818 
1819 	err = mlx5_load_one(dev, priv, false);
1820 	if (err)
1821 		mlx5_core_err(dev,
1822 		    "mlx5_load_one failed with error code: %d\n", err);
1823 	else
1824 		mlx5_core_info(dev, "device recovered\n");
1825 }
1826 
1827 static const struct pci_error_handlers mlx5_err_handler = {
1828 	.error_detected = mlx5_pci_err_detected,
1829 	.slot_reset	= mlx5_pci_slot_reset,
1830 	.resume		= mlx5_pci_resume
1831 };
1832 
1833 #ifdef PCI_IOV
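/* PCI_IOV init callback: clamp the requested number of VFs to the
 * e-switch vport range and enable SR-IOV in the e-switch.
 */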
1834 static int
1835 mlx5_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
1836 {
1837 	struct pci_dev *pdev;
1838 	struct mlx5_core_dev *core_dev;
1839 	struct mlx5_priv *priv;
1840 	int err;
1841 
1842 	pdev = device_get_softc(dev);
1843 	core_dev = pci_get_drvdata(pdev);
1844 	priv = &core_dev->priv;
1845 
1846 	if (priv->eswitch == NULL)
1847 		return (ENXIO);
1848 	if (priv->eswitch->total_vports < num_vfs + 1)
1849 		num_vfs = priv->eswitch->total_vports - 1;
1850 	err = mlx5_eswitch_enable_sriov(priv->eswitch, num_vfs);
1851 	return (-err);
1852 }
1853 
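/* PCI_IOV uninit callback: disable SR-IOV in the e-switch. */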
1854 static void
1855 mlx5_iov_uninit(device_t dev)
1856 {
1857 	struct pci_dev *pdev;
1858 	struct mlx5_core_dev *core_dev;
1859 	struct mlx5_priv *priv;
1860 
1861 	pdev = device_get_softc(dev);
1862 	core_dev = pci_get_drvdata(pdev);
1863 	priv = &core_dev->priv;
1864 
1865 	mlx5_eswitch_disable_sriov(priv->eswitch);
1866 }
1867 
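/* PCI_IOV add-VF callback: apply the optional MAC address and node/port
 * GUIDs from the VF configuration, set the vport state to
 * VPORT_STATE_FOLLOW and enable the VF HCA.
 */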
1868 static int
1869 mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
1870 {
1871 	struct pci_dev *pdev;
1872 	struct mlx5_core_dev *core_dev;
1873 	struct mlx5_priv *priv;
1874 	const void *mac;
1875 	size_t mac_size;
1876 	uint64_t node_guid, port_guid;
1877 	int error;
1878 
1879 	pdev = device_get_softc(dev);
1880 	core_dev = pci_get_drvdata(pdev);
1881 	priv = &core_dev->priv;
1882 
1883 	if (vfnum + 1 >= priv->eswitch->total_vports)
1884 		return (ENXIO);
1885 
1886 	if (nvlist_exists_binary(vf_config, iov_mac_addr_name)) {
1887 		mac = nvlist_get_binary(vf_config, iov_mac_addr_name,
1888 		    &mac_size);
1889 		error = -mlx5_eswitch_set_vport_mac(priv->eswitch,
1890 		    vfnum + 1, __DECONST(u8 *, mac));
1891 		if (error != 0) {
1892 			mlx5_core_err(core_dev,
1893 			    "setting MAC for VF %d failed, error %d\n",
1894 			    vfnum + 1, error);
1895 		}
1896 	}
1897 
1898 	if (nvlist_exists_number(vf_config, iov_node_guid_name)) {
1899 		node_guid = nvlist_get_number(vf_config, iov_node_guid_name);
1900 		error = -mlx5_modify_nic_vport_node_guid(core_dev, vfnum + 1,
1901 		    node_guid);
1902 		if (error != 0) {
1903 			mlx5_core_err(core_dev,
1904 			    "modifying node GUID for VF %d failed, error %d\n",
1905 			    vfnum + 1, error);
1906 		}
1907 	}
1908 
1909 	if (nvlist_exists_number(vf_config, iov_port_guid_name)) {
1910 		port_guid = nvlist_get_number(vf_config, iov_port_guid_name);
1911 		error = -mlx5_modify_nic_vport_port_guid(core_dev, vfnum + 1,
1912 		    port_guid);
1913 		if (error != 0) {
1914 			mlx5_core_err(core_dev,
1915 			    "modifying port GUID for VF %d failed, error %d\n",
1916 			    vfnum + 1, error);
1917 		}
1918 	}
1919 
1920 	error = -mlx5_eswitch_set_vport_state(priv->eswitch, vfnum + 1,
1921 	    VPORT_STATE_FOLLOW);
1922 	if (error != 0) {
1923 		mlx5_core_err(core_dev,
1924 		    "upping vport for VF %d failed, error %d\n",
1925 		    vfnum + 1, error);
1926 	}
1927 	error = -mlx5_core_enable_hca(core_dev, vfnum + 1);
1928 	if (error != 0) {
1929 		mlx5_core_err(core_dev, "enabling VF %d failed, error %d\n",
1930 		    vfnum + 1, error);
1931 	}
1932 	return (error);
1933 }
1934 #endif
1935 
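/* Try to tear the device down through the firmware fast/force teardown
 * commands instead of the regular unload path.  Returns -EOPNOTSUPP when
 * fast unload is disabled or unsupported and -EAGAIN when the device is
 * already in internal error state; on success the device is left in the
 * error state.
 */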
1936 static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
1937 {
1938 	bool fast_teardown, force_teardown;
1939 	int err;
1940 
1941 	if (!mlx5_fast_unload_enabled) {
1942 		mlx5_core_dbg(dev, "fast unload is disabled by user\n");
1943 		return -EOPNOTSUPP;
1944 	}
1945 
1946 	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
1947 	force_teardown = MLX5_CAP_GEN(dev, force_teardown);
1948 
1949 	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
1950 	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);
1951 
1952 	if (!fast_teardown && !force_teardown)
1953 		return -EOPNOTSUPP;
1954 
1955 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
1956 		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
1957 		return -EAGAIN;
1958 	}
1959 
1960 	/* The panic teardown firmware command stops the PCI bus communication
1961 	 * with the HCA, so the health poll is no longer needed.
1962 	 */
1963 	mlx5_drain_health_wq(dev);
1964 	mlx5_stop_health_poll(dev, false);
1965 
1966 	err = mlx5_cmd_fast_teardown_hca(dev);
1967 	if (!err)
1968 		goto done;
1969 
1970 	err = mlx5_cmd_force_teardown_hca(dev);
1971 	if (!err)
1972 		goto done;
1973 
1974 	mlx5_core_dbg(dev, "Firmware couldn't do fast unload, error %d\n", err);
1975 	mlx5_start_health_poll(dev);
1976 	return err;
1977 done:
1978 	mlx5_enter_error_state(dev, true);
1979 	return 0;
1980 }
1981 
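/* Stop servicing new interrupts and wait for every MSI-X vector's
 * handler to finish running.
 */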
1982 static void mlx5_shutdown_disable_interrupts(struct mlx5_core_dev *mdev)
1983 {
1984 	int nvec = mdev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
1985 	int x;
1986 
1987 	mdev->priv.disable_irqs = 1;
1988 
1989 	/* wait for all IRQ handlers to finish processing */
1990 	for (x = 0; x != nvec; x++)
1991 		synchronize_irq(mdev->priv.msix_arr[x].vector);
1992 }
1993 
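/* Shutdown hook: switch commands to polling mode, quiesce interrupts and
 * prefer the firmware fast unload, falling back to a full unload, before
 * disabling the PCI device.
 */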
1994 static void shutdown_one(struct pci_dev *pdev)
1995 {
1996 	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
1997 	struct mlx5_priv *priv = &dev->priv;
1998 	int err;
1999 
2000 	/* enter polling mode */
2001 	mlx5_cmd_use_polling(dev);
2002 
2003 	set_bit(MLX5_INTERFACE_STATE_TEARDOWN, &dev->intf_state);
2004 
2005 	/* disable all interrupts */
2006 	mlx5_shutdown_disable_interrupts(dev);
2007 
2008 	err = mlx5_try_fast_unload(dev);
2009 	if (err)
2010 		mlx5_unload_one(dev, priv, false);
2011 	mlx5_pci_disable_device(dev);
2012 }
2013 
2014 static const struct pci_device_id mlx5_core_pci_table[] = {
2015 	{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
2016 	{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
2017 	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
2018 	{ PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
2019 	{ PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
2020 	{ PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
2021 	{ PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5, PCIe 3.0 */
2022 	{ PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
2023 	{ PCI_VDEVICE(MELLANOX, 4121) }, /* ConnectX-5 Ex */
2024 	{ PCI_VDEVICE(MELLANOX, 4122) }, /* ConnectX-5 Ex VF */
2025 	{ PCI_VDEVICE(MELLANOX, 4123) }, /* ConnectX-6 */
2026 	{ PCI_VDEVICE(MELLANOX, 4124) }, /* ConnectX-6 VF */
2027 	{ PCI_VDEVICE(MELLANOX, 4125) }, /* ConnectX-6 Dx */
2028 	{ PCI_VDEVICE(MELLANOX, 4126) }, /* ConnectX Family mlx5Gen Virtual Function */
2029 	{ PCI_VDEVICE(MELLANOX, 4127) }, /* ConnectX-6 LX */
2030 	{ PCI_VDEVICE(MELLANOX, 4128) },
2031 	{ PCI_VDEVICE(MELLANOX, 4129) }, /* ConnectX-7 */
2032 	{ PCI_VDEVICE(MELLANOX, 4130) },
2033 	{ PCI_VDEVICE(MELLANOX, 4131) }, /* ConnectX-8 */
2034 	{ PCI_VDEVICE(MELLANOX, 4132) },
2035 	{ PCI_VDEVICE(MELLANOX, 4133) },
2036 	{ PCI_VDEVICE(MELLANOX, 4134) },
2037 	{ PCI_VDEVICE(MELLANOX, 4135) },
2038 	{ PCI_VDEVICE(MELLANOX, 4136) },
2039 	{ PCI_VDEVICE(MELLANOX, 4137) },
2040 	{ PCI_VDEVICE(MELLANOX, 4138) },
2041 	{ PCI_VDEVICE(MELLANOX, 4139) },
2042 	{ PCI_VDEVICE(MELLANOX, 4140) },
2043 	{ PCI_VDEVICE(MELLANOX, 4141) },
2044 	{ PCI_VDEVICE(MELLANOX, 4142) },
2045 	{ PCI_VDEVICE(MELLANOX, 4143) },
2046 	{ PCI_VDEVICE(MELLANOX, 4144) },
2047 	{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
2048 	{ PCI_VDEVICE(MELLANOX, 0xa2d3) }, /* BlueField integrated ConnectX-5 network controller VF */
2049 	{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
2050 	{ PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
2051 	{ PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */
2052 	{ }
2053 };
2054 
2055 MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
2056 
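/* Force the device through the PCI "error detected" path. */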
2057 void mlx5_disable_device(struct mlx5_core_dev *dev)
2058 {
2059 	mlx5_pci_err_detected(dev->pdev, 0);
2060 }
2061 
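/* Recover the device by re-running the slot reset and resume sequence. */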
2062 void mlx5_recover_device(struct mlx5_core_dev *dev)
2063 {
2064 	mlx5_pci_disable_device(dev);
2065 	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
2066 		mlx5_pci_resume(dev->pdev);
2067 }
2068 
2069 struct pci_driver mlx5_core_driver = {
2070 	.name           = DRIVER_NAME,
2071 	.id_table       = mlx5_core_pci_table,
2072 	.shutdown	= shutdown_one,
2073 	.probe          = init_one,
2074 	.remove         = remove_one,
2075 	.err_handler	= &mlx5_err_handler,
2076 #ifdef PCI_IOV
2077 	.bsd_iov_init	= mlx5_iov_init,
2078 	.bsd_iov_uninit	= mlx5_iov_uninit,
2079 	.bsd_iov_add_vf	= mlx5_iov_add_vf,
2080 #endif
2081 };
2082 
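/* Module load: register the PCI driver and set up the mlx5 control
 * interface, unwinding the driver registration on failure.
 */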
2083 static int __init init(void)
2084 {
2085 	int err;
2086 
2087 	err = pci_register_driver(&mlx5_core_driver);
2088 	if (err)
2089 		goto err_debug;
2090 
2091 	err = mlx5_ctl_init();
2092 	if (err)
2093 		goto err_ctl;
2094 
2095 	return 0;
2096 
2097 err_ctl:
2098 	pci_unregister_driver(&mlx5_core_driver);
2099 
2100 err_debug:
2101 	return err;
2102 }
2103 
2104 static void __exit cleanup(void)
2105 {
2106 	mlx5_ctl_fini();
2107 	pci_unregister_driver(&mlx5_core_driver);
2108 }
2109 
2110 module_init_order(init, SI_ORDER_FIRST);
2111 module_exit_order(cleanup, SI_ORDER_FIRST);
2112