xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_health.c (revision c6df6f53)
1 /*-
2  * Copyright (c) 2013-2019, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/random.h>
31 #include <linux/vmalloc.h>
32 #include <linux/hardirq.h>
33 #include <linux/delay.h>
34 #include <dev/mlx5/driver.h>
35 #include <dev/mlx5/mlx5_ifc.h>
36 #include "mlx5_core.h"
37 
/* Base period of the health poll timer (jitter is added per tick). */
#define	MLX5_HEALTH_POLL_INTERVAL	(2 * HZ)
/* Consecutive identical health-counter reads before declaring a stall. */
#define	MAX_MISSES			3

/* Bits in mlx5_core_health.flags used to quiesce the work queues. */
enum {
	MLX5_DROP_NEW_HEALTH_WORK,
	MLX5_DROP_NEW_RECOVERY_WORK,
	MLX5_DROP_NEW_WATCHDOG_WORK,
};

/* Fatal-error sensor ids, in the order probed by check_fatal_sensors(). */
enum  {
	MLX5_SENSOR_NO_ERR		= 0,
	MLX5_SENSOR_PCI_COMM_ERR	= 1,
	MLX5_SENSOR_PCI_ERR		= 2,
	MLX5_SENSOR_NIC_DISABLED	= 3,
	MLX5_SENSOR_NIC_SW_RESET	= 4,
	MLX5_SENSOR_FW_SYND_RFR		= 5,
};

/* Tunable: allow the driver to issue firmware SW resets at all. */
static int mlx5_fw_reset_enable = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fw_reset_enable, CTLFLAG_RWTUN,
    &mlx5_fw_reset_enable, 0,
    "Enable firmware reset");

/* Tunable: minimum spacing between driver-initiated FW resets (seconds). */
static unsigned int sw_reset_to = 1200;
SYSCTL_UINT(_hw_mlx5, OID_AUTO, sw_reset_timeout, CTLFLAG_RWTUN,
    &sw_reset_to, 0,
    "Minimum timeout in seconds between two firmware resets");
65 
66 
67 static int lock_sem_sw_reset(struct mlx5_core_dev *dev)
68 {
69 	int ret;
70 
71 	/* Lock GW access */
72 	ret = -mlx5_vsc_lock(dev);
73 	if (ret) {
74 		mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret);
75 		return ret;
76 	}
77 
78 	ret = -mlx5_vsc_lock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET);
79 	if (ret) {
80 		if (ret == -EBUSY)
81 			mlx5_core_dbg(dev,
82 			    "SW reset FW semaphore already locked, another function will handle the reset\n");
83 		else
84 			mlx5_core_warn(dev,
85 			    "SW reset semaphore lock return %d\n", ret);
86 	}
87 
88 	/* Unlock GW access */
89 	mlx5_vsc_unlock(dev);
90 
91 	return ret;
92 }
93 
94 static int unlock_sem_sw_reset(struct mlx5_core_dev *dev)
95 {
96 	int ret;
97 
98 	/* Lock GW access */
99 	ret = -mlx5_vsc_lock(dev);
100 	if (ret) {
101 		mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret);
102 		return ret;
103 	}
104 
105 	ret = -mlx5_vsc_unlock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET);
106 
107 	/* Unlock GW access */
108 	mlx5_vsc_unlock(dev);
109 
110 	return ret;
111 }
112 
113 u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
114 {
115 	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
116 }
117 
118 void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
119 {
120 	u32 cur_cmdq_addr_l_sz;
121 
122 	cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
123 	iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
124 		    state << MLX5_NIC_IFC_OFFSET,
125 		    &dev->iseg->cmdq_addr_l_sz);
126 }
127 
128 static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
129 {
130 	struct mlx5_core_health *health = &dev->priv.health;
131 	struct mlx5_health_buffer __iomem *h = health->health;
132 	u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
133 	u8 synd = ioread8(&h->synd);
134 
135 	if (rfr && synd)
136 		mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
137 	return rfr && synd;
138 }
139 
140 static void mlx5_trigger_cmd_completions(struct work_struct *work)
141 {
142 	struct mlx5_core_dev *dev =
143 	    container_of(work, struct mlx5_core_dev, priv.health.work_cmd_completion);
144 	unsigned long flags;
145 	u64 vector;
146 
147 	/* wait for pending handlers to complete */
148 	synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
149 	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
150 	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
151 	if (!vector)
152 		goto no_trig;
153 
154 	vector |= MLX5_TRIGGERED_CMD_COMP;
155 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
156 
157 	mlx5_core_dbg(dev, "vector 0x%jx\n", (uintmax_t)vector);
158 	mlx5_cmd_comp_handler(dev, vector, MLX5_CMD_MODE_EVENTS);
159 	return;
160 
161 no_trig:
162 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
163 }
164 
165 static bool sensor_pci_no_comm(struct mlx5_core_dev *dev)
166 {
167 	struct mlx5_core_health *health = &dev->priv.health;
168 	struct mlx5_health_buffer __iomem *h = health->health;
169 	bool err = ioread32be(&h->fw_ver) == 0xffffffff;
170 
171 	return err;
172 }
173 
174 static bool sensor_nic_disabled(struct mlx5_core_dev *dev)
175 {
176 	return mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED;
177 }
178 
179 static bool sensor_nic_sw_reset(struct mlx5_core_dev *dev)
180 {
181 	return mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET;
182 }
183 
/*
 * Probe the fatal-error sensors and return the first one that fires,
 * or MLX5_SENSOR_NO_ERR when the device looks healthy.  The probe
 * order matters: callers (e.g. reset_fw_if_needed, poll_health) make
 * recovery decisions based on which sensor was reported first.
 */
static u32 check_fatal_sensors(struct mlx5_core_dev *dev)
{
	if (sensor_pci_no_comm(dev))
		return MLX5_SENSOR_PCI_COMM_ERR;
	if (pci_channel_offline(dev->pdev))
		return MLX5_SENSOR_PCI_ERR;
	if (sensor_nic_disabled(dev))
		return MLX5_SENSOR_NIC_DISABLED;
	if (sensor_nic_sw_reset(dev))
		return MLX5_SENSOR_NIC_SW_RESET;
	if (sensor_fw_synd_rfr(dev))
		return MLX5_SENSOR_FW_SYND_RFR;

	return MLX5_SENSOR_NO_ERR;
}
199 
/*
 * Issue a firmware SW reset, provided that the administrator has not
 * disabled it (hw.mlx5.fw_reset_enable), the firmware advertises
 * support for it, and the current fatal state suggests a reset could
 * actually help.
 */
static void reset_fw_if_needed(struct mlx5_core_dev *dev)
{
	bool supported;
	u32 cmdq_addr, fatal_error;

	if (!mlx5_fw_reset_enable)
		return;
	/* FW advertises SW reset support in the "initializing" word. */
	supported = (ioread32be(&dev->iseg->initializing) >>
	    MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
	if (!supported)
		return;

	/* The reset only needs to be issued by one PF. The health buffer is
	 * shared between all functions, and will be cleared during a reset.
	 * Check again to avoid a redundant 2nd reset. If the fatal error was
	 * PCI related a reset won't help.
	 */
	fatal_error = check_fatal_sensors(dev);
	if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
	    fatal_error == MLX5_SENSOR_NIC_DISABLED ||
	    fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
		mlx5_core_warn(dev,
		    "Not issuing FW reset. Either it's already done or won't help.\n");
		return;
	}

	mlx5_core_info(dev, "Issuing FW Reset\n");
	/* Write the NIC interface field to initiate the reset, the command
	 * interface address also resides here, don't overwrite it.
	 */
	cmdq_addr = ioread32be(&dev->iseg->cmdq_addr_l_sz);
	iowrite32be((cmdq_addr & 0xFFFFF000) |
		    MLX5_NIC_IFC_SW_RESET << MLX5_NIC_IFC_OFFSET,
		    &dev->iseg->cmdq_addr_l_sz);
}
235 
/*
 * Rate-limit driver-initiated firmware resets: allow at most one every
 * sw_reset_to seconds (hw.mlx5.sw_reset_timeout).  Returns true when a
 * reset may proceed now.  The request time is recorded unconditionally,
 * so even an elided request restarts the window.
 */
static bool
mlx5_health_allow_reset(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned int delta;
	bool ret;

	if (health->last_reset_req != 0) {
		/* Seconds elapsed since the previous reset request. */
		delta = ticks - health->last_reset_req;
		delta /= hz;
		ret = delta >= sw_reset_to;
	} else {
		/* First request ever; always allowed. */
		ret = true;
	}

	/*
	 * In principle, ticks may be 0. Setting it to off by one (-1)
	 * to prevent certain reset in next request.
	 */
	health->last_reset_req = ticks ? : -1;
	if (!ret)
		mlx5_core_warn(dev,
		    "Firmware reset elided due to auto-reset frequency threshold.\n");
	return (ret);
}
261 
262 #define MLX5_CRDUMP_WAIT_MS	60000
263 #define MLX5_FW_RESET_WAIT_MS	1000
264 #define MLX5_NIC_STATE_POLL_MS	5
/*
 * Move the device into the INTERNAL_ERROR state after a fatal condition
 * (or unconditionally when `force' is set).  Flushes outstanding
 * commands with synthetic completions, optionally takes a cr-dump and
 * issues a FW SW reset (PF only, rate limited), waits for the NIC IFC
 * to settle, and finally broadcasts MLX5_DEV_EVENT_SYS_ERROR.
 */
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	int end, delay_ms = MLX5_CRDUMP_WAIT_MS;
	u32 fatal_error;
	int lock = -EBUSY;

	fatal_error = check_fatal_sensors(dev);

	if (fatal_error || force) {
		/* xchg makes the transition one-shot: only the first
		 * caller proceeds, later callers bail out here. */
		if (xchg(&dev->state, MLX5_DEVICE_STATE_INTERNAL_ERROR) ==
		    MLX5_DEVICE_STATE_INTERNAL_ERROR)
			return;
		if (!force)
			mlx5_core_err(dev, "internal state error detected\n");

		/*
		 * Queue the command completion handler on the command
		 * work queue to avoid racing with the real command
		 * completion handler and then wait for it to
		 * complete:
		 */
		queue_work(dev->priv.health.wq_cmd, &dev->priv.health.work_cmd_completion);
		flush_workqueue(dev->priv.health.wq_cmd);
	}

	mutex_lock(&dev->intf_state_mutex);

	if (force)
		goto err_state_done;

	if (fatal_error == MLX5_SENSOR_FW_SYND_RFR &&
	    mlx5_health_allow_reset(dev)) {
		/* Get cr-dump and reset FW semaphore */
		if (mlx5_core_is_pf(dev))
			lock = lock_sem_sw_reset(dev);

		/* Execute cr-dump and SW reset */
		if (lock != -EBUSY) {
			(void)mlx5_fwdump(dev);
			reset_fw_if_needed(dev);
			delay_ms = MLX5_FW_RESET_WAIT_MS;
		}
	}

	/* Recover from SW reset */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (sensor_nic_disabled(dev))
			break;

		msleep(MLX5_NIC_STATE_POLL_MS);
	} while (!time_after(jiffies, end));

	if (!sensor_nic_disabled(dev)) {
		mlx5_core_err(dev, "NIC IFC still %d after %ums.\n",
			mlx5_get_nic_state(dev), delay_ms);
	}

	/* Release FW semaphore if you are the lock owner */
	if (!lock)
		unlock_sem_sw_reset(dev);

	mlx5_core_info(dev, "System error event triggered\n");

err_state_done:
	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
	mutex_unlock(&dev->intf_state_mutex);
}
333 
334 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
335 {
336 	u8 nic_mode = mlx5_get_nic_state(dev);
337 
338 	if (nic_mode == MLX5_NIC_IFC_SW_RESET) {
339 		/* The IFC mode field is 3 bits, so it will read 0x7 in two cases:
340 		 * 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
341 		 *    and this is a VF), this is not recoverable by SW reset.
342 		 *    Logging of this is handled elsewhere.
343 		 * 2. FW reset has been issued by another function, driver can
344 		 *    be reloaded to recover after the mode switches to
345 		 *    MLX5_NIC_IFC_DISABLED.
346 		 */
347 		if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
348 			mlx5_core_warn(dev,
349 			    "NIC SW reset is already progress\n");
350 		else
351 			mlx5_core_warn(dev,
352 			    "Communication with FW over the PCI link is down\n");
353 	} else {
354 		mlx5_core_warn(dev, "NIC mode %d\n", nic_mode);
355 	}
356 
357 	mlx5_disable_device(dev);
358 }
359 
360 #define MLX5_FW_RESET_WAIT_MS	1000
361 #define MLX5_NIC_STATE_POLL_MS	5
362 static void health_recover(struct work_struct *work)
363 {
364 	unsigned long end = jiffies + msecs_to_jiffies(MLX5_FW_RESET_WAIT_MS);
365 	struct mlx5_core_health *health;
366 	struct delayed_work *dwork;
367 	struct mlx5_core_dev *dev;
368 	struct mlx5_priv *priv;
369 	bool recover = true;
370 	u8 nic_mode;
371 
372 	dwork = container_of(work, struct delayed_work, work);
373 	health = container_of(dwork, struct mlx5_core_health, recover_work);
374 	priv = container_of(health, struct mlx5_priv, health);
375 	dev = container_of(priv, struct mlx5_core_dev, priv);
376 
377 	/* This might likely be wrong, cut and paste from elsewhere? */
378 	bus_topo_lock();
379 
380 	if (sensor_pci_no_comm(dev)) {
381 		mlx5_core_err(dev,
382 		    "health recovery flow aborted, PCI reads still not working\n");
383 		recover = false;
384 	}
385 
386 	nic_mode = mlx5_get_nic_state(dev);
387 	while (nic_mode != MLX5_NIC_IFC_DISABLED &&
388 	       !time_after(jiffies, end)) {
389 		msleep(MLX5_NIC_STATE_POLL_MS);
390 		nic_mode = mlx5_get_nic_state(dev);
391 	}
392 
393 	if (nic_mode != MLX5_NIC_IFC_DISABLED) {
394 		mlx5_core_err(dev,
395 		    "health recovery flow aborted, unexpected NIC IFC mode %d.\n",
396 		    nic_mode);
397 		recover = false;
398 	}
399 
400 	if (recover) {
401 		mlx5_core_info(dev, "Starting health recovery flow\n");
402 		mlx5_recover_device(dev);
403 	}
404 
405 	bus_topo_unlock();
406 }
407 
408 /* How much time to wait until health resetting the driver (in msecs) */
409 #define MLX5_RECOVERY_DELAY_MSECS 60000
410 #define MLX5_RECOVERY_NO_DELAY 0
411 static unsigned long get_recovery_delay(struct mlx5_core_dev *dev)
412 {
413 	return dev->priv.health.fatal_error == MLX5_SENSOR_PCI_ERR ||
414 		dev->priv.health.fatal_error == MLX5_SENSOR_PCI_COMM_ERR	?
415 		MLX5_RECOVERY_DELAY_MSECS : MLX5_RECOVERY_NO_DELAY;
416 }
417 
/*
 * First-stage health work: runs on health->wq after poll_health()
 * detected a fatal error.  Disables the device via
 * mlx5_handle_bad_state() and then schedules the delayed recovery
 * work, unless MLX5_DROP_NEW_RECOVERY_WORK indicates the driver is
 * draining/unloading.
 */
static void health_care(struct work_struct *work)
{
	struct mlx5_core_health *health;
	unsigned long recover_delay;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	unsigned long flags;

	health = container_of(work, struct mlx5_core_health, work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mlx5_core_warn(dev, "handling bad device here\n");
	mlx5_handle_bad_state(dev);
	recover_delay = msecs_to_jiffies(get_recovery_delay(dev));

	/* wq_lock serializes scheduling against the drain/stop paths. */
	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) {
		mlx5_core_warn(dev,
		    "Scheduling recovery work with %lums delay\n",
		    recover_delay);
		schedule_delayed_work(&health->recover_work, recover_delay);
	} else {
		mlx5_core_err(dev,
		    "new health works are not permitted at this stage\n");
	}
	spin_unlock_irqrestore(&health->wq_lock, flags);
}
446 
447 static int get_next_poll_jiffies(void)
448 {
449 	unsigned long next;
450 
451 	get_random_bytes(&next, sizeof(next));
452 	next %= HZ;
453 	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
454 
455 	return next;
456 }
457 
458 void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
459 {
460 	struct mlx5_core_health *health = &dev->priv.health;
461 	unsigned long flags;
462 
463 	spin_lock_irqsave(&health->wq_lock, flags);
464 	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
465 		queue_work(health->wq, &health->work);
466 	else
467 		mlx5_core_err(dev,
468 			"new health works are not permitted at this stage\n");
469 	spin_unlock_irqrestore(&health->wq_lock, flags);
470 }
471 
472 static const char *hsynd_str(u8 synd)
473 {
474 	switch (synd) {
475 	case MLX5_HEALTH_SYNDR_FW_ERR:
476 		return "firmware internal error";
477 	case MLX5_HEALTH_SYNDR_IRISC_ERR:
478 		return "irisc not responding";
479 	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
480 		return "unrecoverable hardware error";
481 	case MLX5_HEALTH_SYNDR_CRC_ERR:
482 		return "firmware CRC error";
483 	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
484 		return "ICM fetch PCI error";
485 	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
486 		return "HW fatal error\n";
487 	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
488 		return "async EQ buffer overrun";
489 	case MLX5_HEALTH_SYNDR_EQ_ERR:
490 		return "EQ error";
491 	case MLX5_HEALTH_SYNDR_EQ_INV:
492 		return "Invalid EQ referenced";
493 	case MLX5_HEALTH_SYNDR_FFSER_ERR:
494 		return "FFSER error";
495 	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
496 		return "High temperature";
497 	default:
498 		return "unrecognized error";
499 	}
500 }
501 
/*
 * Dump the firmware health buffer to the log.  Returns the health
 * syndrome, or 0 when the syndrome reads as zero — which indicates FW
 * cannot respond to initialization-segment reads, so the rest of the
 * buffer is not read or printed.
 */
static u8
print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct mlx5_health_buffer __iomem *h = health->health;
	u8 synd = ioread8(&h->synd);
	char fw_str[18];
	u32 fw;
	int i;

	/*
	 * If synd is 0x0 - this indicates that FW is unable to
	 * respond to initialization segment reads and health buffer
	 * should not be read.
	 */
	if (synd == 0)
		return (0);

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		mlx5_core_info(dev, "assert_var[%d] 0x%08x\n", i,
		    ioread32be(h->assert_var + i));

	mlx5_core_info(dev, "assert_exit_ptr 0x%08x\n",
	    ioread32be(&h->assert_exit_ptr));
	mlx5_core_info(dev, "assert_callra 0x%08x\n",
	    ioread32be(&h->assert_callra));
	snprintf(fw_str, sizeof(fw_str), "%d.%d.%d",
	    fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
	mlx5_core_info(dev, "fw_ver %s\n", fw_str);
	mlx5_core_info(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
	mlx5_core_info(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
	mlx5_core_info(dev, "synd 0x%x: %s\n",
	    ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
	mlx5_core_info(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
	fw = ioread32be(&h->fw_ver);
	mlx5_core_info(dev, "raw fw_ver 0x%08x\n", fw);

	return synd;
}
541 
/*
 * Watchdog work: read the PCIe slot power value and status through FW
 * (requires the pcie_status_and_power MCAM capability) and log any
 * change in the reported power state.
 */
static void health_watchdog(struct work_struct *work)
{
	struct mlx5_core_dev *dev;
	u16 power;
	u8 status;
	int err;

	dev = container_of(work, struct mlx5_core_dev, priv.health.work_watchdog);

	if (!MLX5_CAP_GEN(dev, mcam_reg) ||
	    !MLX5_CAP_MCAM_FEATURE(dev, pcie_status_and_power))
		return;

	err = mlx5_pci_read_power_status(dev, &power, &status);
	if (err < 0) {
		mlx5_core_warn(dev, "Failed reading power status: %d\n",
		    err);
		return;
	}

	dev->pwr_value = power;

	/* Log only on a state transition. */
	if (dev->pwr_status != status) {

		switch (status) {
		case 0:
			/* Slot does not publish power information. */
			dev->pwr_status = status;
			mlx5_core_info(dev,
			    "PCI power is not published by the PCIe slot.\n");
			break;
		case 1:
			dev->pwr_status = status;
			mlx5_core_info(dev,
			    "PCIe slot advertised sufficient power (%uW).\n",
			    power);
			break;
		case 2:
			dev->pwr_status = status;
			mlx5_core_warn(dev,
			    "Detected insufficient power on the PCIe slot (%uW).\n",
			    power);
			break;
		default:
			/* Unknown state: store 0 so the next read re-logs. */
			dev->pwr_status = 0;
			mlx5_core_warn(dev,
			    "Unknown power state detected(%d).\n",
			    status);
			break;
		}
	}
}
593 
594 void
595 mlx5_trigger_health_watchdog(struct mlx5_core_dev *dev)
596 {
597 	struct mlx5_core_health *health = &dev->priv.health;
598 	unsigned long flags;
599 
600 	spin_lock_irqsave(&health->wq_lock, flags);
601 	if (!test_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags))
602 		queue_work(health->wq_watchdog, &health->work_watchdog);
603 	else
604 		mlx5_core_err(dev,
605 		    "scheduling watchdog is not permitted at this stage\n");
606 	spin_unlock_irqrestore(&health->wq_lock, flags);
607 }
608 
/*
 * Periodic health timer callback.  Detects a stalled FW health counter
 * (MAX_MISSES consecutive identical reads) and newly-raised fatal
 * sensor conditions, triggering the health work on the first
 * transition into a fatal state.  Re-arms itself only while the
 * device state is UP; otherwise it simply stops.
 */
static void poll_health(unsigned long data)
{
	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
	struct mlx5_core_health *health = &dev->priv.health;
	u32 fatal_error;
	u32 count;

	if (dev->state != MLX5_DEVICE_STATE_UP)
		return;

	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	/* == (not >=) so the message is logged exactly once per stall. */
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
		if (print_health_info(dev) == 0)
			mlx5_core_err(dev, "FW is unable to respond to initialization segment reads\n");
	}

	fatal_error = check_fatal_sensors(dev);

	/* Act only on the first transition into a fatal state. */
	if (fatal_error && !health->fatal_error) {
		mlx5_core_err(dev,
		    "Fatal error %u detected\n", fatal_error);
		dev->priv.health.fatal_error = fatal_error;
		print_health_info(dev);
		mlx5_trigger_health_work(dev);
	}

	mod_timer(&health->timer, get_next_poll_jiffies());
}
644 
/*
 * Reset health state, arm the periodic health poll timer, and kick the
 * watchdog work once for an initial PCI power state readout.
 */
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	/* NOTE(review): init_timer() looks redundant — setup_timer()
	 * below also initializes the timer; confirm against the
	 * linuxkpi implementation before removing. */
	init_timer(&health->timer);
	health->fatal_error = MLX5_SENSOR_NO_ERR;
	/* Re-enable all work scheduling after a previous drain/stop. */
	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	clear_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	setup_timer(&health->timer, poll_health, (unsigned long)dev);
	mod_timer(&health->timer,
		  round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL));

	/* do initial PCI power state readout */
	mlx5_trigger_health_watchdog(dev);
}
664 
665 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
666 {
667 	struct mlx5_core_health *health = &dev->priv.health;
668 	unsigned long flags;
669 
670 	if (disable_health) {
671 		spin_lock_irqsave(&health->wq_lock, flags);
672 		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
673 		set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
674 		set_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags);
675 		spin_unlock_irqrestore(&health->wq_lock, flags);
676 	}
677 
678 	del_timer_sync(&health->timer);
679 }
680 
681 void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
682 {
683 	struct mlx5_core_health *health = &dev->priv.health;
684 	unsigned long flags;
685 
686 	spin_lock_irqsave(&health->wq_lock, flags);
687 	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
688 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
689 	set_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags);
690 	spin_unlock_irqrestore(&health->wq_lock, flags);
691 	cancel_delayed_work_sync(&health->recover_work);
692 	cancel_work_sync(&health->work);
693 	cancel_work_sync(&health->work_watchdog);
694 }
695 
696 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
697 {
698 	struct mlx5_core_health *health = &dev->priv.health;
699 	unsigned long flags;
700 
701 	spin_lock_irqsave(&health->wq_lock, flags);
702 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
703 	spin_unlock_irqrestore(&health->wq_lock, flags);
704 	cancel_delayed_work_sync(&dev->priv.health.recover_work);
705 }
706 
/* Destroy the health work queues; counterpart of mlx5_health_init(). */
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	destroy_workqueue(health->wq);
	destroy_workqueue(health->wq_watchdog);
	destroy_workqueue(health->wq_cmd);
}
715 
/*
 * Allocate the per-device health work queues and initialize the work
 * items and lock.  Returns 0 on success or -ENOMEM on failure; any
 * queues created before the failure are destroyed on the way out.
 */
int mlx5_health_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health;
	char name[64];

	health = &dev->priv.health;

	/* "-rec": queue for the health_care() fatal-error work. */
	snprintf(name, sizeof(name), "%s-rec", dev_name(&dev->pdev->dev));
	health->wq = create_singlethread_workqueue(name);
	if (!health->wq)
		goto err_recovery;

	/* "-wdg": queue for the PCI power watchdog work. */
	snprintf(name, sizeof(name), "%s-wdg", dev_name(&dev->pdev->dev));
	health->wq_watchdog = create_singlethread_workqueue(name);
	if (!health->wq_watchdog)
		goto err_watchdog;

	/* "-cmd": queue for synthetic command completions on error. */
	snprintf(name, sizeof(name), "%s-cmd", dev_name(&dev->pdev->dev));
	health->wq_cmd = create_singlethread_workqueue(name);
	if (!health->wq_cmd)
		goto err_cmd;

	spin_lock_init(&health->wq_lock);
	INIT_WORK(&health->work, health_care);
	INIT_WORK(&health->work_watchdog, health_watchdog);
	INIT_WORK(&health->work_cmd_completion, mlx5_trigger_cmd_completions);
	INIT_DELAYED_WORK(&health->recover_work, health_recover);

	return 0;

err_cmd:
	destroy_workqueue(health->wq_watchdog);
err_watchdog:
	destroy_workqueue(health->wq);
err_recovery:
	return -ENOMEM;
}
753