xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_health.c (revision 721a1a6a)
1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/random.h>
31 #include <linux/vmalloc.h>
32 #include <linux/hardirq.h>
33 #include <linux/delay.h>
34 #include <dev/mlx5/driver.h>
35 #include <dev/mlx5/mlx5_ifc.h>
36 #include "mlx5_core.h"
37 
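/*
 * The firmware health counter is sampled every MLX5_HEALTH_POLL_INTERVAL
 * ticks (about two seconds, plus up to one second of random jitter, see
 * get_next_poll_jiffies()).  MAX_MISSES consecutive polls without the
 * counter advancing are treated as a compromised device.
 */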
38 #define	MLX5_HEALTH_POLL_INTERVAL	(2 * HZ)
39 #define	MAX_MISSES			3
40 
41 enum {
42 	MLX5_NIC_IFC_FULL		= 0,
43 	MLX5_NIC_IFC_DISABLED		= 1,
44 	MLX5_NIC_IFC_NO_DRAM_NIC	= 2,
45 	MLX5_NIC_IFC_SW_RESET		= 7,
46 };
47 
48 enum {
49 	MLX5_DROP_NEW_HEALTH_WORK,
50 	MLX5_DROP_NEW_RECOVERY_WORK,
51 };
52 
53 enum {
54 	MLX5_SENSOR_NO_ERR		= 0,
55 	MLX5_SENSOR_PCI_COMM_ERR	= 1,
56 	MLX5_SENSOR_PCI_ERR		= 2,
57 	MLX5_SENSOR_NIC_DISABLED	= 3,
58 	MLX5_SENSOR_NIC_SW_RESET	= 4,
59 	MLX5_SENSOR_FW_SYND_RFR		= 5,
60 };
61 
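/*
 * Tunable/sysctl hw.mlx5.fw_reset_enable (CTLFLAG_RWTUN): when non-zero
 * (the default), reset_fw_if_needed() may ask the firmware to perform a
 * SW reset after a fatal error has been sensed.
 */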
62 static int mlx5_fw_reset_enable = 1;
63 SYSCTL_INT(_hw_mlx5, OID_AUTO, fw_reset_enable, CTLFLAG_RWTUN,
64     &mlx5_fw_reset_enable, 0,
65     "Enable firmware reset");
66 
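/*
 * Try to take the SW-reset firmware semaphore through the VSC gateway
 * (mlx5_vsc_lock()/mlx5_vsc_lock_addr_space()).  A return of -EBUSY means
 * another function already owns the semaphore and will drive the reset;
 * any other non-zero value is a gateway access failure.
 */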
67 static int lock_sem_sw_reset(struct mlx5_core_dev *dev)
68 {
69 	int ret;
70 
71 	/* Lock GW access */
72 	ret = -mlx5_vsc_lock(dev);
73 	if (ret) {
74 		mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret);
75 		return ret;
76 	}
77 
78 	ret = -mlx5_vsc_lock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET);
79 	if (ret) {
80 		if (ret == -EBUSY)
81 			mlx5_core_dbg(dev, "SW reset FW semaphore already locked, another function will handle the reset\n");
82 		else
83 			mlx5_core_warn(dev, "SW reset semaphore lock returned %d\n", ret);
84 	}
85 
86 	/* Unlock GW access */
87 	mlx5_vsc_unlock(dev);
88 
89 	return ret;
90 }
91 
92 static int unlock_sem_sw_reset(struct mlx5_core_dev *dev)
93 {
94 	int ret;
95 
96 	/* Lock GW access */
97 	ret = -mlx5_vsc_lock(dev);
98 	if (ret) {
99 		mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret);
100 		return ret;
101 	}
102 
103 	ret = -mlx5_vsc_unlock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET);
104 
105 	/* Unlock GW access */
106 	mlx5_vsc_unlock(dev);
107 
108 	return ret;
109 }
110 
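/*
 * The NIC interface (IFC) mode is a 3-bit field in the initialization
 * segment, bits 10:8 of cmdq_addr_l_sz; it reads back one of the
 * MLX5_NIC_IFC_* values above.
 */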
111 static u8 get_nic_mode(struct mlx5_core_dev *dev)
112 {
113 	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
114 }
115 
116 static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
117 {
118 	struct mlx5_core_health *health = &dev->priv.health;
119 	struct mlx5_health_buffer __iomem *h = health->health;
120 	u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
121 	u8 synd = ioread8(&h->synd);
122 
123 	if (rfr && synd)
124 		mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
125 	return rfr && synd;
126 }
127 
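/*
 * Complete every outstanding command with a synthetic "triggered"
 * completion so that callers blocked on a dead firmware are released.
 * The vector of busy command slots is derived from the inverse of
 * cmd.bitmask (the free-slot mask) under cmd.alloc_lock.
 */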
128 static void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
129 {
130 	unsigned long flags;
131 	u64 vector;
132 
133 	/* wait for pending handlers to complete */
134 	synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
135 	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
136 	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
137 	if (!vector)
138 		goto no_trig;
139 
140 	vector |= MLX5_TRIGGERED_CMD_COMP;
141 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
142 
143 	mlx5_core_dbg(dev, "vector 0x%jx\n", (uintmax_t)vector);
144 	mlx5_cmd_comp_handler(dev, vector, MLX5_CMD_MODE_EVENTS);
145 	return;
146 
147 no_trig:
148 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
149 }
150 
151 static bool sensor_pci_no_comm(struct mlx5_core_dev *dev)
152 {
153 	struct mlx5_core_health *health = &dev->priv.health;
154 	struct mlx5_health_buffer __iomem *h = health->health;
155 	bool err = ioread32be(&h->fw_ver) == 0xffffffff;
156 
157 	return err;
158 }
159 
160 static bool sensor_nic_disabled(struct mlx5_core_dev *dev)
161 {
162 	return get_nic_mode(dev) == MLX5_NIC_IFC_DISABLED;
163 }
164 
165 static bool sensor_nic_sw_reset(struct mlx5_core_dev *dev)
166 {
167 	return get_nic_mode(dev) == MLX5_NIC_IFC_SW_RESET;
168 }
169 
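/*
 * Poll the fatal-error sensors in a fixed order and return the first
 * MLX5_SENSOR_* value that trips, or MLX5_SENSOR_NO_ERR if the device
 * still looks healthy.
 */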
170 static u32 check_fatal_sensors(struct mlx5_core_dev *dev)
171 {
172 	if (sensor_pci_no_comm(dev))
173 		return MLX5_SENSOR_PCI_COMM_ERR;
174 	if (pci_channel_offline(dev->pdev))
175 		return MLX5_SENSOR_PCI_ERR;
176 	if (sensor_nic_disabled(dev))
177 		return MLX5_SENSOR_NIC_DISABLED;
178 	if (sensor_nic_sw_reset(dev))
179 		return MLX5_SENSOR_NIC_SW_RESET;
180 	if (sensor_fw_synd_rfr(dev))
181 		return MLX5_SENSOR_FW_SYND_RFR;
182 
183 	return MLX5_SENSOR_NO_ERR;
184 }
185 
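/*
 * Ask the firmware to perform a SW reset by writing MLX5_NIC_IFC_SW_RESET
 * into the NIC interface field of the initialization segment.  Gated on
 * the hw.mlx5.fw_reset_enable sysctl and on the firmware advertising reset
 * support in the "initializing" word of the init segment.
 */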
186 static void reset_fw_if_needed(struct mlx5_core_dev *dev)
187 {
188 	bool supported;
189 	u32 cmdq_addr, fatal_error;
190 
191 	if (!mlx5_fw_reset_enable)
192 		return;
193 	supported = (ioread32be(&dev->iseg->initializing) >>
194 	    MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
195 	if (!supported)
196 		return;
197 
198 	/* The reset only needs to be issued by one PF. The health buffer is
199 	 * shared between all functions, and will be cleared during a reset.
200 	 * Check again to avoid a redundant 2nd reset. If the fatal error was
201 	 * PCI related, a reset won't help.
202 	 */
203 	fatal_error = check_fatal_sensors(dev);
204 	if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
205 	    fatal_error == MLX5_SENSOR_NIC_DISABLED ||
206 	    fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
207 		mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.\n");
208 		return;
209 	}
210 
211 	mlx5_core_warn(dev, "Issuing FW Reset\n");
212 	/* Write the NIC interface field to initiate the reset; the command
213 	 * interface address also resides here, so don't overwrite it.
214 	 */
215 	cmdq_addr = ioread32be(&dev->iseg->cmdq_addr_l_sz);
216 	iowrite32be((cmdq_addr & 0xFFFFF000) |
217 		    MLX5_NIC_IFC_SW_RESET << MLX5_NIC_IFC_OFFSET,
218 		    &dev->iseg->cmdq_addr_l_sz);
219 }
220 
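/*
 * Transition the device into the internal-error state: mark it
 * MLX5_DEVICE_STATE_INTERNAL_ERROR and flush outstanding commands.  Unless
 * "force" is set, and if the firmware requested the reset
 * (MLX5_SENSOR_FW_SYND_RFR), a PF that wins the SW-reset semaphore captures
 * a crdump and triggers the FW reset, and the NIC IFC mode is then polled
 * until it drops to MLX5_NIC_IFC_DISABLED.  Finally,
 * MLX5_DEV_EVENT_SYS_ERROR is broadcast to the attached interfaces.
 */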
221 #define MLX5_CRDUMP_WAIT_MS	60000
222 #define MLX5_FW_RESET_WAIT_MS	1000
223 #define MLX5_NIC_STATE_POLL_MS	5
224 void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
225 {
226 	unsigned long end, delay_ms = MLX5_CRDUMP_WAIT_MS;
227 	u32 fatal_error;
228 	int lock = -EBUSY;
229 
230 	fatal_error = check_fatal_sensors(dev);
231 
232 	if (fatal_error || force) {
233 		if (xchg(&dev->state, MLX5_DEVICE_STATE_INTERNAL_ERROR) ==
234 		    MLX5_DEVICE_STATE_INTERNAL_ERROR)
235 			return;
236 		if (!force)
237 			mlx5_core_err(dev, "internal state error detected\n");
238 		mlx5_trigger_cmd_completions(dev);
239 	}
240 
241 	mutex_lock(&dev->intf_state_mutex);
242 
243 	if (force)
244 		goto err_state_done;
245 
246 	if (fatal_error == MLX5_SENSOR_FW_SYND_RFR) {
247 		/* Get cr-dump and reset FW semaphore */
248 		if (mlx5_core_is_pf(dev))
249 			lock = lock_sem_sw_reset(dev);
250 
251 		/* Execute cr-dump and SW reset */
252 		if (lock != -EBUSY) {
253 			mlx5_fwdump(dev);
254 			reset_fw_if_needed(dev);
255 			delay_ms = MLX5_FW_RESET_WAIT_MS;
256 		}
257 	}
258 
259 	/* Recover from SW reset */
260 	end = jiffies + msecs_to_jiffies(delay_ms);
261 	do {
262 		if (sensor_nic_disabled(dev))
263 			break;
264 
265 		msleep(MLX5_NIC_STATE_POLL_MS);
266 	} while (!time_after(jiffies, end));
267 
268 	if (!sensor_nic_disabled(dev)) {
269 		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
270 			get_nic_mode(dev), delay_ms);
271 	}
272 
273 	/* Release the FW semaphore if we are the lock owner */
274 	if (!lock)
275 		unlock_sem_sw_reset(dev);
276 
277 	mlx5_core_err(dev, "system error event triggered\n");
278 
279 err_state_done:
280 	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
281 	mutex_unlock(&dev->intf_state_mutex);
282 }
283 
284 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
285 {
286 	u8 nic_mode = get_nic_mode(dev);
287 
288 	if (nic_mode == MLX5_NIC_IFC_SW_RESET) {
289 		/* The IFC mode field is 3 bits, so it will read 0x7 in two cases:
290 		 * 1. PCI has been disabled (i.e. PCI-AER, PF driver unloaded
291 		 *    and this is a VF); this is not recoverable by a SW reset.
292 		 *    Logging of this is handled elsewhere.
293 		 * 2. FW reset has been issued by another function, driver can
294 		 *    be reloaded to recover after the mode switches to
295 		 *    MLX5_NIC_IFC_DISABLED.
296 		 */
297 		if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
298 			mlx5_core_warn(dev, "NIC SW reset is already in progress\n");
299 		else
300 			mlx5_core_warn(dev, "Communication with FW over the PCI link is down\n");
301 	} else {
302 		mlx5_core_warn(dev, "NIC mode %d\n", nic_mode);
303 	}
304 
305 	mlx5_disable_device(dev);
306 }
307 
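/*
 * Delayed-work handler for device recovery.  Once PCI reads work again and
 * the NIC IFC mode has settled in MLX5_NIC_IFC_DISABLED, the device is
 * reloaded via mlx5_recover_device().  Giant is held because the newbus
 * attach/detach done during recovery requires it.
 */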
308 #define MLX5_FW_RESET_WAIT_MS	1000
309 #define MLX5_NIC_STATE_POLL_MS	5
310 static void health_recover(struct work_struct *work)
311 {
312 	unsigned long end = jiffies + msecs_to_jiffies(MLX5_FW_RESET_WAIT_MS);
313 	struct mlx5_core_health *health;
314 	struct delayed_work *dwork;
315 	struct mlx5_core_dev *dev;
316 	struct mlx5_priv *priv;
317 	bool recover = true;
318 	u8 nic_mode;
319 
320 	dwork = container_of(work, struct delayed_work, work);
321 	health = container_of(dwork, struct mlx5_core_health, recover_work);
322 	priv = container_of(health, struct mlx5_priv, health);
323 	dev = container_of(priv, struct mlx5_core_dev, priv);
324 
325 	mtx_lock(&Giant);	/* XXX newbus needs this */
326 
327 	if (sensor_pci_no_comm(dev)) {
328 		dev_err(&dev->pdev->dev, "health recovery flow aborted, PCI reads still not working\n");
329 		recover = false;
330 	}
331 
332 	nic_mode = get_nic_mode(dev);
333 	while (nic_mode != MLX5_NIC_IFC_DISABLED &&
334 	       !time_after(jiffies, end)) {
335 		msleep(MLX5_NIC_STATE_POLL_MS);
336 		nic_mode = get_nic_mode(dev);
337 	}
338 
339 	if (nic_mode != MLX5_NIC_IFC_DISABLED) {
340 		dev_err(&dev->pdev->dev, "health recovery flow aborted, unexpected NIC IFC mode %d.\n",
341 			nic_mode);
342 		recover = false;
343 	}
344 
345 	if (recover) {
346 		dev_err(&dev->pdev->dev, "starting health recovery flow\n");
347 		mlx5_recover_device(dev);
348 	}
349 
350 	mtx_unlock(&Giant);
351 }
352 
353 /* How long to wait before the health code resets the driver (in msecs) */
354 #define MLX5_RECOVERY_DELAY_MSECS 60000
355 #define MLX5_RECOVERY_NO_DELAY 0
356 static unsigned long get_recovery_delay(struct mlx5_core_dev *dev)
357 {
358 	return dev->priv.health.fatal_error == MLX5_SENSOR_PCI_ERR ||
359 		dev->priv.health.fatal_error == MLX5_SENSOR_PCI_COMM_ERR	?
360 		MLX5_RECOVERY_DELAY_MSECS : MLX5_RECOVERY_NO_DELAY;
361 }
362 
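/*
 * Immediate work handler run when a fatal sensor first trips: tear the
 * device down through mlx5_handle_bad_state()/mlx5_disable_device() and,
 * unless recovery work has been drained, schedule health_recover() after
 * get_recovery_delay() (60s for PCI-related errors, otherwise immediately).
 */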
363 static void health_care(struct work_struct *work)
364 {
365 	struct mlx5_core_health *health;
366 	unsigned long recover_delay;
367 	struct mlx5_core_dev *dev;
368 	struct mlx5_priv *priv;
369 	unsigned long flags;
370 
371 	health = container_of(work, struct mlx5_core_health, work);
372 	priv = container_of(health, struct mlx5_priv, health);
373 	dev = container_of(priv, struct mlx5_core_dev, priv);
374 
375 	mlx5_core_warn(dev, "handling bad device here\n");
376 	mlx5_handle_bad_state(dev);
377 	recover_delay = msecs_to_jiffies(get_recovery_delay(dev));
378 
379 	spin_lock_irqsave(&health->wq_lock, flags);
380 	if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) {
381 		mlx5_core_warn(dev, "Scheduling recovery work with %lums delay\n",
382 			       recover_delay);
383 		schedule_delayed_work(&health->recover_work, recover_delay);
384 	} else {
385 		dev_err(&dev->pdev->dev,
386 			"new health works are not permitted at this stage\n");
387 	}
388 	spin_unlock_irqrestore(&health->wq_lock, flags);
389 }
390 
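/*
 * Schedule the next poll MLX5_HEALTH_POLL_INTERVAL ticks from now, plus up
 * to one second (HZ ticks) of random jitter.
 */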
391 static int get_next_poll_jiffies(void)
392 {
393 	unsigned long next;
394 
395 	get_random_bytes(&next, sizeof(next));
396 	next %= HZ;
397 	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
398 
399 	return next;
400 }
401 
402 void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
403 {
404 	struct mlx5_core_health *health = &dev->priv.health;
405 	unsigned long flags;
406 
407 	spin_lock_irqsave(&health->wq_lock, flags);
408 	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
409 		queue_work(health->wq, &health->work);
410 	else
411 		dev_err(&dev->pdev->dev,
412 			"new health works are not permitted at this stage\n");
413 	spin_unlock_irqrestore(&health->wq_lock, flags);
414 }
415 
416 static const char *hsynd_str(u8 synd)
417 {
418 	switch (synd) {
419 	case MLX5_HEALTH_SYNDR_FW_ERR:
420 		return "firmware internal error";
421 	case MLX5_HEALTH_SYNDR_IRISC_ERR:
422 		return "irisc not responding";
423 	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
424 		return "unrecoverable hardware error";
425 	case MLX5_HEALTH_SYNDR_CRC_ERR:
426 		return "firmware CRC error";
427 	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
428 		return "ICM fetch PCI error";
429 	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
430 		return "HW fatal error";
431 	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
432 		return "async EQ buffer overrun";
433 	case MLX5_HEALTH_SYNDR_EQ_ERR:
434 		return "EQ error";
435 	case MLX5_HEALTH_SYNDR_EQ_INV:
436 		return "Invalid EQ referenced";
437 	case MLX5_HEALTH_SYNDR_FFSER_ERR:
438 		return "FFSER error";
439 	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
440 		return "High temperature";
441 	default:
442 		return "unrecognized error";
443 	}
444 }
445 
446 static void print_health_info(struct mlx5_core_dev *dev)
447 {
448 	struct mlx5_core_health *health = &dev->priv.health;
449 	struct mlx5_health_buffer __iomem *h = health->health;
450 	char fw_str[18];
451 	u32 fw;
452 	int i;
453 
454 	/* If the syndrome is 0, the device is OK and there is no need to print the buffer */
455 	if (!ioread8(&h->synd))
456 		return;
457 
458 	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
459 		printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
460 
461 	printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
462 	printf("mlx5_core: INFO: ""assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
463 	snprintf(fw_str, sizeof(fw_str), "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
464 	printf("mlx5_core: INFO: ""fw_ver %s\n", fw_str);
465 	printf("mlx5_core: INFO: ""hw_id 0x%08x\n", ioread32be(&h->hw_id));
466 	printf("mlx5_core: INFO: ""irisc_index %d\n", ioread8(&h->irisc_index));
467 	printf("mlx5_core: INFO: ""synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
468 	printf("mlx5_core: INFO: ""ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
469 	fw = ioread32be(&h->fw_ver);
470 	printf("mlx5_core: INFO: ""raw fw_ver 0x%08x\n", fw);
471 }
472 
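/*
 * Timer callback: sample the firmware health counter, track consecutive
 * misses (printing the health buffer once MAX_MISSES is reached), latch the
 * first fatal sensor reading into health->fatal_error, kick the health
 * work queue, and re-arm the timer.
 */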
473 static void poll_health(unsigned long data)
474 {
475 	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
476 	struct mlx5_core_health *health = &dev->priv.health;
477 	u32 fatal_error;
478 	u32 count;
479 
480 	if (dev->state != MLX5_DEVICE_STATE_UP)
481 		return;
482 
483 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
484 		goto out;
485 
486 	count = ioread32be(health->health_counter);
487 	if (count == health->prev)
488 		++health->miss_counter;
489 	else
490 		health->miss_counter = 0;
491 
492 	health->prev = count;
493 	if (health->miss_counter == MAX_MISSES) {
494 		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
495 		print_health_info(dev);
496 	}
497 
498 	fatal_error = check_fatal_sensors(dev);
499 
500 	if (fatal_error && !health->fatal_error) {
501 		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
502 		dev->priv.health.fatal_error = fatal_error;
503 		print_health_info(dev);
504 		mlx5_trigger_health_work(dev);
505 	}
506 
507 out:
508 	mod_timer(&health->timer, get_next_poll_jiffies());
509 }
510 
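/*
 * Health poll life cycle: mlx5_start_health_poll() arms the timer,
 * mlx5_stop_health_poll() kills it (and, with disable_health, blocks any
 * new health/recovery work), and the drain helpers below cancel work that
 * is already queued.
 */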
511 void mlx5_start_health_poll(struct mlx5_core_dev *dev)
512 {
513 	struct mlx5_core_health *health = &dev->priv.health;
514 
515 	init_timer(&health->timer);
516 	health->fatal_error = MLX5_SENSOR_NO_ERR;
517 	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
518 	clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
519 	health->health = &dev->iseg->health;
520 	health->health_counter = &dev->iseg->health_counter;
521 
522 	setup_timer(&health->timer, poll_health, (unsigned long)dev);
523 	mod_timer(&health->timer,
524 		  round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL));
525 }
526 
527 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
528 {
529 	struct mlx5_core_health *health = &dev->priv.health;
530 	unsigned long flags;
531 
532 	if (disable_health) {
533 		spin_lock_irqsave(&health->wq_lock, flags);
534 		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
535 		set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
536 		spin_unlock_irqrestore(&health->wq_lock, flags);
537 	}
538 
539 	del_timer_sync(&health->timer);
540 }
541 
542 void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
543 {
544 	struct mlx5_core_health *health = &dev->priv.health;
545 	unsigned long flags;
546 
547 	spin_lock_irqsave(&health->wq_lock, flags);
548 	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
549 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
550 	spin_unlock_irqrestore(&health->wq_lock, flags);
551 	cancel_delayed_work_sync(&health->recover_work);
552 	cancel_work_sync(&health->work);
553 }
554 
555 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
556 {
557 	struct mlx5_core_health *health = &dev->priv.health;
558 	unsigned long flags;
559 
560 	spin_lock_irqsave(&health->wq_lock, flags);
561 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
562 	spin_unlock_irqrestore(&health->wq_lock, flags);
563 	cancel_delayed_work_sync(&dev->priv.health.recover_work);
564 }
565 
566 void mlx5_health_cleanup(struct mlx5_core_dev *dev)
567 {
568 	struct mlx5_core_health *health = &dev->priv.health;
569 
570 	destroy_workqueue(health->wq);
571 }
572 
573 #define HEALTH_NAME "mlx5_health"
574 int mlx5_health_init(struct mlx5_core_dev *dev)
575 {
576 	struct mlx5_core_health *health;
577 	char *name;
578 	int len;
579 
580 	health = &dev->priv.health;
581 	len = strlen(HEALTH_NAME) + strlen(dev_name(&dev->pdev->dev)) + 1; /* ':' */
582 	name = kmalloc(len + 1, GFP_KERNEL);
583 	if (!name)
584 		return -ENOMEM;
585 
586 	snprintf(name, len + 1, "%s:%s", HEALTH_NAME, dev_name(&dev->pdev->dev));
587 	health->wq = create_singlethread_workqueue(name);
588 	kfree(name);
589 	if (!health->wq)
590 		return -ENOMEM;
591 
592 	spin_lock_init(&health->wq_lock);
593 	INIT_WORK(&health->work, health_care);
594 	INIT_DELAYED_WORK(&health->recover_work, health_recover);
595 
596 	return 0;
597 }
598