xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_health.c (revision c2a1e807)
1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/random.h>
31 #include <linux/vmalloc.h>
32 #include <linux/hardirq.h>
33 #include <linux/delay.h>
34 #include <dev/mlx5/driver.h>
35 #include <dev/mlx5/mlx5_ifc.h>
36 #include "mlx5_core.h"
37 
/* Interval between consecutive samples of the FW health counter, in ticks. */
#define	MLX5_HEALTH_POLL_INTERVAL	(2 * HZ)
/* Consecutive unchanged health-counter reads before the FW is reported stuck. */
#define	MAX_MISSES			3

/* NIC interface mode, as read from the 3-bit field in iseg->cmdq_addr_l_sz. */
enum {
	MLX5_NIC_IFC_FULL		= 0,
	MLX5_NIC_IFC_DISABLED		= 1,
	MLX5_NIC_IFC_NO_DRAM_NIC	= 2,
	MLX5_NIC_IFC_SW_RESET		= 7,
};

/* Bits in mlx5_core_health.flags that gate queuing of new health/recovery work. */
enum {
	MLX5_DROP_NEW_HEALTH_WORK,
	MLX5_DROP_NEW_RECOVERY_WORK,
};

/* Fatal-error sensor identifiers; checked in this order by check_fatal_sensors(). */
enum  {
	MLX5_SENSOR_NO_ERR		= 0,
	MLX5_SENSOR_PCI_COMM_ERR	= 1,
	MLX5_SENSOR_PCI_ERR		= 2,
	MLX5_SENSOR_NIC_DISABLED	= 3,
	MLX5_SENSOR_NIC_SW_RESET	= 4,
	MLX5_SENSOR_FW_SYND_RFR		= 5,
};

/* hw.mlx5.fw_reset_enable: allow the driver to issue a firmware SW reset. */
static int mlx5_fw_reset_enable = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fw_reset_enable, CTLFLAG_RWTUN,
    &mlx5_fw_reset_enable, 0,
    "Enable firmware reset");

/* hw.mlx5.sw_reset_timeout: minimum spacing, in seconds, between two resets. */
static unsigned int sw_reset_to = 1200;
SYSCTL_UINT(_hw_mlx5, OID_AUTO, sw_reset_timeout, CTLFLAG_RWTUN,
    &sw_reset_to, 0,
    "Minimum timeout in seconds between two firmware resets");
72 
73 static int lock_sem_sw_reset(struct mlx5_core_dev *dev)
74 {
75 	int ret;
76 
77 	/* Lock GW access */
78 	ret = -mlx5_vsc_lock(dev);
79 	if (ret) {
80 		mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret);
81 		return ret;
82 	}
83 
84 	ret = -mlx5_vsc_lock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET);
85 	if (ret) {
86 		if (ret == -EBUSY)
87 			mlx5_core_dbg(dev, "SW reset FW semaphore already locked, another function will handle the reset\n");
88 		else
89 			mlx5_core_warn(dev, "SW reset semaphore lock return %d\n", ret);
90 	}
91 
92 	/* Unlock GW access */
93 	mlx5_vsc_unlock(dev);
94 
95 	return ret;
96 }
97 
98 static int unlock_sem_sw_reset(struct mlx5_core_dev *dev)
99 {
100 	int ret;
101 
102 	/* Lock GW access */
103 	ret = -mlx5_vsc_lock(dev);
104 	if (ret) {
105 		mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret);
106 		return ret;
107 	}
108 
109 	ret = -mlx5_vsc_unlock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET);
110 
111 	/* Unlock GW access */
112 	mlx5_vsc_unlock(dev);
113 
114 	return ret;
115 }
116 
117 static u8 get_nic_mode(struct mlx5_core_dev *dev)
118 {
119 	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
120 }
121 
122 static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
123 {
124 	struct mlx5_core_health *health = &dev->priv.health;
125 	struct mlx5_health_buffer __iomem *h = health->health;
126 	u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
127 	u8 synd = ioread8(&h->synd);
128 
129 	if (rfr && synd)
130 		mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
131 	return rfr && synd;
132 }
133 
134 static void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
135 {
136 	unsigned long flags;
137 	u64 vector;
138 
139 	/* wait for pending handlers to complete */
140 	synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
141 	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
142 	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
143 	if (!vector)
144 		goto no_trig;
145 
146 	vector |= MLX5_TRIGGERED_CMD_COMP;
147 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
148 
149 	mlx5_core_dbg(dev, "vector 0x%jx\n", (uintmax_t)vector);
150 	mlx5_cmd_comp_handler(dev, vector, MLX5_CMD_MODE_EVENTS);
151 	return;
152 
153 no_trig:
154 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
155 }
156 
157 static bool sensor_pci_no_comm(struct mlx5_core_dev *dev)
158 {
159 	struct mlx5_core_health *health = &dev->priv.health;
160 	struct mlx5_health_buffer __iomem *h = health->health;
161 	bool err = ioread32be(&h->fw_ver) == 0xffffffff;
162 
163 	return err;
164 }
165 
166 static bool sensor_nic_disabled(struct mlx5_core_dev *dev)
167 {
168 	return get_nic_mode(dev) == MLX5_NIC_IFC_DISABLED;
169 }
170 
171 static bool sensor_nic_sw_reset(struct mlx5_core_dev *dev)
172 {
173 	return get_nic_mode(dev) == MLX5_NIC_IFC_SW_RESET;
174 }
175 
176 static u32 check_fatal_sensors(struct mlx5_core_dev *dev)
177 {
178 	if (sensor_pci_no_comm(dev))
179 		return MLX5_SENSOR_PCI_COMM_ERR;
180 	if (pci_channel_offline(dev->pdev))
181 		return MLX5_SENSOR_PCI_ERR;
182 	if (sensor_nic_disabled(dev))
183 		return MLX5_SENSOR_NIC_DISABLED;
184 	if (sensor_nic_sw_reset(dev))
185 		return MLX5_SENSOR_NIC_SW_RESET;
186 	if (sensor_fw_synd_rfr(dev))
187 		return MLX5_SENSOR_FW_SYND_RFR;
188 
189 	return MLX5_SENSOR_NO_ERR;
190 }
191 
/*
 * Initiate a firmware SW reset by writing MLX5_NIC_IFC_SW_RESET into the
 * NIC interface field of the initialization segment. Skipped when disabled
 * by the sysctl, not advertised by FW, or when a reset would be redundant
 * or useless (PCI-level failure).
 */
static void reset_fw_if_needed(struct mlx5_core_dev *dev)
{
	bool supported;
	u32 cmdq_addr, fatal_error;

	if (!mlx5_fw_reset_enable)
		return;
	/* FW advertises SW-reset support via a bit in iseg->initializing. */
	supported = (ioread32be(&dev->iseg->initializing) >>
	    MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
	if (!supported)
		return;

	/* The reset only needs to be issued by one PF. The health buffer is
	 * shared between all functions, and will be cleared during a reset.
	 * Check again to avoid a redundant 2nd reset. If the fatal error was
	 * PCI related a reset won't help.
	 */
	fatal_error = check_fatal_sensors(dev);
	if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
	    fatal_error == MLX5_SENSOR_NIC_DISABLED ||
	    fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
		mlx5_core_warn(dev, "Not issuing FW reset. Either it's already done or won't help.\n");
		return;
	}

	mlx5_core_warn(dev, "Issuing FW Reset\n");
	/* Write the NIC interface field to initiate the reset, the command
	 * interface address also resides here, don't overwrite it.
	 */
	cmdq_addr = ioread32be(&dev->iseg->cmdq_addr_l_sz);
	iowrite32be((cmdq_addr & 0xFFFFF000) |
		    MLX5_NIC_IFC_SW_RESET << MLX5_NIC_IFC_OFFSET,
		    &dev->iseg->cmdq_addr_l_sz);
}
226 
227 static bool
228 mlx5_health_allow_reset(struct mlx5_core_dev *dev)
229 {
230 	struct mlx5_core_health *health = &dev->priv.health;
231 	unsigned int delta;
232 	bool ret;
233 
234 	if (health->last_reset_req != 0) {
235 		delta = ticks - health->last_reset_req;
236 		delta /= hz;
237 		ret = delta >= sw_reset_to;
238 	} else {
239 		ret = true;
240 	}
241 
242 	/*
243 	 * In principle, ticks may be 0. Setting it to off by one (-1)
244 	 * to prevent certain reset in next request.
245 	 */
246 	health->last_reset_req = ticks ? : -1;
247 	if (!ret)
248 		mlx5_core_warn(dev, "Firmware reset elided due to "
249 		    "auto-reset frequency threshold.\n");
250 	return (ret);
251 }
252 
#define MLX5_CRDUMP_WAIT_MS	60000
#define MLX5_FW_RESET_WAIT_MS	1000
#define MLX5_NIC_STATE_POLL_MS	5
/*
 * Move the device into the INTERNAL_ERROR state, releasing all command
 * waiters, optionally capturing a cr-dump and issuing a FW SW reset, and
 * finally notifying interfaces with MLX5_DEV_EVENT_SYS_ERROR.
 *
 * @force: skip sensor checks and the dump/reset flow; just flip the state
 *         and broadcast the error event.
 */
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	int end, delay_ms = MLX5_CRDUMP_WAIT_MS;
	u32 fatal_error;
	int lock = -EBUSY;

	fatal_error = check_fatal_sensors(dev);

	if (fatal_error || force) {
		/* xchg makes this idempotent: only the first caller past
		 * this point runs the error flow. */
		if (xchg(&dev->state, MLX5_DEVICE_STATE_INTERNAL_ERROR) ==
		    MLX5_DEVICE_STATE_INTERNAL_ERROR)
			return;
		if (!force)
			mlx5_core_err(dev, "internal state error detected\n");
		mlx5_trigger_cmd_completions(dev);
	}

	mutex_lock(&dev->intf_state_mutex);

	if (force)
		goto err_state_done;

	if (fatal_error == MLX5_SENSOR_FW_SYND_RFR &&
	    mlx5_health_allow_reset(dev)) {
		/* Get cr-dump and reset FW semaphore */
		if (mlx5_core_is_pf(dev))
			lock = lock_sem_sw_reset(dev);

		/* Execute cr-dump and SW reset; -EBUSY means another
		 * function owns the semaphore and handles the reset. */
		if (lock != -EBUSY) {
			mlx5_fwdump(dev);
			reset_fw_if_needed(dev);
			delay_ms = MLX5_FW_RESET_WAIT_MS;
		}
	}

	/* Recover from SW reset: poll until the NIC interface reports
	 * DISABLED or the (reset- or crdump-sized) timeout expires. */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (sensor_nic_disabled(dev))
			break;

		msleep(MLX5_NIC_STATE_POLL_MS);
	} while (!time_after(jiffies, end));

	if (!sensor_nic_disabled(dev)) {
		dev_err(&dev->pdev->dev, "NIC IFC still %d after %ums.\n",
			get_nic_mode(dev), delay_ms);
	}

	/* Release FW semaphore if you are the lock owner */
	if (!lock)
		unlock_sem_sw_reset(dev);

	mlx5_core_err(dev, "system error event triggered\n");

err_state_done:
	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
	mutex_unlock(&dev->intf_state_mutex);
}
316 
317 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
318 {
319 	u8 nic_mode = get_nic_mode(dev);
320 
321 	if (nic_mode == MLX5_NIC_IFC_SW_RESET) {
322 		/* The IFC mode field is 3 bits, so it will read 0x7 in two cases:
323 		 * 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
324 		 *    and this is a VF), this is not recoverable by SW reset.
325 		 *    Logging of this is handled elsewhere.
326 		 * 2. FW reset has been issued by another function, driver can
327 		 *    be reloaded to recover after the mode switches to
328 		 *    MLX5_NIC_IFC_DISABLED.
329 		 */
330 		if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
331 			mlx5_core_warn(dev, "NIC SW reset is already progress\n");
332 		else
333 			mlx5_core_warn(dev, "Communication with FW over the PCI link is down\n");
334 	} else {
335 		mlx5_core_warn(dev, "NIC mode %d\n", nic_mode);
336 	}
337 
338 	mlx5_disable_device(dev);
339 }
340 
341 #define MLX5_FW_RESET_WAIT_MS	1000
342 #define MLX5_NIC_STATE_POLL_MS	5
343 static void health_recover(struct work_struct *work)
344 {
345 	unsigned long end = jiffies + msecs_to_jiffies(MLX5_FW_RESET_WAIT_MS);
346 	struct mlx5_core_health *health;
347 	struct delayed_work *dwork;
348 	struct mlx5_core_dev *dev;
349 	struct mlx5_priv *priv;
350 	bool recover = true;
351 	u8 nic_mode;
352 
353 	dwork = container_of(work, struct delayed_work, work);
354 	health = container_of(dwork, struct mlx5_core_health, recover_work);
355 	priv = container_of(health, struct mlx5_priv, health);
356 	dev = container_of(priv, struct mlx5_core_dev, priv);
357 
358 	mtx_lock(&Giant);	/* XXX newbus needs this */
359 
360 	if (sensor_pci_no_comm(dev)) {
361 		dev_err(&dev->pdev->dev, "health recovery flow aborted, PCI reads still not working\n");
362 		recover = false;
363 	}
364 
365 	nic_mode = get_nic_mode(dev);
366 	while (nic_mode != MLX5_NIC_IFC_DISABLED &&
367 	       !time_after(jiffies, end)) {
368 		msleep(MLX5_NIC_STATE_POLL_MS);
369 		nic_mode = get_nic_mode(dev);
370 	}
371 
372 	if (nic_mode != MLX5_NIC_IFC_DISABLED) {
373 		dev_err(&dev->pdev->dev, "health recovery flow aborted, unexpected NIC IFC mode %d.\n",
374 			nic_mode);
375 		recover = false;
376 	}
377 
378 	if (recover) {
379 		dev_err(&dev->pdev->dev, "starting health recovery flow\n");
380 		mlx5_recover_device(dev);
381 	}
382 
383 	mtx_unlock(&Giant);
384 }
385 
386 /* How much time to wait until health resetting the driver (in msecs) */
387 #define MLX5_RECOVERY_DELAY_MSECS 60000
388 #define MLX5_RECOVERY_NO_DELAY 0
389 static unsigned long get_recovery_delay(struct mlx5_core_dev *dev)
390 {
391 	return dev->priv.health.fatal_error == MLX5_SENSOR_PCI_ERR ||
392 		dev->priv.health.fatal_error == MLX5_SENSOR_PCI_COMM_ERR	?
393 		MLX5_RECOVERY_DELAY_MSECS : MLX5_RECOVERY_NO_DELAY;
394 }
395 
396 static void health_care(struct work_struct *work)
397 {
398 	struct mlx5_core_health *health;
399 	unsigned long recover_delay;
400 	struct mlx5_core_dev *dev;
401 	struct mlx5_priv *priv;
402 	unsigned long flags;
403 
404 	health = container_of(work, struct mlx5_core_health, work);
405 	priv = container_of(health, struct mlx5_priv, health);
406 	dev = container_of(priv, struct mlx5_core_dev, priv);
407 
408 	mlx5_core_warn(dev, "handling bad device here\n");
409 	mlx5_handle_bad_state(dev);
410 	recover_delay = msecs_to_jiffies(get_recovery_delay(dev));
411 
412 	spin_lock_irqsave(&health->wq_lock, flags);
413 	if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) {
414 		mlx5_core_warn(dev, "Scheduling recovery work with %lums delay\n",
415 			       recover_delay);
416 		schedule_delayed_work(&health->recover_work, recover_delay);
417 	} else {
418 		dev_err(&dev->pdev->dev,
419 			"new health works are not permitted at this stage\n");
420 	}
421 	spin_unlock_irqrestore(&health->wq_lock, flags);
422 }
423 
424 static int get_next_poll_jiffies(void)
425 {
426 	unsigned long next;
427 
428 	get_random_bytes(&next, sizeof(next));
429 	next %= HZ;
430 	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
431 
432 	return next;
433 }
434 
435 void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
436 {
437 	struct mlx5_core_health *health = &dev->priv.health;
438 	unsigned long flags;
439 
440 	spin_lock_irqsave(&health->wq_lock, flags);
441 	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
442 		queue_work(health->wq, &health->work);
443 	else
444 		dev_err(&dev->pdev->dev,
445 			"new health works are not permitted at this stage\n");
446 	spin_unlock_irqrestore(&health->wq_lock, flags);
447 }
448 
449 static const char *hsynd_str(u8 synd)
450 {
451 	switch (synd) {
452 	case MLX5_HEALTH_SYNDR_FW_ERR:
453 		return "firmware internal error";
454 	case MLX5_HEALTH_SYNDR_IRISC_ERR:
455 		return "irisc not responding";
456 	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
457 		return "unrecoverable hardware error";
458 	case MLX5_HEALTH_SYNDR_CRC_ERR:
459 		return "firmware CRC error";
460 	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
461 		return "ICM fetch PCI error";
462 	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
463 		return "HW fatal error\n";
464 	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
465 		return "async EQ buffer overrun";
466 	case MLX5_HEALTH_SYNDR_EQ_ERR:
467 		return "EQ error";
468 	case MLX5_HEALTH_SYNDR_EQ_INV:
469 		return "Invalid EQ referenced";
470 	case MLX5_HEALTH_SYNDR_FFSER_ERR:
471 		return "FFSER error";
472 	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
473 		return "High temprature";
474 	default:
475 		return "unrecognized error";
476 	}
477 }
478 
/*
 * Dump the device health buffer (assert vars, pointers, syndromes and FW
 * version info) to the console. Prints nothing when the syndrome is 0.
 */
static void print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct mlx5_health_buffer __iomem *h = health->health;
	char fw_str[18];
	u32 fw;
	int i;

	/* If the syndrome is 0, the device is OK and no need to print buffer */
	if (!ioread8(&h->synd))
		return;

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));

	printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
	printf("mlx5_core: INFO: ""assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
	snprintf(fw_str, sizeof(fw_str), "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
	printf("mlx5_core: INFO: ""fw_ver %s\n", fw_str);
	printf("mlx5_core: INFO: ""hw_id 0x%08x\n", ioread32be(&h->hw_id));
	printf("mlx5_core: INFO: ""irisc_index %d\n", ioread8(&h->irisc_index));
	printf("mlx5_core: INFO: ""synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
	printf("mlx5_core: INFO: ""ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
	fw = ioread32be(&h->fw_ver);
	printf("mlx5_core: INFO: ""raw fw_ver 0x%08x\n", fw);
}
505 
/*
 * Health timer callback, re-armed every MLX5_HEALTH_POLL_INTERVAL plus
 * random jitter. Samples the FW health counter to detect a hung firmware
 * and runs the fatal-error sensors, kicking the health work on the first
 * fatal error observed.
 */
static void poll_health(unsigned long data)
{
	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
	struct mlx5_core_health *health = &dev->priv.health;
	u32 fatal_error;
	u32 count;

	/* Polling stops for good (timer is not re-armed) once the device is
	 * no longer UP. */
	if (dev->state != MLX5_DEVICE_STATE_UP)
		return;

	/* NOTE(review): dev->state is re-read here; this branch can only be
	 * taken if another context moved the device to INTERNAL_ERROR
	 * between the two reads (see xchg in mlx5_enter_error_state()). In
	 * that case the timer stays armed but the sensors are skipped. */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	/* A live FW keeps incrementing health_counter; an unchanged read
	 * counts as a miss. */
	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
		print_health_info(dev);
	}

	fatal_error = check_fatal_sensors(dev);

	/* Only the first transition into a fatal state triggers the health
	 * work; fatal_error is latched until the next start of polling. */
	if (fatal_error && !health->fatal_error) {
		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
		dev->priv.health.fatal_error = fatal_error;
		print_health_info(dev);
		mlx5_trigger_health_work(dev);
	}

out:
	mod_timer(&health->timer, get_next_poll_jiffies());
}
543 
/*
 * Reset per-poll health state, point the health buffer/counter at the
 * device's initialization segment, and arm the periodic poll timer.
 */
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	/* NOTE(review): init_timer() followed by setup_timer() below looks
	 * redundant since setup_timer() also initializes the timer — confirm
	 * against the linuxkpi implementation before removing. */
	init_timer(&health->timer);
	health->fatal_error = MLX5_SENSOR_NO_ERR;
	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	/* Health buffer and counter live in the BAR-mapped init segment. */
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	setup_timer(&health->timer, poll_health, (unsigned long)dev);
	mod_timer(&health->timer,
		  round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL));
}
559 
560 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
561 {
562 	struct mlx5_core_health *health = &dev->priv.health;
563 	unsigned long flags;
564 
565 	if (disable_health) {
566 		spin_lock_irqsave(&health->wq_lock, flags);
567 		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
568 		set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
569 		spin_unlock_irqrestore(&health->wq_lock, flags);
570 	}
571 
572 	del_timer_sync(&health->timer);
573 }
574 
575 void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
576 {
577 	struct mlx5_core_health *health = &dev->priv.health;
578 	unsigned long flags;
579 
580 	spin_lock_irqsave(&health->wq_lock, flags);
581 	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
582 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
583 	spin_unlock_irqrestore(&health->wq_lock, flags);
584 	cancel_delayed_work_sync(&health->recover_work);
585 	cancel_work_sync(&health->work);
586 }
587 
588 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
589 {
590 	struct mlx5_core_health *health = &dev->priv.health;
591 	unsigned long flags;
592 
593 	spin_lock_irqsave(&health->wq_lock, flags);
594 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
595 	spin_unlock_irqrestore(&health->wq_lock, flags);
596 	cancel_delayed_work_sync(&dev->priv.health.recover_work);
597 }
598 
599 void mlx5_health_cleanup(struct mlx5_core_dev *dev)
600 {
601 	struct mlx5_core_health *health = &dev->priv.health;
602 
603 	destroy_workqueue(health->wq);
604 }
605 
606 #define HEALTH_NAME "mlx5_health"
607 int mlx5_health_init(struct mlx5_core_dev *dev)
608 {
609 	struct mlx5_core_health *health;
610 	char *name;
611 	int len;
612 
613 	health = &dev->priv.health;
614 	len = strlen(HEALTH_NAME) + strlen(dev_name(&dev->pdev->dev));
615 	name = kmalloc(len + 1, GFP_KERNEL);
616 	if (!name)
617 		return -ENOMEM;
618 
619 	snprintf(name, len, "%s:%s", HEALTH_NAME, dev_name(&dev->pdev->dev));
620 	health->wq = create_singlethread_workqueue(name);
621 	kfree(name);
622 	if (!health->wq)
623 		return -ENOMEM;
624 
625 	spin_lock_init(&health->wq_lock);
626 	INIT_WORK(&health->work, health_care);
627 	INIT_DELAYED_WORK(&health->recover_work, health_recover);
628 
629 	return 0;
630 }
631