xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_health.c (revision 519774ea)
1 /*-
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/random.h>
31 #include <linux/vmalloc.h>
32 #include <linux/hardirq.h>
33 #include <dev/mlx5/driver.h>
34 #include <dev/mlx5/mlx5_ifc.h>
35 #include "mlx5_core.h"
36 
37 #define	MLX5_HEALTH_POLL_INTERVAL	(2 * HZ)
38 #define	MAX_MISSES			3
39 
40 enum {
41 	MLX5_NIC_IFC_FULL		= 0,
42 	MLX5_NIC_IFC_DISABLED		= 1,
43 	MLX5_NIC_IFC_NO_DRAM_NIC	= 2,
44 	MLX5_NIC_IFC_INVALID		= 3,
45 };
46 
47 enum {
48 	MLX5_DROP_NEW_HEALTH_WORK,
49 	MLX5_DROP_NEW_RECOVERY_WORK,
50 };
51 
52 static u8 get_nic_state(struct mlx5_core_dev *dev)
53 {
54 	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
55 }
56 
57 static void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
58 {
59 	unsigned long flags;
60 	u64 vector;
61 
62 	/* wait for pending handlers to complete */
63 	synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
64 	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
65 	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
66 	if (!vector)
67 		goto no_trig;
68 
69 	vector |= MLX5_TRIGGERED_CMD_COMP;
70 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
71 
72 	mlx5_core_dbg(dev, "vector 0x%jx\n", (uintmax_t)vector);
73 	mlx5_cmd_comp_handler(dev, vector);
74 	return;
75 
76 no_trig:
77 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
78 }
79 
80 static int in_fatal(struct mlx5_core_dev *dev)
81 {
82 	struct mlx5_core_health *health = &dev->priv.health;
83 	struct mlx5_health_buffer __iomem *h = health->health;
84 
85 	if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
86 		return 1;
87 
88 	if (ioread32be(&h->fw_ver) == 0xffffffff)
89 		return 1;
90 
91 	return 0;
92 }
93 
94 void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
95 {
96 	mutex_lock(&dev->intf_state_mutex);
97 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
98 		goto unlock;
99 		return;
100 	}
101 
102 	mlx5_core_err(dev, "start\n");
103 	if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
104 		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
105 		mlx5_trigger_cmd_completions(dev);
106 	}
107 
108 	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
109 	mlx5_core_err(dev, "end\n");
110 
111 unlock:
112 	mutex_unlock(&dev->intf_state_mutex);
113 }
114 
115 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
116 {
117 	u8 nic_state = get_nic_state(dev);
118 
119 	switch (nic_state) {
120 	case MLX5_NIC_IFC_FULL:
121 		mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
122 		break;
123 
124 	case MLX5_NIC_IFC_DISABLED:
125 		mlx5_core_warn(dev, "starting teardown\n");
126 		break;
127 
128 	case MLX5_NIC_IFC_NO_DRAM_NIC:
129 		mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
130 		break;
131 	default:
132 		mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n",
133 			       nic_state);
134 	}
135 
136 	mlx5_disable_device(dev);
137 }
138 
139 static void health_recover(struct work_struct *work)
140 {
141 	struct mlx5_core_health *health;
142 	struct delayed_work *dwork;
143 	struct mlx5_core_dev *dev;
144 	struct mlx5_priv *priv;
145 	u8 nic_state;
146 
147 	dwork = container_of(work, struct delayed_work, work);
148 	health = container_of(dwork, struct mlx5_core_health, recover_work);
149 	priv = container_of(health, struct mlx5_priv, health);
150 	dev = container_of(priv, struct mlx5_core_dev, priv);
151 
152 	nic_state = get_nic_state(dev);
153 	if (nic_state == MLX5_NIC_IFC_INVALID) {
154 		dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
155 		return;
156 	}
157 
158 	dev_err(&dev->pdev->dev, "starting health recovery flow\n");
159 	mlx5_recover_device(dev);
160 }
161 
/* Delay before the health recovery flow resets the driver (in msecs) */
163 #define MLX5_RECOVERY_DELAY_MSECS 60000
164 static void health_care(struct work_struct *work)
165 {
166 	unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
167 	struct mlx5_core_health *health;
168 	struct mlx5_core_dev *dev;
169 	struct mlx5_priv *priv;
170 	unsigned long flags;
171 
172 	health = container_of(work, struct mlx5_core_health, work);
173 	priv = container_of(health, struct mlx5_priv, health);
174 	dev = container_of(priv, struct mlx5_core_dev, priv);
175 	mlx5_core_warn(dev, "handling bad device here\n");
176 	mlx5_handle_bad_state(dev);
177 
178 	spin_lock_irqsave(&health->wq_lock, flags);
179 	if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
180 		schedule_delayed_work(&health->recover_work, recover_delay);
181 	else
182 		dev_err(&dev->pdev->dev,
183 			"new health works are not permitted at this stage\n");
184 	spin_unlock_irqrestore(&health->wq_lock, flags);
185 }
186 
187 static int get_next_poll_jiffies(void)
188 {
189 	unsigned long next;
190 
191 	get_random_bytes(&next, sizeof(next));
192 	next %= HZ;
193 	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
194 
195 	return next;
196 }
197 
198 void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
199 {
200 	struct mlx5_core_health *health = &dev->priv.health;
201 	unsigned long flags;
202 
203 	spin_lock_irqsave(&health->wq_lock, flags);
204 	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
205 		queue_work(health->wq, &health->work);
206 	else
207 		dev_err(&dev->pdev->dev,
208 			"new health works are not permitted at this stage\n");
209 	spin_unlock_irqrestore(&health->wq_lock, flags);
210 }
211 
212 static const char *hsynd_str(u8 synd)
213 {
214 	switch (synd) {
215 	case MLX5_HEALTH_SYNDR_FW_ERR:
216 		return "firmware internal error";
217 	case MLX5_HEALTH_SYNDR_IRISC_ERR:
218 		return "irisc not responding";
219 	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
220 		return "unrecoverable hardware error";
221 	case MLX5_HEALTH_SYNDR_CRC_ERR:
222 		return "firmware CRC error";
223 	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
224 		return "ICM fetch PCI error";
225 	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
226 		return "HW fatal error\n";
227 	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
228 		return "async EQ buffer overrun";
229 	case MLX5_HEALTH_SYNDR_EQ_ERR:
230 		return "EQ error";
231 	case MLX5_HEALTH_SYNDR_EQ_INV:
232 		return "Invalid EQ referenced";
233 	case MLX5_HEALTH_SYNDR_FFSER_ERR:
234 		return "FFSER error";
235 	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
236 		return "High temprature";
237 	default:
238 		return "unrecognized error";
239 	}
240 }
241 
242 static void print_health_info(struct mlx5_core_dev *dev)
243 {
244 	struct mlx5_core_health *health = &dev->priv.health;
245 	struct mlx5_health_buffer __iomem *h = health->health;
246 	char fw_str[18];
247 	u32 fw;
248 	int i;
249 
250 	/* If the syndrom is 0, the device is OK and no need to print buffer */
251 	if (!ioread8(&h->synd))
252 		return;
253 
254 	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
255 		printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
256 
257 	printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
258 	printf("mlx5_core: INFO: ""assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
259 	snprintf(fw_str, sizeof(fw_str), "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
260 	printf("mlx5_core: INFO: ""fw_ver %s\n", fw_str);
261 	printf("mlx5_core: INFO: ""hw_id 0x%08x\n", ioread32be(&h->hw_id));
262 	printf("mlx5_core: INFO: ""irisc_index %d\n", ioread8(&h->irisc_index));
263 	printf("mlx5_core: INFO: ""synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
264 	printf("mlx5_core: INFO: ""ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
265 	fw = ioread32be(&h->fw_ver);
266 	printf("mlx5_core: INFO: ""raw fw_ver 0x%08x\n", fw);
267 }
268 
269 static void poll_health(unsigned long data)
270 {
271 	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
272 	struct mlx5_core_health *health = &dev->priv.health;
273 	u32 count;
274 
275 	if (dev->state != MLX5_DEVICE_STATE_UP)
276 		return;
277 
278 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
279 		goto out;
280 
281 	count = ioread32be(health->health_counter);
282 	if (count == health->prev)
283 		++health->miss_counter;
284 	else
285 		health->miss_counter = 0;
286 
287 	health->prev = count;
288 	if (health->miss_counter == MAX_MISSES) {
289 		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
290 		print_health_info(dev);
291 	}
292 
293 	if (in_fatal(dev) && !health->sick) {
294 		health->sick = true;
295 		print_health_info(dev);
296 		mlx5_trigger_health_work(dev);
297 	}
298 
299 out:
300 	mod_timer(&health->timer, get_next_poll_jiffies());
301 }
302 
303 void mlx5_start_health_poll(struct mlx5_core_dev *dev)
304 {
305 	struct mlx5_core_health *health = &dev->priv.health;
306 
307 	init_timer(&health->timer);
308 	health->sick = 0;
309 	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
310 	clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
311 	health->health = &dev->iseg->health;
312 	health->health_counter = &dev->iseg->health_counter;
313 
314 	setup_timer(&health->timer, poll_health, (unsigned long)dev);
315 	mod_timer(&health->timer,
316 		  round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL));
317 }
318 
/*
 * Stop the periodic health poll, waiting for a running callback to
 * finish before returning.
 */
void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	del_timer_sync(&health->timer);
}
325 
326 void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
327 {
328 	struct mlx5_core_health *health = &dev->priv.health;
329 	unsigned long flags;
330 
331 	spin_lock_irqsave(&health->wq_lock, flags);
332 	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
333 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
334 	spin_unlock_irqrestore(&health->wq_lock, flags);
335 	cancel_delayed_work_sync(&health->recover_work);
336 	cancel_work_sync(&health->work);
337 }
338 
339 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
340 {
341 	struct mlx5_core_health *health = &dev->priv.health;
342 	unsigned long flags;
343 
344 	spin_lock_irqsave(&health->wq_lock, flags);
345 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
346 	spin_unlock_irqrestore(&health->wq_lock, flags);
347 	cancel_delayed_work_sync(&dev->priv.health.recover_work);
348 }
349 
/*
 * Tear down the health workqueue created by mlx5_health_init().
 * Callers are expected to have drained/stopped health work first.
 */
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	destroy_workqueue(health->wq);
}
356 
357 #define HEALTH_NAME "mlx5_health"
358 int mlx5_health_init(struct mlx5_core_dev *dev)
359 {
360 	struct mlx5_core_health *health;
361 	char *name;
362 	int len;
363 
364 	health = &dev->priv.health;
365 	len = strlen(HEALTH_NAME) + strlen(dev_name(&dev->pdev->dev));
366 	name = kmalloc(len + 1, GFP_KERNEL);
367 	if (!name)
368 		return -ENOMEM;
369 
370 	snprintf(name, len, "%s:%s", HEALTH_NAME, dev_name(&dev->pdev->dev));
371 	health->wq = create_singlethread_workqueue(name);
372 	kfree(name);
373 	if (!health->wq)
374 		return -ENOMEM;
375 
376 	spin_lock_init(&health->wq_lock);
377 	INIT_WORK(&health->work, health_care);
378 	INIT_DELAYED_WORK(&health->recover_work, health_recover);
379 
380 	return 0;
381 }
382