xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_health.c (revision c0902569)
1 /*-
2  * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/random.h>
31 #include <linux/vmalloc.h>
32 #include <linux/hardirq.h>
33 #include <dev/mlx5/driver.h>
34 #include <dev/mlx5/mlx5_ifc.h>
35 #include "mlx5_core.h"
36 
37 #define	MLX5_HEALTH_POLL_INTERVAL	(2 * HZ)
38 #define	MAX_MISSES			3
39 
/*
 * NIC interface state, as read from bits [9:8] of the cmdq_addr_l_sz
 * word in the device's initialization segment (see get_nic_state()).
 */
enum {
	MLX5_NIC_IFC_FULL		= 0,
	MLX5_NIC_IFC_DISABLED		= 1,
	MLX5_NIC_IFC_NO_DRAM_NIC	= 2,
	MLX5_NIC_IFC_INVALID		= 3,
};

/* Bit positions in mlx5_core_health.flags. */
enum {
	/* When set, no new health/recovery work may be queued. */
	MLX5_DROP_NEW_HEALTH_WORK,
};
50 
51 static u8 get_nic_state(struct mlx5_core_dev *dev)
52 {
53 	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
54 }
55 
/*
 * Force completion of every command currently outstanding on the command
 * interface, so that callers blocked on a dead/erroring device are
 * released.  Called when the device enters the internal-error state.
 */
static void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
{
	unsigned long flags;
	u64 vector;

	/* wait for pending handlers to complete */
	synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	/*
	 * Busy command slots are the clear bits in cmd.bitmask; mask to the
	 * 2^log_sz slots that actually exist.
	 */
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	if (!vector)
		goto no_trig;

	/* Tag the vector so the handler knows these are forced completions. */
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%jx\n", (uintmax_t)vector);
	/* Run the completion handler outside the alloc_lock. */
	mlx5_cmd_comp_handler(dev, vector);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}
78 
79 static int in_fatal(struct mlx5_core_dev *dev)
80 {
81 	struct mlx5_core_health *health = &dev->priv.health;
82 	struct mlx5_health_buffer __iomem *h = health->health;
83 
84 	if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
85 		return 1;
86 
87 	if (ioread32be(&h->fw_ver) == 0xffffffff)
88 		return 1;
89 
90 	return 0;
91 }
92 
93 void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
94 {
95 	mutex_lock(&dev->intf_state_mutex);
96 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
97 		goto unlock;
98 		return;
99 	}
100 
101 	mlx5_core_err(dev, "start\n");
102 	if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
103 		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
104 		mlx5_trigger_cmd_completions(dev);
105 	}
106 
107 	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
108 	mlx5_core_err(dev, "end\n");
109 
110 unlock:
111 	mutex_unlock(&dev->intf_state_mutex);
112 }
113 
114 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
115 {
116 	u8 nic_state = get_nic_state(dev);
117 
118 	switch (nic_state) {
119 	case MLX5_NIC_IFC_FULL:
120 		mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
121 		break;
122 
123 	case MLX5_NIC_IFC_DISABLED:
124 		mlx5_core_warn(dev, "starting teardown\n");
125 		break;
126 
127 	case MLX5_NIC_IFC_NO_DRAM_NIC:
128 		mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
129 		break;
130 	default:
131 		mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n",
132 			       nic_state);
133 	}
134 
135 	mlx5_disable_device(dev);
136 }
137 
/*
 * Delayed-work handler that attempts device recovery after a health
 * failure.  Aborts if the NIC interface state reads back as INVALID;
 * otherwise hands off to mlx5_recover_device().
 */
static void health_recover(struct work_struct *work)
{
	struct mlx5_core_health *health;
	struct delayed_work *dwork;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	u8 nic_state;

	/* Walk back from the embedded work struct to the owning device. */
	dwork = container_of(work, struct delayed_work, work);
	health = container_of(dwork, struct mlx5_core_health, recover_work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	nic_state = get_nic_state(dev);
	if (nic_state == MLX5_NIC_IFC_INVALID) {
		dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
		return;
	}

	dev_err(&dev->pdev->dev, "starting health recovery flow\n");
	mlx5_recover_device(dev);
}
160 
161 /* How much time to wait until health resetting the driver (in msecs) */
162 #define MLX5_RECOVERY_DELAY_MSECS 60000
/*
 * Work handler invoked when poll_health() detects a fatal condition:
 * tears down the bad device and, unless new health work has been
 * blocked (MLX5_DROP_NEW_HEALTH_WORK), schedules the recovery work
 * after MLX5_RECOVERY_DELAY_MSECS.
 */
static void health_care(struct work_struct *work)
{
	unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
	struct mlx5_core_health *health;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	unsigned long flags;

	/* Recover the owning device from the embedded work struct. */
	health = container_of(work, struct mlx5_core_health, work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);
	mlx5_core_warn(dev, "handling bad device here\n");
	mlx5_handle_bad_state(dev);

	/* wq_lock serializes against mlx5_drain_health_wq() setting the flag. */
	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
		schedule_delayed_work(&health->recover_work, recover_delay);
	else
		dev_err(&dev->pdev->dev,
			"new health works are not permitted at this stage\n");
	spin_unlock_irqrestore(&health->wq_lock, flags);
}
185 
186 static int get_next_poll_jiffies(void)
187 {
188 	unsigned long next;
189 
190 	get_random_bytes(&next, sizeof(next));
191 	next %= HZ;
192 	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
193 
194 	return next;
195 }
196 
197 void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
198 {
199 	struct mlx5_core_health *health = &dev->priv.health;
200 	unsigned long flags;
201 
202 	spin_lock_irqsave(&health->wq_lock, flags);
203 	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
204 		queue_work(health->wq, &health->work);
205 	else
206 		dev_err(&dev->pdev->dev,
207 			"new health works are not permitted at this stage\n");
208 	spin_unlock_irqrestore(&health->wq_lock, flags);
209 }
210 
211 static const char *hsynd_str(u8 synd)
212 {
213 	switch (synd) {
214 	case MLX5_HEALTH_SYNDR_FW_ERR:
215 		return "firmware internal error";
216 	case MLX5_HEALTH_SYNDR_IRISC_ERR:
217 		return "irisc not responding";
218 	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
219 		return "unrecoverable hardware error";
220 	case MLX5_HEALTH_SYNDR_CRC_ERR:
221 		return "firmware CRC error";
222 	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
223 		return "ICM fetch PCI error";
224 	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
225 		return "HW fatal error\n";
226 	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
227 		return "async EQ buffer overrun";
228 	case MLX5_HEALTH_SYNDR_EQ_ERR:
229 		return "EQ error";
230 	case MLX5_HEALTH_SYNDR_EQ_INV:
231 		return "Invalid EQ referenced";
232 	case MLX5_HEALTH_SYNDR_FFSER_ERR:
233 		return "FFSER error";
234 	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
235 		return "High temprature";
236 	default:
237 		return "unrecognized error";
238 	}
239 }
240 
241 static void print_health_info(struct mlx5_core_dev *dev)
242 {
243 	struct mlx5_core_health *health = &dev->priv.health;
244 	struct mlx5_health_buffer __iomem *h = health->health;
245 	char fw_str[18];
246 	u32 fw;
247 	int i;
248 
249 	/* If the syndrom is 0, the device is OK and no need to print buffer */
250 	if (!ioread8(&h->synd))
251 		return;
252 
253 	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
254 		printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
255 
256 	printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
257 	printf("mlx5_core: INFO: ""assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
258 	snprintf(fw_str, sizeof(fw_str), "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
259 	printf("mlx5_core: INFO: ""fw_ver %s\n", fw_str);
260 	printf("mlx5_core: INFO: ""hw_id 0x%08x\n", ioread32be(&h->hw_id));
261 	printf("mlx5_core: INFO: ""irisc_index %d\n", ioread8(&h->irisc_index));
262 	printf("mlx5_core: INFO: ""synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
263 	printf("mlx5_core: INFO: ""ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
264 	fw = ioread32be(&h->fw_ver);
265 	printf("mlx5_core: INFO: ""raw fw_ver 0x%08x\n", fw);
266 }
267 
/*
 * Periodic timer callback monitoring device health.  Reads the firmware
 * health counter; if it stops advancing for MAX_MISSES consecutive
 * polls, the health buffer is dumped.  On a fatal condition the health
 * work is triggered once (guarded by ->sick).  Rearms itself via
 * mod_timer() at the 'out' label.
 */
static void poll_health(unsigned long data)
{
	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
	struct mlx5_core_health *health = &dev->priv.health;
	u32 count;

	/*
	 * NOTE(review): this early return does not rearm the timer, so
	 * polling stops permanently once the device leaves the UP state.
	 * It also appears to make the INTERNAL_ERROR check below
	 * unreachable (a device in INTERNAL_ERROR is not UP) - confirm
	 * which behavior was intended.
	 */
	if (dev->state != MLX5_DEVICE_STATE_UP)
		return;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	/* An unchanged counter means the firmware missed a heartbeat. */
	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
		print_health_info(dev);
	}

	/* ->sick ensures the health work is triggered only once per failure. */
	if (in_fatal(dev) && !health->sick) {
		health->sick = true;
		print_health_info(dev);
		mlx5_trigger_health_work(dev);
	}

out:
	mod_timer(&health->timer, get_next_poll_jiffies());
}
301 
302 void mlx5_start_health_poll(struct mlx5_core_dev *dev)
303 {
304 	struct mlx5_core_health *health = &dev->priv.health;
305 
306 	init_timer(&health->timer);
307 	health->sick = 0;
308 	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
309 	health->health = &dev->iseg->health;
310 	health->health_counter = &dev->iseg->health_counter;
311 
312 	setup_timer(&health->timer, poll_health, (unsigned long)dev);
313 	mod_timer(&health->timer,
314 		  round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL));
315 }
316 
317 void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
318 {
319 	struct mlx5_core_health *health = &dev->priv.health;
320 
321 	del_timer_sync(&health->timer);
322 }
323 
/*
 * Block any new health/recovery work from being queued, then wait for
 * already-queued work to finish.  The flag is set under wq_lock so it
 * cannot race with health_care()/mlx5_trigger_health_work() checking it.
 */
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	spin_unlock_irqrestore(&health->wq_lock, flags);
	/* Flag is set, so these cannot be re-queued once cancelled. */
	cancel_delayed_work_sync(&health->recover_work);
	cancel_work_sync(&health->work);
}
335 
336 void mlx5_health_cleanup(struct mlx5_core_dev *dev)
337 {
338 	struct mlx5_core_health *health = &dev->priv.health;
339 
340 	destroy_workqueue(health->wq);
341 }
342 
343 #define HEALTH_NAME "mlx5_health"
344 int mlx5_health_init(struct mlx5_core_dev *dev)
345 {
346 	struct mlx5_core_health *health;
347 	char *name;
348 	int len;
349 
350 	health = &dev->priv.health;
351 	len = strlen(HEALTH_NAME) + strlen(dev_name(&dev->pdev->dev));
352 	name = kmalloc(len + 1, GFP_KERNEL);
353 	if (!name)
354 		return -ENOMEM;
355 
356 	snprintf(name, len, "%s:%s", HEALTH_NAME, dev_name(&dev->pdev->dev));
357 	health->wq = create_singlethread_workqueue(name);
358 	kfree(name);
359 	if (!health->wq)
360 		return -ENOMEM;
361 
362 	spin_lock_init(&health->wq_lock);
363 	INIT_WORK(&health->work, health_care);
364 	INIT_DELAYED_WORK(&health->recover_work, health_recover);
365 
366 	return 0;
367 }
368