/*-
 * Copyright (c) 2013-2021, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/interrupt.h>
#include <linux/module.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

enum {
        MLX5_EQE_SIZE           = sizeof(struct mlx5_eqe),
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_NUM_SPARE_EQE      = 0x80,
        MLX5_NUM_ASYNC_EQE      = 0x100,
        MLX5_NUM_CMD_EQE        = 32,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};

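/*
 * Default bitmask of asynchronous events delivered on the async EQ.
 * Optional events (port module, NIC vport change, DCBX, FPGA,
 * temperature warning, general notification) are OR'ed in by
 * mlx5_start_eqs() when the corresponding device capability is set.
 */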
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST) | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT) | \
                               (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE))

struct map_eq_in {
        u64     mask;
        u32     reserved;
        u32     unmap_eqn;
};

struct cre_des_eq {
        u8      reserved[15];
        u8      eqn;
};

/* Function prototypes */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe);
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe);

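/* Issue the DESTROY_EQ firmware command for the given EQ number. */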
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

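/*
 * Return the EQE at the current consumer index if it is owned by
 * software, or NULL if hardware still owns it. The owner bit flips
 * meaning on every wrap of the queue, which (eq->cons_index & eq->nent)
 * tracks since nent is a power of two.
 */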
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
        switch (type) {
        case MLX5_EVENT_TYPE_COMP:
                return "MLX5_EVENT_TYPE_COMP";
        case MLX5_EVENT_TYPE_PATH_MIG:
                return "MLX5_EVENT_TYPE_PATH_MIG";
        case MLX5_EVENT_TYPE_COMM_EST:
                return "MLX5_EVENT_TYPE_COMM_EST";
        case MLX5_EVENT_TYPE_SQ_DRAINED:
                return "MLX5_EVENT_TYPE_SQ_DRAINED";
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return "MLX5_EVENT_TYPE_CQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return "MLX5_EVENT_TYPE_PORT_CHANGE";
        case MLX5_EVENT_TYPE_GPIO_EVENT:
                return "MLX5_EVENT_TYPE_GPIO_EVENT";
        case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
                return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
        case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
                return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
        case MLX5_EVENT_TYPE_STALL_EVENT:
                return "MLX5_EVENT_TYPE_STALL_EVENT";
        case MLX5_EVENT_TYPE_CMD:
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
        case MLX5_EVENT_TYPE_FPGA_ERROR:
                return "MLX5_EVENT_TYPE_FPGA_ERROR";
        case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
                return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
        case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
                return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
        case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
                return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
        default:
                return "Unrecognized event";
        }
}

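/*
 * Map a PORT_CHANGE EQE subtype to the driver-level event passed to
 * dev->event(); unrecognized subtypes map to -1.
 */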
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
        switch (subtype) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                return MLX5_DEV_EVENT_PORT_DOWN;
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                return MLX5_DEV_EVENT_PORT_UP;
        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                return MLX5_DEV_EVENT_PORT_INITIALIZED;
        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                return MLX5_DEV_EVENT_LID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                return MLX5_DEV_EVENT_PKEY_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                return MLX5_DEV_EVENT_GUID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                return MLX5_DEV_EVENT_CLIENT_REREG;
        }
        return -1;
}

static enum mlx5_dev_event dcbx_subevent(u8 subtype)
{
        switch (subtype) {
        case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
                return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
                return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
        case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
                return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
                return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
        }
        return -1;
}

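/*
 * Publish the consumer index to the EQ doorbell; with "arm" set the EQ
 * is also re-armed so the device generates another interrupt.
 */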
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32) cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static void
mlx5_temp_warning_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{

        mlx5_core_warn(dev,
            "High temperature on sensors with bit set %#jx %#jx\n",
            (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb),
            (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb));
}

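/*
 * Poll and dispatch all software-owned EQEs on the given EQ. Returns
 * non-zero if at least one EQE was consumed. The consumer index is
 * published at least every MLX5_NUM_SPARE_EQE entries so the HCA never
 * sees the queue as overflowed, and the EQ is re-armed on exit.
 */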
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int eqes_found = 0;
        int set_ci = 0;
        u32 cqn;
        u32 rsn;
        u8 port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                atomic_thread_fence_acq();

                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                    eq->eqn, eqe_type_str(eqe->type));

                if (dev->priv.eq_table.cb != NULL &&
                    dev->priv.eq_table.cb(dev, eqe->type, &eqe->data)) {
                        /* FALLTHROUGH */
                } else switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        mlx5_cq_completion(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_PATH_MIG:
                case MLX5_EVENT_TYPE_COMM_EST:
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                            eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_rsc_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                            eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_srq_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_CMD:
                        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                                mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector),
                                    MLX5_CMD_MODE_EVENTS);
                        }
                        break;

                case MLX5_EVENT_TYPE_PORT_CHANGE:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                                if (dev->event)
                                        dev->event(dev, port_subtype_event(eqe->sub_type),
                                            (unsigned long)port);
                                break;
                        default:
                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                                    port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
                        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
                        case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
                        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
                                if (dev->event)
                                        dev->event(dev,
                                            dcbx_subevent(eqe->sub_type),
                                            0);
                                break;
                        default:
                                mlx5_core_warn(dev,
                                    "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
                                    port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
                        mlx5_port_general_notification_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                            cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                                    func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
                        mlx5_port_module_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                        {
                                struct mlx5_eqe_vport_change *vc_eqe =
                                    &eqe->data.vport_change;
                                u16 vport_num = be16_to_cpu(vc_eqe->vport_num);

                                if (dev->event)
                                        dev->event(dev,
                                            MLX5_DEV_EVENT_VPORT_CHANGE,
                                            (unsigned long)vport_num);
                        }
                        if (dev->priv.eswitch != NULL)
                                mlx5_eswitch_vport_event(dev->priv.eswitch,
                                    eqe);
                        break;

                case MLX5_EVENT_TYPE_FPGA_ERROR:
                case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
                        mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
                        break;

                case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
                        mlx5_temp_warning_event(dev, eqe);
                        break;

                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                            eqe->type, eq->eqn);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events. We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        return eqes_found;
}

static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_core_dev *dev = eq->dev;

        /* check if IRQs are not disabled */
        if (likely(dev->priv.disable_irqs == 0))
                mlx5_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

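/*
 * Give every EQE the initial owner value so the entire queue starts
 * out owned by hardware.
 */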
static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

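/*
 * Allocate and initialize an EQ buffer, create the EQ in firmware with
 * the given event bitmask, attach its MSI-X vector and arm it. The
 * queue is sized to the next power of two above nent plus
 * MLX5_NUM_SPARE_EQE spare entries.
 */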
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
    int nent, u64 mask)
{
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;

        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
            &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
            MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        MLX5_SET64(create_eq_in, in, event_bitmask, mask);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
            eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                goto err_in;

        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = vecidx;
        eq->dev = dev;
        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
            "mlx5_core", eq);
        if (err)
                goto err_eq;
#ifdef RSS
        if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
                u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;

                err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
                    rss_getcpu(bucket % rss_getnumbuckets()));
                if (err)
                        goto err_irq;
        }
#else
        if (0)
                goto err_irq;
#endif

        /* EQs are created in ARMED state */
        eq_update_ci(eq, 1);

        kvfree(in);
        return 0;

err_irq:
        free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                    eq->eqn);
        mlx5_buf_free(dev, &eq->buf);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
        spin_lock_init(&dev->priv.eq_table.lock);

        return 0;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
}

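/*
 * Create the command, async and pages EQs in that order, switching the
 * command interface to event mode once the command EQ exists. The
 * async event mask is extended with optional events according to the
 * device capabilities.
 */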
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;

        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |=
                    (1ull << MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);

        if (MLX5_CAP_GEN(dev, nic_vport_change_event))
                async_event_mask |=
                    (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, dcbx))
                async_event_mask |=
                    (1ull << MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);

        if (MLX5_CAP_GEN(dev, fpga))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);

        if (MLX5_CAP_GEN(dev, temp_warn_event))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

        if (MLX5_CAP_GEN(dev, general_notification_event))
                async_event_mask |=
                    (1ull << MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT);

        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
            MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
        }

        mlx5_cmd_use_events(dev);

        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
            MLX5_NUM_ASYNC_EQE, async_event_mask);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        err = mlx5_create_map_eq(dev, &table->pages_eq,
            MLX5_EQ_VEC_PAGES,
            /* TODO: sriov max_vf + */ 1,
            1 << MLX5_EVENT_TYPE_PAGE_REQUEST);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

        return err;

err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        return err;
}

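/*
 * Tear down the pages, async and command EQs in reverse creation
 * order; the command interface is switched back to polling mode before
 * the command EQ goes away, and restored to event mode if destroying
 * it fails.
 */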
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                return err;

        mlx5_destroy_unmap_eq(dev, &table->async_eq);
        mlx5_cmd_use_polling(dev);

        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_cmd_use_events(dev);

        return err;
}

int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
    u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

        memset(out, 0, outlen);
        MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
        MLX5_SET(query_eq_in, in, eq_number, eq->eqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);

static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
{
        switch (error_type) {
        case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
                return "Power budget exceeded";
        case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
                return "Long Range for non MLNX cable";
        case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
                return "Bus stuck (I2C or data shorted)";
        case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
                return "No EEPROM/retry timeout";
        case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
                return "Enforce part number list";
        case MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE:
                return "Unknown identifier";
        case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
                return "High Temperature";
        case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
                return "Bad or shorted cable/module";
        case MLX5_MODULE_EVENT_ERROR_PMD_TYPE_NOT_ENABLED:
                return "PMD type is not enabled";
        case MLX5_MODULE_EVENT_ERROR_LASTER_TEC_FAILURE:
                return "Laser TEC failure";
        case MLX5_MODULE_EVENT_ERROR_HIGH_CURRENT:
                return "High current";
        case MLX5_MODULE_EVENT_ERROR_HIGH_VOLTAGE:
                return "High voltage";
        case MLX5_MODULE_EVENT_ERROR_PCIE_SYS_POWER_SLOT_EXCEEDED:
                return "PCIe system power slot exceeded";
        case MLX5_MODULE_EVENT_ERROR_HIGH_POWER:
                return "High power";
        case MLX5_MODULE_EVENT_ERROR_MODULE_STATE_MACHINE_FAULT:
                return "Module state machine fault";
        default:
                return "Unknown error type";
        }
}

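/*
 * Return the last module status recorded by mlx5_port_module_event(),
 * or 0 (undefined) for an out-of-range module number.
 */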
unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
{
        if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
                return 0;       /* undefined */
        return dev->module_status[module_num];
}

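/*
 * Handle a port module (cable/transceiver) event: log the new status,
 * bump the port-module-event counters and latch the status for
 * mlx5_query_module_status().
 */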
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe)
{
        unsigned int module_num;
        unsigned int module_status;
        unsigned int error_type;
        struct mlx5_eqe_port_module_event *module_event_eqe;

        module_event_eqe = &eqe->data.port_module_event;

        module_num = (unsigned int)module_event_eqe->module;
        module_status = (unsigned int)module_event_eqe->module_status &
            PORT_MODULE_EVENT_MODULE_STATUS_MASK;
        error_type = (unsigned int)module_event_eqe->error_type &
            PORT_MODULE_EVENT_ERROR_TYPE_MASK;

        if (module_status < MLX5_MODULE_STATUS_NUM)
                dev->priv.pme_stats.status_counters[module_status]++;

        switch (module_status) {
        case MLX5_MODULE_STATUS_PLUGGED_ENABLED:
                mlx5_core_info(dev,
                    "Module %u, status: plugged and enabled\n",
                    module_num);
                break;

        case MLX5_MODULE_STATUS_UNPLUGGED:
                mlx5_core_info(dev,
                    "Module %u, status: unplugged\n", module_num);
                break;

        case MLX5_MODULE_STATUS_ERROR:
                mlx5_core_err(dev,
                    "Module %u, status: error, %s (%d)\n",
                    module_num,
                    mlx5_port_module_event_error_type_to_string(error_type),
                    error_type);
                if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
                        dev->priv.pme_stats.error_counters[error_type]++;
                break;

        default:
                mlx5_core_info(dev,
                    "Module %u, unknown status %d\n", module_num, module_status);
        }

        /* store module status */
        if (module_num < MLX5_MAX_PORTS)
                dev->module_status[module_num] = module_status;
}

static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
    struct mlx5_eqe *eqe)
{
        u8 port = (eqe->data.port.port >> 4) & 0xf;

        switch (eqe->sub_type) {
        case MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT:
                break;
        case MLX5_GEN_EVENT_SUBTYPE_PCI_POWER_CHANGE_EVENT:
                mlx5_trigger_health_watchdog(dev);
                break;
        default:
                mlx5_core_warn(dev,
                    "general event with unrecognized subtype: port %d, sub_type %d\n",
                    port, eqe->sub_type);
                break;
        }
}

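/*
 * Disable the MSI-X vectors of the control EQs and all completion EQs.
 */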
void
mlx5_disable_interrupts(struct mlx5_core_dev *dev)
{
        int nvec = dev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
        int x;

        for (x = 0; x != nvec; x++)
                disable_irq(dev->priv.msix_arr[x].vector);
}

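/*
 * Synchronously poll the command, async, pages and all completion EQs;
 * does nothing while dev->priv.disable_irqs is set.
 */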
void
mlx5_poll_interrupts(struct mlx5_core_dev *dev)
{
        struct mlx5_eq *eq;

        if (unlikely(dev->priv.disable_irqs != 0))
                return;

        mlx5_eq_int(dev, &dev->priv.eq_table.cmd_eq);
        mlx5_eq_int(dev, &dev->priv.eq_table.async_eq);
        mlx5_eq_int(dev, &dev->priv.eq_table.pages_eq);

        list_for_each_entry(eq, &dev->priv.eq_table.comp_eqs_list, list)
                mlx5_eq_int(dev, eq);
}