/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"

enum {
	MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
/* The polling budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee
 * that we update the CI before we have polled all the entries in the EQ.
 * MLX5_NUM_SPARE_EQE is added when the EQ size is set, so the budget is
 * always smaller than the EQ size.
 */
enum {
	MLX5_EQ_POLLING_BUDGET = 128,
};

static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);

struct mlx5_eq_table {
	struct list_head	comp_eqs_list;
	struct mlx5_eq_async	pages_eq;
	struct mlx5_eq_async	cmd_eq;
	struct mlx5_eq_async	async_eq;

	struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];

	/* CQ error events are reported on the async EQ, hence this notifier */
	struct mlx5_nb		cq_err_nb;

	struct mutex		lock; /* sync async eqs creations */
	int			num_comp_eqs;
	struct mlx5_irq_table	*irq_table;
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
	return mlx5_cmd_exec_in(dev, destroy_eq, in);
}

/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *cq = NULL;

	rcu_read_lock();
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		mlx5_cq_hold(cq);
	rcu_read_unlock();

	return cq;
}

static int mlx5_eq_comp_int(struct notifier_block *nb,
			    __always_unused unsigned long action,
			    __always_unused void *data)
{
	struct mlx5_eq_comp *eq_comp =
		container_of(nb, struct mlx5_eq_comp, irq_nb);
	struct mlx5_eq *eq = &eq_comp->core;
	struct mlx5_eqe *eqe;
	int num_eqes = 0;
	u32 cqn = -1;

	eqe = next_eqe_sw(eq);
	if (!eqe)
		return 0;

	do {
		struct mlx5_core_cq *cq;

		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
		cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

		cq = mlx5_eq_cq_get(eq, cqn);
		if (likely(cq)) {
			++cq->arm_sn;
			cq->comp(cq, eqe);
			mlx5_cq_put(cq);
		} else {
			dev_dbg_ratelimited(eq->dev->device,
					    "Completion event for bogus CQ 0x%x\n", cqn);
		}

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
	eq_update_ci(eq, 1);

	if (cqn != -1)
		tasklet_schedule(&eq_comp->tasklet_ctx.task);

	return 0;
}

/* Some architectures don't latch interrupts while they are disabled, so
 * mlx5_eq_poll_irq_disabled() could itself lose the interrupts it is trying
 * not to lose. Avoid using it except as a last resort.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
	u32 count_eqe;

	disable_irq(eq->core.irqn);
	count_eqe = eq->core.cons_index;
	mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
	count_eqe = eq->core.cons_index - count_eqe;
	enable_irq(eq->core.irqn);

	return count_eqe;
}

static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}

static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}

enum async_eq_nb_action {
	ASYNC_EQ_IRQ_HANDLER = 0,
	ASYNC_EQ_RECOVER = 1,
};

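/* Async EQ handler, serving both the IRQ path and the polled recovery path.
 * Each polled EQE is fanned out to the notifier chain for its event type and
 * to the MLX5_EVENT_TYPE_NOTIFY_ANY chain. In recovery mode the return value
 * is the number of EQEs polled rather than a notifier status.
 */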
static int mlx5_eq_async_int(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct mlx5_eq_async *eq_async =
		container_of(nb, struct mlx5_eq_async, irq_nb);
	struct mlx5_eq *eq = &eq_async->core;
	struct mlx5_eq_table *eqt;
	struct mlx5_core_dev *dev;
	struct mlx5_eqe *eqe;
	unsigned long flags;
	int num_eqes = 0;
	bool recovery;

	dev = eq->dev;
	eqt = dev->priv.eq_table;

	recovery = action == ASYNC_EQ_RECOVER;
	mlx5_eq_async_int_lock(eq_async, recovery, &flags);

	eqe = next_eqe_sw(eq);
	if (!eqe)
		goto out;

	do {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);

		++eq->cons_index;

	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
	eq_update_ci(eq, 1);

out:
	mlx5_eq_async_int_unlock(eq_async, recovery, &flags);

	return unlikely(recovery) ? num_eqes : 0;
}

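/* Poll the command EQ in recovery mode to pick up any command-completion
 * EQEs that were missed (e.g. due to a lost interrupt).
 */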
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
	int eqes;

	eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
	if (eqes)
		mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}

static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq_get_size(eq); i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

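/* Allocate the EQE buffer (sized with MLX5_NUM_SPARE_EQE of slack),
 * initialize it, and issue the CREATE_EQ command to map the EQ to interrupt
 * vector param->irq_index. On success the EQ number, IRQ number and doorbell
 * address are recorded in @eq.
 */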
static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
	      struct mlx5_eq_param *param)
{
	u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
	struct mlx5_cq_table *cq_table = &eq->cq_table;
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
	struct mlx5_priv *priv = &dev->priv;
	u8 vecidx = param->irq_index;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;
	int i;

	/* Init CQ table */
	memset(cq_table, 0, sizeof(*cq_table));
	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	eq->cons_index = 0;

	err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
				       &eq->frag_buf, dev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_frag_array(&eq->frag_buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
		MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);

	for (i = 0; i < 4; i++)
		MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
				 param->mask[i]);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	eq->vecidx = vecidx;
	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
	eq->dev = dev;
	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_eq;

	kvfree(in);
	return 0;

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_frag_buf_free(dev, &eq->frag_buf);
	return err;
}

/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev: Device which owns the eq
 * @eq: EQ to enable
 * @nb: Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * Return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		   struct notifier_block *nb)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
	if (!err)
		eq_update_ci(eq, 1);

	return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);

/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev: Device which owns the eq
 * @eq: EQ to disable
 * @nb: Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		     struct notifier_block *nb)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;

	mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);

static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	mlx5_debug_eq_remove(dev, eq);

	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(eq->irqn);

	mlx5_frag_buf_free(dev, &eq->frag_buf);

	return err;
}

int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	int err;

	spin_lock(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock(&table->lock);

	return err;
}

void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;
	struct mlx5_core_cq *tmp;

	spin_lock(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	if (!tmp) {
		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			      cq->cqn, eq->eqn);
		return;
	}

	if (tmp != cq)
		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			      cq->cqn, eq->eqn);
}

int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table;
	int i;

	eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
	if (!eq_table)
		return -ENOMEM;

	dev->priv.eq_table = eq_table;

	mlx5_eq_debugfs_init(dev);

	mutex_init(&eq_table->lock);
	for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
		ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);

	eq_table->irq_table = mlx5_irq_table_get(dev);
	return 0;
}

void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
	kvfree(dev->priv.eq_table);
}

/* Async EQs */

static int create_async_eq(struct mlx5_core_dev *dev,
			   struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	/* Async EQs must share irq index 0 */
	if (param->irq_index != 0) {
		err = -EINVAL;
		goto unlock;
	}

	err = create_map_eq(dev, eq, param);
unlock:
	mutex_unlock(&eq_table->lock);
	return err;
}

static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int err;

	mutex_lock(&eq_table->lock);
	err = destroy_unmap_eq(dev, eq);
	mutex_unlock(&eq_table->lock);
	return err;
}

static int cq_err_event_notifier(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_eq_table *eqt;
	struct mlx5_core_cq *cq;
	struct mlx5_eqe *eqe;
	struct mlx5_eq *eq;
	u32 cqn;

	/* type == MLX5_EVENT_TYPE_CQ_ERROR */

	eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
	eq  = &eqt->async_eq.core;
	eqe = data;

	cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
	mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
		       cqn, eqe->data.cq_err.syndrome);

	cq = mlx5_eq_cq_get(eq, cqn);
	if (unlikely(!cq)) {
		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
		return NOTIFY_OK;
	}

	if (cq->event)
		cq->event(cq, type);

	mlx5_cq_put(cq);

	return NOTIFY_OK;
}

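/* Merge the user affiliated and unaffiliated event bits advertised in the
 * device event capabilities into the 256-bit async event mask.
 */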
static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
	__be64 *user_unaffiliated_events;
	__be64 *user_affiliated_events;
	int i;

	user_affiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
	user_unaffiliated_events =
		MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);

	for (i = 0; i < 4; i++)
		mask[i] |= be64_to_cpu(user_affiliated_events[i] |
				       user_unaffiliated_events[i]);
}

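/* Build the async EQ event mask: start from the baseline
 * MLX5_ASYNC_EVENT_MASK and enable additional event types only when the
 * corresponding device capability is reported.
 */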
static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;

	if (MLX5_VPORT_MANAGER(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, general_notification_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
	else
		mlx5_core_dbg(dev, "port_module_event is not set\n");

	if (MLX5_PPS_CAP(dev))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
	if (MLX5_CAP_GEN_MAX(dev, dct))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);

	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);

	if (mlx5_eswitch_is_funcs_handler(dev))
		async_event_mask |=
			(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);

	if (MLX5_CAP_GEN_MAX(dev, vhca_state))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);

	mask[0] = async_event_mask;

	if (MLX5_CAP_GEN(dev, event_cap))
		gather_user_async_events(dev, mask);
}

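/* Create one async EQ, then attach its notifier and arm it; on enable
 * failure the EQ is destroyed again so the caller sees all-or-nothing.
 */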
static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
	       struct mlx5_eq_param *param, const char *name)
{
	int err;

	eq->irq_nb.notifier_call = mlx5_eq_async_int;
	spin_lock_init(&eq->lock);

	err = create_async_eq(dev, &eq->core, param);
	if (err) {
		mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
		return err;
	}
	err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
	if (err) {
		mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
		destroy_async_eq(dev, &eq->core);
	}
	return err;
}

static void cleanup_async_eq(struct mlx5_core_dev *dev,
			     struct mlx5_eq_async *eq, const char *name)
{
	int err;

	mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
	err = destroy_async_eq(dev, &eq->core);
	if (err)
		mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
			      name, err);
}

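/* Create the async EQs in dependency order: the cmd EQ first (so the command
 * interface can switch from polling to event-driven completions), then the
 * general async EQ, and finally the pages EQ for page requests.
 */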
static int create_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_param param = {};
	int err;

	MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
	mlx5_eq_notifier_register(dev, &table->cq_err_nb);

	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = MLX5_NUM_CMD_EQE,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
	};
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
	err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
	if (err)
		goto err1;

	mlx5_cmd_use_events(dev);
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);

	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = MLX5_NUM_ASYNC_EQE,
	};

	gather_async_events_mask(dev, param.mask);
	err = setup_async_eq(dev, &table->async_eq, &param, "async");
	if (err)
		goto err2;

	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = /* TODO: sriov max_vf + */ 1,
		.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
	};

	err = setup_async_eq(dev, &table->pages_eq, &param, "pages");
	if (err)
		goto err3;

	return 0;

err3:
	cleanup_async_eq(dev, &table->async_eq, "async");
err2:
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
	return err;
}

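/* Tear down the async EQs in reverse creation order, switching the command
 * interface back to polling before the cmd EQ goes away.
 */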
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	cleanup_async_eq(dev, &table->pages_eq, "pages");
	cleanup_async_eq(dev, &table->async_eq, "async");
	mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
	mlx5_cmd_use_polling(dev);
	cleanup_async_eq(dev, &table->cmd_eq, "cmd");
	mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
	mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}

struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
	return &dev->priv.eq_table->async_eq.core;
}

void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}

void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
	synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}

/* Generic EQ API for mlx5_core consumers
 * Needed for RDMA ODP EQ for now
 */
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
		       struct mlx5_eq_param *param)
{
	struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
	int err;

	if (!eq)
		return ERR_PTR(-ENOMEM);

	err = create_async_eq(dev, eq, param);
	if (err) {
		kvfree(eq);
		eq = ERR_PTR(err);
	}

	return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);

int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (IS_ERR(eq))
		return -EINVAL;

	err = destroy_async_eq(dev, eq);
	if (err)
		goto out;

	kvfree(eq);
out:
	return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);

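/* Peek at the EQE at cons_index + cc without consuming it: compute the slot,
 * validate the ownership bit against the current pass parity, and order the
 * EQE read after the ownership check.
 */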
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
	u32 ci = eq->cons_index + cc;
	u32 nent = eq_get_size(eq);
	struct mlx5_eqe *eqe;

	eqe = get_eqe(eq, ci & (nent - 1));
	eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
	/* Make sure we read EQ entry contents after we've
	 * checked the ownership bit.
	 */
	if (eqe)
		dma_rmb();

	return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);

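/* Advance the consumer index by cc and ring the EQ doorbell: the EQ number
 * goes in bits 31:24 and the 24-bit consumer index in bits 23:0. Writing the
 * register at offset 0 also re-arms the EQ; the no-arm register two dwords
 * later only updates the CI.
 */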
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val;

	eq->cons_index += cc;
	val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32)cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);

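/* Disable, destroy and free every completion EQ on comp_eqs_list, quiescing
 * each EQ's tasklet before freeing it.
 */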
static void destroy_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
		if (destroy_unmap_eq(dev, &eq->core))
			mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
				       eq->core.eqn);
		tasklet_disable(&eq->tasklet_ctx.task);
		kfree(eq);
	}
}

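/* Create num_comp_eqs completion EQs of MLX5_COMP_EQ_SIZE entries each, one
 * per completion IRQ vector starting at MLX5_IRQ_VEC_COMP_BASE.
 */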
static int create_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;
	int ncomp_eqs;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_eqs = table->num_comp_eqs;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_eqs; i++) {
		int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
		struct mlx5_eq_param param = {};

		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
		spin_lock_init(&eq->tasklet_ctx.lock);
		tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);

		eq->irq_nb.notifier_call = mlx5_eq_comp_int;
		param = (struct mlx5_eq_param) {
			.irq_index = vecidx,
			.nent = nent,
		};
		err = create_map_eq(dev, &eq->core, &param);
		if (err) {
			kfree(eq);
			goto clean;
		}
		err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
		if (err) {
			destroy_unmap_eq(dev, &eq->core);
			kfree(eq);
			goto clean;
		}

		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
		/* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
		list_add_tail(&eq->list, &table->comp_eqs_list);
	}

	return 0;

clean:
	destroy_comp_eqs(dev);
	return err;
}

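/* Translate a completion vector index into its EQ number and IRQ number by
 * walking the ordered comp_eqs_list; returns -ENOENT if the vector is out
 * of range.
 */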
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq, *n;
	int err = -ENOENT;
	int i = 0;

	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (i++ == vector) {
			*eqn = eq->core.eqn;
			*irqn = eq->core.irqn;
			err = 0;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{
	return dev->priv.eq_table->num_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_count);

struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
	int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;

	return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
					  vecidx);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
	return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
}
#endif

struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;
	struct mlx5_eq_comp *eq;

	list_for_each_entry(eq, &table->comp_eqs_list, list) {
		if (eq->core.eqn == eqn)
			return eq;
	}

	return ERR_PTR(-ENOENT);
}

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = dev->priv.eq_table;

	mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
	mlx5_irq_table_destroy(dev);
	mutex_unlock(&table->lock);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS 4
#else
#define MLX5_MAX_ASYNC_EQS 3
#endif

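/* Size the completion EQ set to the smaller of the available completion IRQ
 * vectors and the device's EQ limit minus the EQs reserved for async use,
 * then bring up the async and completion EQs.
 */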
int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int err;

	eq_table->num_comp_eqs =
		min_t(int,
		      mlx5_irq_get_num_comp(eq_table->irq_table),
		      num_eqs - MLX5_MAX_ASYNC_EQS);

	err = create_async_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create async EQs\n");
		goto err_async_eqs;
	}

	err = create_comp_eqs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create completion EQs\n");
		goto err_comp_eqs;
	}

	return 0;
err_comp_eqs:
	destroy_async_eqs(dev);
err_async_eqs:
	return err;
}

void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
	destroy_comp_eqs(dev);
	destroy_async_eqs(dev);
}

int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);

int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;

	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);