// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "efa_com.h"
#include "efa_regs_defs.h"

#define ADMIN_CMD_TIMEOUT_US 30000000 /* usecs */

#define EFA_REG_READ_TIMEOUT_US 50000 /* usecs */
#define EFA_MMIO_READ_INVALID 0xffffffff

#define EFA_POLL_INTERVAL_MS 100 /* msecs */

#define EFA_ASYNC_QUEUE_DEPTH 16
#define EFA_ADMIN_QUEUE_DEPTH 32

#define EFA_CTRL_MAJOR 0
#define EFA_CTRL_MINOR 0
#define EFA_CTRL_SUB_MINOR 1

enum efa_cmd_status {
	EFA_CMD_SUBMITTED,
	EFA_CMD_COMPLETED,
};

struct efa_comp_ctx {
	struct completion wait_event;
	struct efa_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum efa_cmd_status status;
	u8 cmd_opcode;
	u8 occupied;
};

static const char *efa_com_cmd_str(u8 cmd)
{
#define EFA_CMD_STR_CASE(_cmd) case EFA_ADMIN_##_cmd: return #_cmd

	switch (cmd) {
	EFA_CMD_STR_CASE(CREATE_QP);
	EFA_CMD_STR_CASE(MODIFY_QP);
	EFA_CMD_STR_CASE(QUERY_QP);
	EFA_CMD_STR_CASE(DESTROY_QP);
	EFA_CMD_STR_CASE(CREATE_AH);
	EFA_CMD_STR_CASE(DESTROY_AH);
	EFA_CMD_STR_CASE(REG_MR);
	EFA_CMD_STR_CASE(DEREG_MR);
	EFA_CMD_STR_CASE(CREATE_CQ);
	EFA_CMD_STR_CASE(DESTROY_CQ);
	EFA_CMD_STR_CASE(GET_FEATURE);
	EFA_CMD_STR_CASE(SET_FEATURE);
	EFA_CMD_STR_CASE(GET_STATS);
	EFA_CMD_STR_CASE(ALLOC_PD);
	EFA_CMD_STR_CASE(DEALLOC_PD);
	EFA_CMD_STR_CASE(ALLOC_UAR);
	EFA_CMD_STR_CASE(DEALLOC_UAR);
	EFA_CMD_STR_CASE(CREATE_EQ);
	EFA_CMD_STR_CASE(DESTROY_EQ);
	default: return "unknown command opcode";
	}
#undef EFA_CMD_STR_CASE
}

void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
{
	*addr_low = lower_32_bits(addr);
	*addr_high = upper_32_bits(addr);
}

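/*
 * Readless register access: the device is not read with a plain MMIO load.
 * Instead, the driver writes the register offset and a sequence number to
 * EFA_REGS_MMIO_REG_READ_OFF, and the device DMAs the register value into
 * the pre-registered read_resp buffer. A req_id matching the sequence
 * number marks the response as complete.
 */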
static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	struct efa_admin_mmio_req_read_less_resp *read_resp;
	unsigned long exp_time;
	u32 mmio_read_reg = 0;
	u32 err;

	read_resp = mmio_read->read_resp;

	spin_lock(&mmio_read->lock);
	mmio_read->seq_num++;

	/* trash DMA req_id to identify when hardware is done */
	read_resp->req_id = mmio_read->seq_num + 0x9aL;
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset);
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID,
		mmio_read->seq_num);

	writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);

	exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout);
	do {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;
		udelay(1);
	} while (time_is_after_jiffies(exp_time));

	if (read_resp->req_id != mmio_read->seq_num) {
		ibdev_err_ratelimited(
			edev->efa_dev,
			"Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
			mmio_read->seq_num, offset, read_resp->req_id,
			read_resp->reg_off);
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	if (read_resp->reg_off != offset) {
		ibdev_err_ratelimited(
			edev->efa_dev,
			"Reading register failed: wrong offset provided\n");
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	err = read_resp->reg_val;
out:
	spin_unlock(&mmio_read->lock);
	return err;
}

static int efa_com_admin_init_sq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size = aq->depth * sizeof(*sq->entries);
	u32 aq_caps = 0;
	u32 addr_high;
	u32 addr_low;

	sq->entries =
		dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
	if (!sq->entries)
		return -ENOMEM;

	spin_lock_init(&sq->lock);

	sq->cc = 0;
	sq->pc = 0;
	sq->phase = 1;

	sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);

	addr_high = upper_32_bits(sq->dma_addr);
	addr_low = lower_32_bits(sq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);

	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aq_entry));

	writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);

	return 0;
}

static int efa_com_admin_init_cq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_cq *cq = &aq->cq;
	u16 size = aq->depth * sizeof(*cq->entries);
	u32 acq_caps = 0;
	u32 addr_high;
	u32 addr_low;

	cq->entries =
		dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
	if (!cq->entries)
		return -ENOMEM;

	spin_lock_init(&cq->lock);

	cq->cc = 0;
	cq->phase = 1;

	addr_high = upper_32_bits(cq->dma_addr);
	addr_low = lower_32_bits(cq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);

	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE,
		sizeof(struct efa_admin_acq_entry));
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR,
		aq->msix_vector_idx);

	writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);

	return 0;
}

static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
				   struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_aenq *aenq = &edev->aenq;
	u32 addr_low, addr_high;
	u32 aenq_caps = 0;
	u16 size;

	if (!aenq_handlers) {
		ibdev_err(edev->efa_dev, "aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries);
	aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr,
					   GFP_KERNEL);
	if (!aenq->entries)
		return -ENOMEM;

	aenq->aenq_handlers = aenq_handlers;
	aenq->depth = EFA_ASYNC_QUEUE_DEPTH;
	aenq->cc = 0;
	aenq->phase = 1;

	addr_low = lower_32_bits(aenq->dma_addr);
	addr_high = upper_32_bits(aenq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);

	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth);
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aenq_entry));
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR,
		aenq->msix_vector_idx);
	writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);

	/*
	 * Init cons_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);

	return 0;
}

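/*
 * Completion contexts are pre-allocated, one per possible in-flight command.
 * comp_ctx_pool holds the free context IDs as a stack: comp_ctx_pool_next
 * points at the next free slot and is protected by comp_ctx_lock.
 */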
/* ID to be used with efa_com_get_comp_ctx */
static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *aq)
{
	u16 ctx_id;

	spin_lock(&aq->comp_ctx_lock);
	ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
	aq->comp_ctx_pool_next++;
	spin_unlock(&aq->comp_ctx_lock);

	return ctx_id;
}

static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
				   u16 ctx_id)
{
	spin_lock(&aq->comp_ctx_lock);
	aq->comp_ctx_pool_next--;
	aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id;
	spin_unlock(&aq->comp_ctx_lock);
}

static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
					struct efa_comp_ctx *comp_ctx)
{
	u16 cmd_id = EFA_GET(&comp_ctx->user_cqe->acq_common_descriptor.command,
			     EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
	u16 ctx_id = cmd_id & (aq->depth - 1);

	ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
	comp_ctx->occupied = 0;
	efa_com_dealloc_ctx_id(aq, ctx_id);
}

static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
						 u16 cmd_id, bool capture)
{
	u16 ctx_id = cmd_id & (aq->depth - 1);

	if (aq->comp_ctx[ctx_id].occupied && capture) {
		ibdev_err_ratelimited(
			aq->efa_dev,
			"Completion context for command_id %#x is occupied\n",
			cmd_id);
		return NULL;
	}

	if (capture) {
		aq->comp_ctx[ctx_id].occupied = 1;
		ibdev_dbg(aq->efa_dev,
			  "Take completion ctxt for command_id %#x\n", cmd_id);
	}

	return &aq->comp_ctx[ctx_id];
}

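/* Caller must hold aq->sq.lock; see efa_com_submit_admin_cmd() */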
static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
						       struct efa_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct efa_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct efa_admin_aq_entry *aqe;
	struct efa_comp_ctx *comp_ctx;
	u16 queue_size_mask;
	u16 cmd_id;
	u16 ctx_id;
	u16 pi;

	queue_size_mask = aq->depth - 1;
	pi = aq->sq.pc & queue_size_mask;

	ctx_id = efa_com_alloc_ctx_id(aq);

	/* cmd_id LSBs are the ctx_id and MSBs are entropy bits from pc */
	cmd_id = ctx_id & queue_size_mask;
	cmd_id |= aq->sq.pc & ~queue_size_mask;
	cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	cmd->aq_common_descriptor.command_id = cmd_id;
	EFA_SET(&cmd->aq_common_descriptor.flags,
		EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
	if (!comp_ctx) {
		efa_com_dealloc_ctx_id(aq, ctx_id);
		return ERR_PTR(-EINVAL);
	}

	comp_ctx->status = EFA_CMD_SUBMITTED;
	comp_ctx->comp_size = comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	aqe = &aq->sq.entries[pi];
	memset(aqe, 0, sizeof(*aqe));
	memcpy(aqe, cmd, cmd_size_in_bytes);

	aq->sq.pc++;
	atomic64_inc(&aq->stats.submitted_cmd);

	if ((aq->sq.pc & queue_size_mask) == 0)
		aq->sq.phase = !aq->sq.phase;

	/* barrier not needed in case of writel */
	writel(aq->sq.pc, aq->sq.db_addr);

	return comp_ctx;
}

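/*
 * Allocate the completion context array and the pool of free context IDs;
 * initially every ID in [0, depth) is free.
 */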
static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
{
	size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool);
	size_t size = aq->depth * sizeof(struct efa_comp_ctx);
	struct efa_comp_ctx *comp_ctx;
	u16 i;

	aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
	aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL);
	if (!aq->comp_ctx || !aq->comp_ctx_pool) {
		devm_kfree(aq->dmadev, aq->comp_ctx_pool);
		devm_kfree(aq->dmadev, aq->comp_ctx);
		return -ENOMEM;
	}

	for (i = 0; i < aq->depth; i++) {
		comp_ctx = efa_com_get_comp_ctx(aq, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);

		aq->comp_ctx_pool[i] = i;
	}

	spin_lock_init(&aq->comp_ctx_lock);

	aq->comp_ctx_pool_next = 0;

	return 0;
}

static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
						     struct efa_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct efa_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	struct efa_comp_ctx *comp_ctx;

	spin_lock(&aq->sq.lock);
	if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
		ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
		spin_unlock(&aq->sq.lock);
		return ERR_PTR(-ENODEV);
	}

	comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
					      comp_size_in_bytes);
	spin_unlock(&aq->sq.lock);
	if (IS_ERR(comp_ctx))
		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	return comp_ctx;
}

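/*
 * Match a completion entry to its submitted command via the command ID in
 * the descriptor, copy the CQE into the caller's buffer, and wake the
 * waiter when running in interrupt mode.
 */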
static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
						  struct efa_admin_acq_entry *cqe)
{
	struct efa_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = EFA_GET(&cqe->acq_common_descriptor.command,
			 EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
	if (comp_ctx->status != EFA_CMD_SUBMITTED) {
		ibdev_err(aq->efa_dev,
			  "Received completion with unexpected command id[%d], sq producer: %d, sq consumer: %d, cq consumer: %d\n",
			  cmd_id, aq->sq.pc, aq->sq.cc, aq->cq.cc);
		return -EINVAL;
	}

	comp_ctx->status = EFA_CMD_COMPLETED;
	memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);

	if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
		complete(&comp_ctx->wait_event);

	return 0;
}

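/*
 * Consume completion entries whose phase bit matches the queue's current
 * phase; the phase flips each time the consumer index wraps around the
 * queue. Called with aq->cq.lock held.
 */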
static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
{
	struct efa_admin_acq_entry *cqe;
	u16 queue_size_mask;
	u16 comp_cmds = 0;
	u8 phase;
	int err;
	u16 ci;

	queue_size_mask = aq->depth - 1;

	ci = aq->cq.cc & queue_size_mask;
	phase = aq->cq.phase;

	cqe = &aq->cq.entries[ci];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		err = efa_com_handle_single_admin_completion(aq, cqe);
		if (!err)
			comp_cmds++;

		aq->cq.cc++;
		ci++;
		if (ci == aq->depth) {
			ci = 0;
			phase = !phase;
		}

		cqe = &aq->cq.entries[ci];
	}

	aq->cq.phase = phase;
	aq->sq.cc += comp_cmds;
	atomic64_add(comp_cmds, &aq->stats.completed_cmd);
}

static int efa_com_comp_status_to_errno(u8 comp_status)
{
	switch (comp_status) {
	case EFA_ADMIN_SUCCESS:
		return 0;
	case EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case EFA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case EFA_ADMIN_BAD_OPCODE:
	case EFA_ADMIN_MALFORMED_REQUEST:
	case EFA_ADMIN_ILLEGAL_PARAMETER:
	case EFA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	default:
		return -EINVAL;
	}
}

static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_ctx,
						     struct efa_com_admin_queue *aq)
{
	unsigned long timeout;
	unsigned long flags;
	int err;

	timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);

	while (1) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		if (comp_ctx->status != EFA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			ibdev_err_ratelimited(
				aq->efa_dev,
				"Wait for completion (polling) timeout\n");
			/* EFA didn't have any completion */
			atomic64_inc(&aq->stats.no_completion);

			clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
			err = -ETIME;
			goto out;
		}

		msleep(aq->poll_interval);
	}

	err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
	efa_com_put_comp_ctx(aq, comp_ctx);
	return err;
}


static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx,
							struct efa_com_admin_queue *aq)
{
	unsigned long flags;
	int err;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(aq->completion_timeout));

	/*
	 * In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive any MSI-X
	 *    interrupt for it.
	 */
	if (comp_ctx->status == EFA_CMD_SUBMITTED) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		atomic64_inc(&aq->stats.no_completion);

		if (comp_ctx->status == EFA_CMD_COMPLETED)
			ibdev_err_ratelimited(
				aq->efa_dev,
				"The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				efa_com_cmd_str(comp_ctx->cmd_opcode),
				comp_ctx->cmd_opcode, comp_ctx->status,
				comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
		else
			ibdev_err_ratelimited(
				aq->efa_dev,
				"The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				efa_com_cmd_str(comp_ctx->cmd_opcode),
				comp_ctx->cmd_opcode, comp_ctx->status,
				comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);

		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
		err = -ETIME;
		goto out;
	}

	err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
	efa_com_put_comp_ctx(aq, comp_ctx);
	return err;
}

/*
 * There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ handler calls efa_com_handle_admin_completion
 * to mark the completions.
 */
static int efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx,
					     struct efa_com_admin_queue *aq)
{
	if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
		return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);

	return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
}

/**
 * efa_com_cmd_exec - Execute admin command
 * @aq: admin queue.
 * @cmd: the admin command to execute.
 * @cmd_size: the command size.
 * @comp: command completion return entry.
 * @comp_size: command completion size.
 * Submit an admin command and then wait until the device returns a
 * completion.
 * The completion will be copied into comp.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
		     struct efa_admin_aq_entry *cmd,
		     size_t cmd_size,
		     struct efa_admin_acq_entry *comp,
		     size_t comp_size)
{
	struct efa_comp_ctx *comp_ctx;
	int err;

	might_sleep();

	/* In case of queue FULL */
	down(&aq->avail_cmds);

	ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
		  efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
		  cmd->aq_common_descriptor.opcode);
	comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		ibdev_err_ratelimited(
			aq->efa_dev,
			"Failed to submit command %s (opcode %u) err %ld\n",
			efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
			cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));

		up(&aq->avail_cmds);
		atomic64_inc(&aq->stats.cmd_err);
		return PTR_ERR(comp_ctx);
	}

	err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
	if (err) {
		ibdev_err_ratelimited(
			aq->efa_dev,
			"Failed to process command %s (opcode %u) comp_status %d err %d\n",
			efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
			cmd->aq_common_descriptor.opcode,
			comp_ctx->user_cqe->acq_common_descriptor.status, err);
		atomic64_inc(&aq->stats.cmd_err);
	}

	up(&aq->avail_cmds);

	return err;
}

/**
 * efa_com_admin_destroy - Destroy the admin and the async events queues.
 * @edev: EFA communication layer struct
 */
void efa_com_admin_destroy(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_aenq *aenq = &edev->aenq;
	struct efa_com_admin_cq *cq = &aq->cq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size;

	clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	devm_kfree(edev->dmadev, aq->comp_ctx_pool);
	devm_kfree(edev->dmadev, aq->comp_ctx);

	size = aq->depth * sizeof(*sq->entries);
	dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr);

	size = aq->depth * sizeof(*cq->entries);
	dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr);

	size = aenq->depth * sizeof(*aenq->entries);
	dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr);
}

/**
 * efa_com_set_admin_polling_mode - Set the admin completion queue polling mode
 * @edev: EFA communication layer struct
 * @polling: Enable/Disable polling mode
 *
 * Set the admin completion mode.
 */
void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		EFA_SET(&mask_value, EFA_REGS_INTR_MASK_EN, 1);

	writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF);
	if (polling)
		set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
	else
		clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
}

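/*
 * Zero all admin queue stats. The cast relies on the stats struct being
 * laid out as consecutive atomic64_t counters.
 */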
static void efa_com_stats_init(struct efa_com_dev *edev)
{
	atomic64_t *s = (atomic64_t *)&edev->aq.stats;
	int i;

	for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
		atomic64_set(s, 0);
}

/**
 * efa_com_admin_init - Init the admin and the async queues
 * @edev: EFA communication layer struct
 * @aenq_handlers: The handlers to be called upon events.
 *
 * Initialize the admin submission and completion queues.
 * Initialize the asynchronous events notification queues.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_admin_init(struct efa_com_dev *edev,
		       struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	u32 timeout;
	u32 dev_sts;
	u32 cap;
	int err;

	dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, abort com init %#x\n", dev_sts);
		return -ENODEV;
	}

	aq->depth = EFA_ADMIN_QUEUE_DEPTH;

	aq->dmadev = edev->dmadev;
	aq->efa_dev = edev->efa_dev;
	set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state);

	sema_init(&aq->avail_cmds, aq->depth);

	efa_com_stats_init(edev);

	err = efa_com_init_comp_ctxt(aq);
	if (err)
		return err;

	err = efa_com_admin_init_sq(edev);
	if (err)
		goto err_destroy_comp_ctxt;

	err = efa_com_admin_init_cq(edev);
	if (err)
		goto err_destroy_sq;

	efa_com_set_admin_polling_mode(edev, false);

	err = efa_com_admin_init_aenq(edev, aenq_handlers);
	if (err)
		goto err_destroy_cq;

	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		aq->completion_timeout = timeout * 100000;
	else
		aq->completion_timeout = ADMIN_CMD_TIMEOUT_US;

	aq->poll_interval = EFA_POLL_INTERVAL_MS;

	set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	return 0;

err_destroy_cq:
	dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
			  aq->cq.entries, aq->cq.dma_addr);
err_destroy_sq:
	dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
			  aq->sq.entries, aq->sq.dma_addr);
err_destroy_comp_ctxt:
	devm_kfree(edev->dmadev, aq->comp_ctx);

	return err;
}

/**
 * efa_com_admin_q_comp_intr_handler - admin queue interrupt handler
 * @edev: EFA communication layer struct
 *
 * This method goes over the admin completion queue and wakes up
 * all the pending threads that are waiting on the commands wait event.
 *
 * Note: Should be called after MSI-X interrupt.
 */
void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
{
	unsigned long flags;

	spin_lock_irqsave(&edev->aq.cq.lock, flags);
	efa_com_handle_admin_completion(&edev->aq);
	spin_unlock_irqrestore(&edev->aq.cq.lock, flags);
}

/*
 * efa_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static efa_aenq_handler efa_com_get_specific_aenq_cb(struct efa_com_dev *edev,
						     u16 group)
{
	struct efa_aenq_handlers *aenq_handlers = edev->aenq.aenq_handlers;

	if (group < EFA_MAX_HANDLERS && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/**
 * efa_com_aenq_intr_handler - AENQ interrupt handler
 * @edev: EFA communication layer struct
 * @data: Data of interrupt handler.
 *
 * Go over the async event notification queue and call the proper aenq handler.
 */
void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data)
{
	struct efa_admin_aenq_common_desc *aenq_common;
	struct efa_com_aenq *aenq = &edev->aenq;
	struct efa_admin_aenq_entry *aenq_e;
	efa_aenq_handler handler_cb;
	u32 processed = 0;
	u8 phase;
	u32 ci;

	ci = aenq->cc & (aenq->depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[ci]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();

		/* Handle the specific event */
		handler_cb = efa_com_get_specific_aenq_cb(edev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		ci++;
		processed++;

		if (ci == aenq->depth) {
			ci = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[ci];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->cc += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* barrier not needed in case of writel */
	writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
}

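/* Program the DMA address of the readless read response buffer into the device */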
static void efa_com_mmio_reg_read_resp_addr_init(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	u32 addr_high;
	u32 addr_low;

	/* dma_addr_bits is unknown at this point */
	addr_high = (mmio_read->read_resp_dma_addr >> 32) & GENMASK(31, 0);
	addr_low = mmio_read->read_resp_dma_addr & GENMASK(31, 0);

	writel(addr_high, edev->reg_bar + EFA_REGS_MMIO_RESP_HI_OFF);
	writel(addr_low, edev->reg_bar + EFA_REGS_MMIO_RESP_LO_OFF);
}

int efa_com_mmio_reg_read_init(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_alloc_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (!mmio_read->read_resp)
		return -ENOMEM;

	efa_com_mmio_reg_read_resp_addr_init(edev);

	mmio_read->read_resp->req_id = 0;
	mmio_read->seq_num = 0;
	mmio_read->mmio_read_timeout = EFA_REG_READ_TIMEOUT_US;

	return 0;
}

void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

	dma_free_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
}

int efa_com_validate_version(struct efa_com_dev *edev)
{
	u32 min_ctrl_ver = 0;
	u32 ctrl_ver_masked;
	u32 min_ver = 0;
	u32 ctrl_ver;
	u32 ver;

	/*
	 * Make sure the EFA version and the controller version are at least
	 * the versions the driver expects
	 */
	ver = efa_com_reg_read32(edev, EFA_REGS_VERSION_OFF);
	ctrl_ver = efa_com_reg_read32(edev,
				      EFA_REGS_CONTROLLER_VERSION_OFF);

	ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
		  EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION),
		  EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION));

	EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION,
		EFA_ADMIN_API_VERSION_MAJOR);
	EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION,
		EFA_ADMIN_API_VERSION_MINOR);
	if (ver < min_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA version is lower than the minimal version the driver supports\n");
		return -EOPNOTSUPP;
	}

	ibdev_dbg(
		edev->efa_dev,
		"efa controller version: %d.%d.%d implementation version %d\n",
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION),
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID));

	ctrl_ver_masked =
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) |
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) |
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION);

	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION,
		EFA_CTRL_MAJOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION,
		EFA_CTRL_MINOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION,
		EFA_CTRL_SUB_MINOR);
	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < min_ctrl_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * efa_com_get_dma_width - Retrieve physical dma address width the device
 * supports.
 * @edev: EFA communication layer struct
 *
 * Retrieve the maximum physical address bits the device can handle.
 *
 * @return: > 0 on success and negative value otherwise.
 */
int efa_com_get_dma_width(struct efa_com_dev *edev)
{
	u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	int width;

	width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH);

	ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);

	if (width < 32 || width > 64) {
		ibdev_err(edev->efa_dev, "DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	edev->dma_addr_bits = width;

	return width;
}

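/*
 * Poll the device status register until the RESET_IN_PROGRESS bit reaches
 * the requested state ('on'), checking once per EFA_POLL_INTERVAL_MS for
 * up to 'timeout' iterations.
 */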
static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);

		if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on)
			return 0;

		ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
		msleep(EFA_POLL_INTERVAL_MS);
	}

	return -ETIME;
}

/**
 * efa_com_dev_reset - Perform an FLR on the device.
 * @edev: EFA communication layer struct
 * @reset_reason: The reset trigger, in case of an error.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_dev_reset(struct efa_com_dev *edev,
		      enum efa_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap;
	u32 reset_val = 0;
	int err;

	stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);

	if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT);
	if (!timeout) {
		ibdev_err(edev->efa_dev, "Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1);
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason);
	writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);

	/* reset clears the mmio readless address, restore it */
	efa_com_mmio_reg_read_resp_addr_init(edev);

	err = wait_for_reset_state(edev, timeout, 1);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
		return err;
	}

	/* reset done */
	writel(0, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
	err = wait_for_reset_state(edev, timeout, 0);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn off\n");
		return err;
	}

	timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		edev->aq.completion_timeout = timeout * 100000;
	else
		edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

static int efa_com_create_eq(struct efa_com_dev *edev,
			     struct efa_com_create_eq_params *params,
			     struct efa_com_create_eq_result *result)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_admin_create_eq_resp resp = {};
	struct efa_admin_create_eq_cmd cmd = {};
	int err;

	cmd.aq_common_descriptor.opcode = EFA_ADMIN_CREATE_EQ;
	EFA_SET(&cmd.caps, EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS,
		params->entry_size_in_bytes / 4);
	cmd.depth = params->depth;
	cmd.event_bitmask = params->event_bitmask;
	cmd.msix_vec = params->msix_vec;

	efa_com_set_dma_addr(params->dma_addr, &cmd.ba.mem_addr_high,
			     &cmd.ba.mem_addr_low);

	err = efa_com_cmd_exec(aq,
			       (struct efa_admin_aq_entry *)&cmd,
			       sizeof(cmd),
			       (struct efa_admin_acq_entry *)&resp,
			       sizeof(resp));
	if (err) {
		ibdev_err_ratelimited(edev->efa_dev,
				      "Failed to create eq[%d]\n", err);
		return err;
	}

	result->eqn = resp.eqn;

	return 0;
}

static void efa_com_destroy_eq(struct efa_com_dev *edev,
			       struct efa_com_destroy_eq_params *params)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_admin_destroy_eq_resp resp = {};
	struct efa_admin_destroy_eq_cmd cmd = {};
	int err;

	cmd.aq_common_descriptor.opcode = EFA_ADMIN_DESTROY_EQ;
	cmd.eqn = params->eqn;

	err = efa_com_cmd_exec(aq,
			       (struct efa_admin_aq_entry *)&cmd,
			       sizeof(cmd),
			       (struct efa_admin_acq_entry *)&resp,
			       sizeof(resp));
	if (err)
		ibdev_err_ratelimited(edev->efa_dev,
				      "Failed to destroy EQ-%u [%d]\n", cmd.eqn,
				      err);
}

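/*
 * Ring the EQ doorbell with the ARM bit set, requesting an interrupt for
 * subsequent events on this EQ (the handler re-arms after draining).
 */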
static void efa_com_arm_eq(struct efa_com_dev *edev, struct efa_com_eq *eeq)
{
	u32 val = 0;

	EFA_SET(&val, EFA_REGS_EQ_DB_EQN, eeq->eqn);
	EFA_SET(&val, EFA_REGS_EQ_DB_ARM, 1);

	writel(val, edev->reg_bar + EFA_REGS_EQ_DB_OFF);
}

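/*
 * Drain all event queue entries whose phase bit matches the current phase,
 * dispatch each to the registered callback, then update the consumer
 * counter and re-arm the EQ.
 */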
void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
				  struct efa_com_eq *eeq)
{
	struct efa_admin_eqe *eqe;
	u32 processed = 0;
	u8 phase;
	u32 ci;

	ci = eeq->cc & (eeq->depth - 1);
	phase = eeq->phase;
	eqe = &eeq->eqes[ci];

	/* Go over all the events */
	while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();

		eeq->cb(eeq, eqe);

		/* Get next event entry */
		ci++;
		processed++;

		if (ci == eeq->depth) {
			ci = 0;
			phase = !phase;
		}

		eqe = &eeq->eqes[ci];
	}

	eeq->cc += processed;
	eeq->phase = phase;
	efa_com_arm_eq(eeq->edev, eeq);
}

void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq)
{
	struct efa_com_destroy_eq_params params = {
		.eqn = eeq->eqn,
	};

	efa_com_destroy_eq(edev, &params);
	dma_free_coherent(edev->dmadev, eeq->depth * sizeof(*eeq->eqes),
			  eeq->eqes, eeq->dma_addr);
}

int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
		    efa_eqe_handler cb, u16 depth, u8 msix_vec)
{
	struct efa_com_create_eq_params params = {};
	struct efa_com_create_eq_result result = {};
	int err;

	params.depth = depth;
	params.entry_size_in_bytes = sizeof(*eeq->eqes);
	EFA_SET(&params.event_bitmask,
		EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS, 1);
	params.msix_vec = msix_vec;

	eeq->eqes = dma_alloc_coherent(edev->dmadev,
				       params.depth * sizeof(*eeq->eqes),
				       &params.dma_addr, GFP_KERNEL);
	if (!eeq->eqes)
		return -ENOMEM;

	err = efa_com_create_eq(edev, &params, &result);
	if (err)
		goto err_free_coherent;

	eeq->eqn = result.eqn;
	eeq->edev = edev;
	eeq->dma_addr = params.dma_addr;
	eeq->phase = 1;
	eeq->depth = params.depth;
	eeq->cb = cb;
	efa_com_arm_eq(edev, eeq);

	return 0;

err_free_coherent:
	dma_free_coherent(edev->dmadev, params.depth * sizeof(*eeq->eqes),
			  eeq->eqes, params.dma_addr);
	return err;
}