/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2017 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_com.h"
#ifdef ENA_INTERNAL
#include "ena_gen_info.h"
#endif

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#ifdef ENA_EXTENDED_STATS

#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)

#endif /* ENA_EXTENDED_STATS */
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
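/*
 * Editor's note - illustrative sketch, not compiled into the driver,
 * assuming a 64-bit dma_addr_t. The two macros above split a 64-bit DMA
 * address into the 32-bit halves the device registers expect:
 *
 *	dma_addr_t addr = 0x123456789ULL;
 *	u32 low  = ENA_DMA_ADDR_TO_UINT32_LOW(addr);   // 0x23456789
 *	u32 high = ENA_DMA_ADDR_TO_UINT32_HIGH(addr);  // 0x00000001
 */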

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u16)((u64)addr >> 32);

	return 0;
}
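/*
 * Editor's note - minimal usage sketch, not part of the driver. A caller
 * holding a coherent buffer programs its DMA address into a command
 * descriptor roughly like this (the surrounding command names are the
 * real ones used later in this file):
 *
 *	struct ena_admin_get_feat_cmd get_cmd;
 *	int rc = ena_com_mem_addr_set(ena_dev,
 *				      &get_cmd.control_buffer.address,
 *				      control_buf_dma_addr);
 *	if (unlikely(rc))
 *		return rc;  // address wider than ena_dev->dma_addr_bits
 */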

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);

	if (!sq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);

	if (!cq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
			       aenq->entries,
			       aenq->dma_addr,
			       aenq->mem_handle);

	if (!aenq->entries) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err("aenq handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
			    command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(ENA_COM_INVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

	return comp_ctx;
}

static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;
	io_sq->bus = ena_dev->bus;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
					    size,
					    io_sq->desc_addr.virt_addr,
					    io_sq->desc_addr.phys_addr,
					    io_sq->desc_addr.mem_handle,
					    ctx->numa_node,
					    dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
					       size,
					       io_sq->desc_addr.virt_addr,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}

		if (!io_sq->desc_addr.virt_addr) {
			ena_trc_err("memory allocation failed");
			return ENA_COM_NO_MEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;

		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->bounce_buf_ctrl.base_buffer,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			ena_trc_err("bounce buffer memory allocation failed");
			return ENA_COM_NO_MEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));

		/* Initialize the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
	io_cq->bus = ena_dev->bus;

	ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
				    size,
				    io_cq->cdesc_addr.virt_addr,
				    io_cq->cdesc_addr.phys_addr,
				    io_cq->cdesc_addr.mem_handle,
				    ctx->numa_node,
				    prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
				       size,
				       io_cq->cdesc_addr.virt_addr,
				       io_cq->cdesc_addr.phys_addr,
				       io_cq->cdesc_addr.mem_handle);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err("memory allocation failed");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry until the
		 * phase bit has been validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
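/*
 * Editor's note - short sketch of the phase-bit protocol used above;
 * illustration only. With a queue of depth 4, the consumer starts with
 * phase == 1 and owns an entry only while the entry's phase bit matches
 * its own; after each wrap it expects the opposite phase:
 *
 *	head:           0 1 2 3 | 0 1 2 ...
 *	expected phase: 1 1 1 1 | 0 0 0 ...
 *
 * An entry whose phase bit still holds the previous value has not been
 * written by the device yet, so the loop above stops there.
 */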

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return ENA_COM_INVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_UNSUPPORTED;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	}

	return 0;
}

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags, timeout;
	int ret;

	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

	while (1) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (ENA_TIME_EXPIRE(timeout)) {
			ena_trc_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ENA_MSLEEP(100);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err("Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
		 "Invalid comp status %d\n", comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_desc)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;

	memset(llq_info, 0, sizeof(*llq_info));

	switch (llq_desc->header_location_ctrl) {
	case ENA_ADMIN_INLINE_HEADER:
		llq_info->inline_header = true;
		break;
	case ENA_ADMIN_HEADER_RING:
		llq_info->inline_header = false;
		break;
	default:
		ena_trc_err("Invalid header location control\n");
		return -EINVAL;
	}

	switch (llq_desc->entry_size_ctrl) {
	case ENA_ADMIN_LIST_ENTRY_SIZE_128B:
		llq_info->desc_list_entry_size = 128;
		break;
	case ENA_ADMIN_LIST_ENTRY_SIZE_192B:
		llq_info->desc_list_entry_size = 192;
		break;
	case ENA_ADMIN_LIST_ENTRY_SIZE_256B:
		llq_info->desc_list_entry_size = 256;
		break;
	default:
		ena_trc_err("Invalid entry_size_ctrl %d\n",
			    llq_desc->entry_size_ctrl);
		return -EINVAL;
	}

	if ((llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8
		 * This requirement comes from __iowrite64_copy()
		 */
		ena_trc_err("illegal entry size %d\n",
			    llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->inline_header) {
		llq_info->desc_stride_ctrl = llq_desc->descriptors_stride_ctrl;
		if ((llq_info->desc_stride_ctrl != ENA_ADMIN_SINGLE_DESC_PER_ENTRY) &&
		    (llq_info->desc_stride_ctrl != ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)) {
			ena_trc_err("Invalid desc_stride_ctrl %d\n",
				    llq_info->desc_stride_ctrl);
			return -EINVAL;
		}
	} else {
		llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	llq_info->descs_num_before_header = llq_desc->desc_num_before_header_ctrl;

	return 0;
}
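/*
 * Editor's note - worked example for the arithmetic above; illustration
 * only, assuming sizeof(struct ena_eth_io_tx_desc) == 16. With the common
 * 128B desc_list_entry_size:
 *
 *	descs_per_entry = 128 / 16 = 8
 *
 * and the 8-byte alignment check holds, since 128 & 0x7 == 0.
 */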

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't get the MSI-X
	 *    interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			ena_trc_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
				    comp_ctx->cmd_opcode);
		else
			ena_trc_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = ENA_COM_TIMER_EXPIRED;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads a hardware device register by posting a write and
 * waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags;
	u32 timeout = mmio_read->reg_read_to;

	ENA_MIGHT_SLEEP();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * to it
	 */
	wmb();

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err("reg read timed out. expected: req id[%hu] offset[%hu], actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err("Read failure: wrong offset provided");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}
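/*
 * Editor's note - usage sketch (illustration only), mirroring the pattern
 * real callers in this file follow: read a register, then check for the
 * timeout sentinel before trusting the value.
 *
 *	u32 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
 *	if (unlikely(ver == ENA_MMIO_READ_TIMEOUT))
 *		return ENA_COM_TIMER_EXPIRED;
 */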

/* There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_sq->desc_addr.virt_addr,
				      io_sq->desc_addr.phys_addr,
				      io_sq->desc_addr.mem_handle);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		ENA_MSLEEP(100);
	}

	return ENA_COM_TIMER_EXPIRED;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_dbg("Feature %d isn't supported\n", feature_id);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}
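/*
 * Editor's note - usage sketch (illustration only), mirroring real callers
 * later in this file:
 *
 *	struct ena_admin_get_feat_resp get_resp;
 *	int rc = ena_com_get_feature(ena_dev, &get_resp,
 *				     ENA_ADMIN_AENQ_CONFIG);
 *	if (rc)
 *		return rc;
 *	// get_resp.u.aenq.supported_groups is now valid
 */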

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);

	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err("indirect table size doesn't fit. requested size: %d while min is %d and max is %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
	if (!ena_dev->intr_moder_tbl)
		return ENA_COM_NO_MEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
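/*
 * Editor's note - worked example for the division above (illustration
 * only, values assumed): if the device reports intr_delay_resolution == 2
 * (units of 2 usec) and a table entry holds intr_moder_interval == 256
 * usec, the stored value becomes 256 / 2 = 128 device units, so the
 * interval programmed to hardware still corresponds to 256 usec.
 */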

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
			ena_trc_dbg("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));
		else
			ena_trc_err("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err("Failed to process command. ret = %d\n",
				    ret);
		else
			ena_trc_dbg("Failed to process command. ret = %d\n",
				    ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Invalid queue number %d but the max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ENA_MSLEEP(20);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		ena_trc_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		ena_trc_err("DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * the versions the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info("ena device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);

	/* Destroy the wait event only when the completion context array
	 * exists; dereferencing comp_ctx unconditionally would crash when
	 * init failed before the array was allocated.
	 */
	if (admin_queue->comp_ctx) {
		ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
	}
	admin_queue->comp_ctx = NULL;

	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
				      sq->dma_addr, sq->mem_handle);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
				      cq->dma_addr, cq->mem_handle);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
				      aenq->dma_addr, aenq->mem_handle);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		return ENA_COM_NO_MEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;

	ENA_SPINLOCK_DESTROY(mmio_read->lock);
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
1676
ena_com_admin_init(struct ena_com_dev * ena_dev,struct ena_aenq_handlers * aenq_handlers,bool init_spinlock)1677 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1678 struct ena_aenq_handlers *aenq_handlers,
1679 bool init_spinlock)
1680 {
1681 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1682 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1683 int ret;
1684
1685 #ifdef ENA_INTERNAL
1686 ena_trc_info("ena_defs : Version:[%s] Build date [%s]",
1687 ENA_GEN_COMMIT, ENA_GEN_DATE);
1688 #endif
1689 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1690
1691 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1692 ena_trc_err("Reg read timeout occurred\n");
1693 return ENA_COM_TIMER_EXPIRED;
1694 }
1695
1696 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1697 ena_trc_err("Device isn't ready, abort com init\n");
1698 return ENA_COM_NO_DEVICE;
1699 }
1700
1701 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1702
1703 admin_queue->bus = ena_dev->bus;
1704 admin_queue->q_dmadev = ena_dev->dmadev;
1705 admin_queue->polling = false;
1706 admin_queue->curr_cmd_id = 0;
1707
1708 ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
1709
1710 if (init_spinlock)
1711 ENA_SPINLOCK_INIT(admin_queue->q_lock);
1712
1713 ret = ena_com_init_comp_ctxt(admin_queue);
1714 if (ret)
1715 goto error;
1716
1717 ret = ena_com_admin_init_sq(admin_queue);
1718 if (ret)
1719 goto error;
1720
1721 ret = ena_com_admin_init_cq(admin_queue);
1722 if (ret)
1723 goto error;
1724
1725 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1726 ENA_REGS_AQ_DB_OFF);
1727
1728 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1729 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1730
1731 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1732 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1733
1734 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1735 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1736
1737 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1738 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1739
1740 aq_caps = 0;
1741 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1742 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1743 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1744 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1745
1746 acq_caps = 0;
1747 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1748 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1749 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1750 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1751
1752 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1753 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1754 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1755 if (ret)
1756 goto error;
1757
1758 admin_queue->running_state = true;
1759
1760 return 0;
1761 error:
1762 ena_com_admin_destroy(ena_dev);
1763
1764 return ret;
1765 }
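/*
 * Illustrative bring-up order (a sketch, not the driver's actual probe
 * path): the MMIO read request must be initialized before
 * ena_com_admin_init() so that ena_com_reg_bar_read32() can use readless
 * register reads. "readless_supported" and "aenq_handlers" are assumed to be
 * provided by the caller:
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 *	if (rc)
 *		ena_com_mmio_reg_read_request_destroy(ena_dev);
 */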

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
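/*
 * Illustrative sketch of filling the creation context for a host-memory TX
 * queue. The field names come from struct ena_com_create_io_ctx as used
 * above; the qid/depth/vector values are made up for the example:
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.qid = 0,
 *		.queue_size = 1024,
 *		.msix_vector = 1,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 */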

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));
	/* Driver hints isn't a mandatory admin command, so if the command
	 * isn't supported, set the driver hints to 0.
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	return 0;
}
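/*
 * Illustrative sketch (not the driver's probe code): a caller gathers all
 * device capabilities in one shot and derives its configuration from the
 * returned context:
 *
 *	struct ena_com_dev_get_features_ctx feat;
 *
 *	rc = ena_com_get_dev_attr_feat(ena_dev, &feat);
 *	if (rc)
 *		return rc;
 *	// e.g. consult feat.dev_attr, feat.max_queues, feat.offload, feat.llq
 */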

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_com_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	unsigned long long timestamp;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		timestamp = (unsigned long long)aenq_common->timestamp_low |
			    ((unsigned long long)aenq_common->timestamp_high << 32);
		ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			    aenq_common->group,
			    aenq_common->syndrom,
			    timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
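/*
 * Illustrative sketch of the handler table consumed above. The struct layout
 * follows ena_aenq_handlers as dereferenced in this file (a handlers[] array
 * plus an unimplemented_handler fallback); the callbacks themselves are
 * hypothetical driver functions:
 *
 *	static void my_link_change_cb(void *data, struct ena_admin_aenq_entry *e)
 *	{
 *		// react to ENA_ADMIN_LINK_CHANGE notifications
 *	}
 *
 *	static void my_unimplemented_cb(void *data, struct ena_admin_aenq_entry *e)
 *	{
 *		// ignore groups without a registered handler
 *	}
 *
 *	static struct ena_aenq_handlers aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_cb,
 *		},
 *		.unimplemented_handler = my_unimplemented_cb,
 *	};
 */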
#ifdef ENA_EXTENDED_STATS
/*
 * Set the function idx and queue idx to be used by the
 * get-full-statistics feature
 */
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
					  u32 func_queue)
{
	/* Function & queue are acquired from the user in the following format:
	 * Bottom half word: function
	 * Top half word: queue
	 */
	ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
	ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);

	return 0;
}

#endif /* ENA_EXTENDED_STATS */

int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err("Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err("Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
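/*
 * Worked example of the timeout conversion above, for illustration: the
 * capability register reports the admin command timeout in units of 100 ms,
 * while completion_timeout is kept in microseconds. A register value of 5
 * therefore yields 5 * 100000 us = 500 ms; a value of 0 falls back to the
 * 3-second ADMIN_CMD_TIMEOUT_US default.
 */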

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}
#ifdef ENA_EXTENDED_STATS

int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
				   u32 len)
{
	struct ena_com_stats_ctx ctx;
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
	ena_mem_handle_t mem_handle;
	void *virt_addr;
	dma_addr_t phys_addr;
	int ret;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
			       virt_addr, phys_addr, mem_handle);
	if (!virt_addr) {
		ret = ENA_COM_NO_MEM;
		goto done;
	}
	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd->u.control_buffer.address,
				   phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		/* free the DMA buffer instead of leaking it on this error path */
		goto free_ext_stats_mem;
	}
	get_cmd->u.control_buffer.length = len;

	get_cmd->device_id = ena_dev->stats_func;
	get_cmd->queue_idx = ena_dev->stats_queue;

	ret = ena_get_dev_stats(ena_dev, &ctx,
				ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
	if (ret < 0)
		goto free_ext_stats_mem;

	ret = snprintf(buff, len, "%s", (char *)virt_addr);

free_ext_stats_mem:
	ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
			      mem_handle);
done:
	return ret;
}
#endif

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}
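/*
 * Illustrative sketch (hypothetical caller, e.g. an ndo_change_mtu
 * implementation): the new MTU is pushed to the device first, and only
 * applied to the netdev if the admin command succeeded:
 *
 *	rc = ena_com_set_dev_mtu(ena_dev, new_mtu);
 *	if (rc)
 *		return rc;
 *	netdev->mtu = new_mtu;
 */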

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		ena_trc_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_UNSUPPORTED;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;
	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure the key size is a multiple of DWORDs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_UNSUPPORTED;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}
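/*
 * Illustrative sketch of configuring a Toeplitz key. The 40-byte length
 * matches the usual Toeplitz key size; the key bytes and the init value are
 * placeholders for this example:
 *
 *	static const u8 toeplitz_key[40] = { 0 };	// placeholder key bytes
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					toeplitz_key, sizeof(toeplitz_key),
 *					0xFFFFFFFF);
 */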

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
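/*
 * Illustrative sketch (not driver code): a caller typically spreads its RX
 * queues across the indirection table round-robin and then pushes the table
 * to the device. "tbl_size" (== 1 << indr_tbl_log_size) and "num_rx_queues"
 * are hypothetical variables for this example:
 *
 *	for (i = 0; i < tbl_size; i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_rx_queues);
 *		if (rc)
 *			return rc;
 *	}
 *	rc = ena_com_indirect_table_set(ena_dev);
 */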

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg("Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}
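/*
 * Illustrative RSS bring-up order (a sketch, assuming a 128-entry table,
 * i.e. log size 7, and a caller-provided "key" buffer): allocate the RSS
 * state, program the key and hash inputs, fill the indirection table, then
 * push it to the device:
 *
 *	rc = ena_com_rss_init(ena_dev, 7);
 *	if (!rc)
 *		rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *						key, sizeof(key), 0xFFFFFFFF);
 *	if (!rc)
 *		rc = ena_com_set_default_hash_ctrl(ena_dev);
 *	(fill table entries as in the sketch above)
 *	if (!rc)
 *		rc = ena_com_indirect_table_set(ena_dev);
 */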

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}
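/*
 * Illustrative host-attributes setup (a sketch; the debug area size is an
 * arbitrary example value and the error label is hypothetical): allocate
 * both buffers, fill host_info, then push the addresses to the device:
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_allocate_debug_area(ena_dev, SZ_4K);
 *	if (rc)
 *		goto err_free_host_info;
 *	// fill ena_dev->host_attr.host_info fields here
 *	rc = ena_com_set_host_attributes(ena_dev);
 */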

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}
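/*
 * Worked example of the conversion above: with an interrupt delay resolution
 * of 4 us per unit, a requested rx_coalesce_usecs of 64 is stored as
 * 64 / 4 = 16 device units in the LOWEST table entry; the reverse
 * multiplication happens when the entry is read back in
 * ena_com_get_intr_moderation_entry() below.
 */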

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg("Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (intr_moder_tbl)
		return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;

	return 0;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq)
{
	int rc;
	int size;

	if (llq->max_llq_num == 0) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq);
	if (rc)
		return rc;

	/* Validate the descriptor is not too big */
	size = ena_dev->tx_max_header_size;
	size += ena_dev->llq_info.descs_num_before_header *
		sizeof(struct ena_eth_io_tx_desc);

	if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
		ena_trc_err("the size of the LLQ entry is smaller than needed\n");
		return ENA_COM_INVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}

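/*
 * Worked example of the LLQ size check above (assumed, illustrative values):
 * with tx_max_header_size = 96 bytes, descs_num_before_header = 2 and a
 * 16-byte struct ena_eth_io_tx_desc, the required entry size is
 * 96 + 2 * 16 = 128 bytes, so a desc_list_entry_size of 128 passes while
 * anything smaller falls back with ENA_COM_INVAL.
 */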