/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of copyright holder nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#ifdef ENA_EXTENDED_STATS

#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)

#endif /* ENA_EXTENDED_STATS */

#define ENA_CTRL_MAJOR 0
#define ENA_CTRL_MINOR 0
#define ENA_CTRL_SUB_MINOR 1

#define MIN_ENA_CTRL_VER \
    (((ENA_CTRL_MAJOR) << \
    (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
    ((ENA_CTRL_MINOR) << \
    (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
    (ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_MIN_ADMIN_POLL_US 100

#define ENA_MAX_ADMIN_POLL_US 5000

/* PHC definitions */
#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 20
#define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
#define ENA_PHC_TIMESTAMP_ERROR 0xFFFFFFFFFFFFFFFF
#define ENA_PHC_REQ_ID_OFFSET 0xDEAD

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
    ENA_CMD_SUBMITTED,
    ENA_CMD_COMPLETED,
    /* Abort - canceled by the driver */
    ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
    ena_wait_event_t wait_event;
    struct ena_admin_acq_entry *user_cqe;
    u32 comp_size;
    enum ena_cmd_status status;
    /* status from the device */
    u8 comp_status;
    u8 cmd_opcode;
    bool occupied;
};

struct ena_com_stats_ctx {
    struct ena_admin_aq_get_stats_cmd get_cmd;
    struct ena_admin_acq_get_stats_resp get_resp;
};

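/* Split a dma address into the low/high fields of an ena_common_mem_addr
 * descriptor. The address is validated against the DMA address width the
 * device advertised (dma_addr_bits); e.g. on a device reporting 48 bits,
 * any address with a bit set above bit 47 is rejected with ENA_COM_INVAL.
 */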
static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
                                struct ena_common_mem_addr *ena_addr,
                                dma_addr_t addr)
{
    if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
        ena_trc_err(ena_dev, "DMA address has more bits than the device supports\n");
        return ENA_COM_INVAL;
    }

    ena_addr->mem_addr_low = lower_32_bits(addr);
    ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

    return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
    struct ena_com_dev *ena_dev = admin_queue->ena_dev;
    struct ena_com_admin_sq *sq = &admin_queue->sq;
    u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);

    ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
                           sq->mem_handle);

    if (!sq->entries) {
        ena_trc_err(ena_dev, "Memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    sq->head = 0;
    sq->tail = 0;
    sq->phase = 1;

    sq->db_addr = NULL;

    return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
    struct ena_com_dev *ena_dev = admin_queue->ena_dev;
    struct ena_com_admin_cq *cq = &admin_queue->cq;
    u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);

    ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
                           cq->mem_handle);

    if (!cq->entries) {
        ena_trc_err(ena_dev, "Memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    cq->head = 0;
    cq->phase = 1;

    return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
                                   struct ena_aenq_handlers *aenq_handlers)
{
    struct ena_com_aenq *aenq = &ena_dev->aenq;
    u32 addr_low, addr_high, aenq_caps;
    u16 size;

    ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
    size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
    ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
                           aenq->entries,
                           aenq->dma_addr,
                           aenq->mem_handle);

    if (!aenq->entries) {
        ena_trc_err(ena_dev, "Memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    aenq->head = aenq->q_depth;
    aenq->phase = 1;

    addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
    addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

    ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
    ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

    aenq_caps = 0;
    aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
    aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
                  ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
                 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
    ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

    if (unlikely(!aenq_handlers)) {
        ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
        return ENA_COM_INVAL;
    }

    aenq->aenq_handlers = aenq_handlers;

    return 0;
}

static void comp_ctxt_release(struct ena_com_admin_queue *queue,
                              struct ena_comp_ctx *comp_ctx)
{
    comp_ctx->occupied = false;
    ATOMIC32_DEC(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
                                          u16 command_id, bool capture)
{
    if (unlikely(command_id >= admin_queue->q_depth)) {
        ena_trc_err(admin_queue->ena_dev,
                    "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
                    command_id, admin_queue->q_depth);
        return NULL;
    }

    if (unlikely(!admin_queue->comp_ctx)) {
        ena_trc_err(admin_queue->ena_dev,
                    "Completion context is NULL\n");
        return NULL;
    }

    if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
        ena_trc_err(admin_queue->ena_dev,
                    "Completion context is occupied\n");
        return NULL;
    }

    if (capture) {
        ATOMIC32_INC(&admin_queue->outstanding_cmds);
        admin_queue->comp_ctx[command_id].occupied = true;
    }

    return &admin_queue->comp_ctx[command_id];
}

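/* Push a single admin command into the admin submission queue.
 * The current phase bit is folded into the descriptor flags and the
 * command id into the common descriptor, and the matching completion
 * context is captured so the completion path can find it by command id.
 * The queue depth is a power of two, so the tail is masked rather than
 * wrapped explicitly; the phase bit flips each time the tail wraps, and
 * the new tail value is written to the SQ doorbell register.
 */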
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                       struct ena_admin_aq_entry *cmd,
                                                       size_t cmd_size_in_bytes,
                                                       struct ena_admin_acq_entry *comp,
                                                       size_t comp_size_in_bytes)
{
    struct ena_comp_ctx *comp_ctx;
    u16 tail_masked, cmd_id;
    u16 queue_size_mask;
    u16 cnt;

    queue_size_mask = admin_queue->q_depth - 1;

    tail_masked = admin_queue->sq.tail & queue_size_mask;

    /* In case of queue FULL */
    cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
    if (cnt >= admin_queue->q_depth) {
        ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
        admin_queue->stats.out_of_space++;
        return ERR_PTR(ENA_COM_NO_SPACE);
    }

    cmd_id = admin_queue->curr_cmd_id;

    cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
                                       ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

    cmd->aq_common_descriptor.command_id |= cmd_id &
                                            ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

    comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
    if (unlikely(!comp_ctx))
        return ERR_PTR(ENA_COM_INVAL);

    comp_ctx->status = ENA_CMD_SUBMITTED;
    comp_ctx->comp_size = (u32)comp_size_in_bytes;
    comp_ctx->user_cqe = comp;
    comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

    ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

    memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

    admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
                               queue_size_mask;

    admin_queue->sq.tail++;
    admin_queue->stats.submitted_cmd++;

    if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
        admin_queue->sq.phase = !admin_queue->sq.phase;

    ENA_DB_SYNC(&admin_queue->sq.mem_handle);
    ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
                    admin_queue->sq.db_addr);

    return comp_ctx;
}

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
    struct ena_com_dev *ena_dev = admin_queue->ena_dev;
    size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
    struct ena_comp_ctx *comp_ctx;
    u16 i;

    admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
    if (unlikely(!admin_queue->comp_ctx)) {
        ena_trc_err(ena_dev, "Memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    for (i = 0; i < admin_queue->q_depth; i++) {
        comp_ctx = get_comp_ctxt(admin_queue, i, false);
        if (comp_ctx)
            ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
    }

    return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                     struct ena_admin_aq_entry *cmd,
                                                     size_t cmd_size_in_bytes,
                                                     struct ena_admin_acq_entry *comp,
                                                     size_t comp_size_in_bytes)
{
    unsigned long flags = 0;
    struct ena_comp_ctx *comp_ctx;

    ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
    if (unlikely(!admin_queue->running_state)) {
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
        return ERR_PTR(ENA_COM_NO_DEVICE);
    }
    comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
                                          cmd_size_in_bytes,
                                          comp,
                                          comp_size_in_bytes);
    if (IS_ERR(comp_ctx))
        admin_queue->running_state = false;
    ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

    return comp_ctx;
}

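/* Allocate the descriptor area of an IO submission queue according to
 * its placement policy: host-memory queues get a DMA-coherent ring
 * (preferably on the requested NUMA node, with a fallback to any node),
 * while device-memory (LLQ) queues only need host-side bounce buffers
 * whose contents are later written out through the device memory BAR.
 */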
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_sq *io_sq)
{
    size_t size;
    int dev_node = 0;

    memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

    io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
    io_sq->desc_entry_size =
        (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
        sizeof(struct ena_eth_io_tx_desc) :
        sizeof(struct ena_eth_io_rx_desc);

    size = io_sq->desc_entry_size * io_sq->q_depth;
    io_sq->bus = ena_dev->bus;

    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
        ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
                                    size,
                                    io_sq->desc_addr.virt_addr,
                                    io_sq->desc_addr.phys_addr,
                                    io_sq->desc_addr.mem_handle,
                                    ctx->numa_node,
                                    dev_node);
        if (!io_sq->desc_addr.virt_addr) {
            ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                                   size,
                                   io_sq->desc_addr.virt_addr,
                                   io_sq->desc_addr.phys_addr,
                                   io_sq->desc_addr.mem_handle);
        }

        if (!io_sq->desc_addr.virt_addr) {
            ena_trc_err(ena_dev, "Memory allocation failed\n");
            return ENA_COM_NO_MEM;
        }
    }

    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
        /* Allocate bounce buffers */
        io_sq->bounce_buf_ctrl.buffer_size =
            ena_dev->llq_info.desc_list_entry_size;
        io_sq->bounce_buf_ctrl.buffers_num =
            ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
        io_sq->bounce_buf_ctrl.next_to_use = 0;

        size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
               io_sq->bounce_buf_ctrl.buffers_num;

        ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
                           size,
                           io_sq->bounce_buf_ctrl.base_buffer,
                           ctx->numa_node,
                           dev_node);
        if (!io_sq->bounce_buf_ctrl.base_buffer)
            io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

        if (!io_sq->bounce_buf_ctrl.base_buffer) {
            ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
            return ENA_COM_NO_MEM;
        }

        memcpy(&io_sq->llq_info, &ena_dev->llq_info,
               sizeof(io_sq->llq_info));

        /* Initiate the first bounce buffer */
        io_sq->llq_buf_ctrl.curr_bounce_buf =
            ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
        memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
               0x0, io_sq->llq_info.desc_list_entry_size);
        io_sq->llq_buf_ctrl.descs_left_in_line =
            io_sq->llq_info.descs_num_before_header;
        io_sq->disable_meta_caching =
            io_sq->llq_info.disable_meta_caching;

        if (io_sq->llq_info.max_entries_in_tx_burst > 0)
            io_sq->entries_in_tx_burst_left =
                io_sq->llq_info.max_entries_in_tx_burst;
    }

    io_sq->tail = 0;
    io_sq->next_to_comp = 0;
    io_sq->phase = 1;

    return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_cq *io_cq)
{
    size_t size;
    int prev_node = 0;

    memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

    /* Use the basic completion descriptor for Rx */
    io_cq->cdesc_entry_size_in_bytes =
        (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
        sizeof(struct ena_eth_io_tx_cdesc) :
        sizeof(struct ena_eth_io_rx_cdesc_base);

    size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
    io_cq->bus = ena_dev->bus;

    ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
                                        size,
                                        io_cq->cdesc_addr.virt_addr,
                                        io_cq->cdesc_addr.phys_addr,
                                        io_cq->cdesc_addr.mem_handle,
                                        ctx->numa_node,
                                        prev_node,
                                        ENA_CDESC_RING_SIZE_ALIGNMENT);
    if (!io_cq->cdesc_addr.virt_addr) {
        ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
                                       size,
                                       io_cq->cdesc_addr.virt_addr,
                                       io_cq->cdesc_addr.phys_addr,
                                       io_cq->cdesc_addr.mem_handle,
                                       ENA_CDESC_RING_SIZE_ALIGNMENT);
    }

    if (!io_cq->cdesc_addr.virt_addr) {
        ena_trc_err(ena_dev, "Memory allocation failed\n");
        return ENA_COM_NO_MEM;
    }

    io_cq->phase = 1;
    io_cq->head = 0;

    return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
                                                   struct ena_admin_acq_entry *cqe)
{
    struct ena_comp_ctx *comp_ctx;
    u16 cmd_id;

    cmd_id = cqe->acq_common_descriptor.command &
             ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

    comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
    if (unlikely(!comp_ctx)) {
        ena_trc_err(admin_queue->ena_dev,
                    "comp_ctx is NULL. Changing the admin queue running state\n");
        admin_queue->running_state = false;
        return;
    }

    comp_ctx->status = ENA_CMD_COMPLETED;
    comp_ctx->comp_status = cqe->acq_common_descriptor.status;

    if (comp_ctx->user_cqe)
        memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

    if (!admin_queue->polling)
        ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}

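/* Reap all pending admin completions. An entry is valid when its phase
 * bit matches the queue's current phase; e.g. with a queue depth of 32,
 * the first lap of entries carries phase 1, and once head wraps from 31
 * back to 0 the expected phase flips to 0. The dma_rmb() inside the loop
 * ensures the rest of an entry is not read before its phase bit has been
 * validated.
 */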
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
    struct ena_admin_acq_entry *cqe = NULL;
    u16 comp_num = 0;
    u16 head_masked;
    u8 phase;

    head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
    phase = admin_queue->cq.phase;

    cqe = &admin_queue->cq.entries[head_masked];

    /* Go over all the completions */
    while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
            ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
        /* Do not read the rest of the completion entry before the
         * phase bit has been validated
         */
        dma_rmb();
        ena_com_handle_single_admin_completion(admin_queue, cqe);

        head_masked++;
        comp_num++;
        if (unlikely(head_masked == admin_queue->q_depth)) {
            head_masked = 0;
            phase = !phase;
        }

        cqe = &admin_queue->cq.entries[head_masked];
    }

    admin_queue->cq.head += comp_num;
    admin_queue->cq.phase = phase;
    admin_queue->sq.head += comp_num;
    admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
                                        u8 comp_status)
{
    if (unlikely(comp_status != 0))
        ena_trc_err(admin_queue->ena_dev,
                    "Admin command failed[%u]\n", comp_status);

    switch (comp_status) {
    case ENA_ADMIN_SUCCESS:
        return ENA_COM_OK;
    case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
        return ENA_COM_NO_MEM;
    case ENA_ADMIN_UNSUPPORTED_OPCODE:
        return ENA_COM_UNSUPPORTED;
    case ENA_ADMIN_BAD_OPCODE:
    case ENA_ADMIN_MALFORMED_REQUEST:
    case ENA_ADMIN_ILLEGAL_PARAMETER:
    case ENA_ADMIN_UNKNOWN_ERROR:
        return ENA_COM_INVAL;
    case ENA_ADMIN_RESOURCE_BUSY:
        return ENA_COM_TRY_AGAIN;
    }

    return ENA_COM_INVAL;
}

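/* Sleep between admin polling iterations with exponential backoff: the
 * base delay is raised to at least ENA_MIN_ADMIN_POLL_US (100 us), then
 * doubled on every retry and capped at ENA_MAX_ADMIN_POLL_US (5000 us).
 * With the minimum base delay the sequence is 100, 200, 400, ..., 3200,
 * 5000, 5000, ... microseconds.
 */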
static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
    delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
    delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
    ENA_USLEEP(delay_us);
}

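/* Poll the admin completion queue until the tracked command leaves the
 * SUBMITTED state, the completion timeout expires, or the command is
 * aborted. The queue lock is held only around each reap of the CQ, and
 * the completion context is released on every exit path.
 */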
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                                                     struct ena_com_admin_queue *admin_queue)
{
    unsigned long flags = 0;
    ena_time_t timeout;
    int ret;
    u32 exp = 0;

    timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

    while (1) {
        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        ena_com_handle_admin_completion(admin_queue);
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

        if (comp_ctx->status != ENA_CMD_SUBMITTED)
            break;

        if (ENA_TIME_EXPIRE(timeout)) {
            ena_trc_err(admin_queue->ena_dev,
                        "Wait for completion (polling) timeout\n");
            /* ENA didn't have any completion */
            ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
            admin_queue->stats.no_completion++;
            admin_queue->running_state = false;
            ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

            ret = ENA_COM_TIMER_EXPIRED;
            goto err;
        }

        ena_delay_exponential_backoff_us(exp++,
                                         admin_queue->ena_dev->ena_min_poll_delay_us);
    }

    if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
        ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        admin_queue->stats.aborted_cmd++;
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
        ret = ENA_COM_NO_DEVICE;
        goto err;
    }

    ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
             admin_queue->ena_dev, "Invalid comp status %d\n",
             comp_ctx->status);

    ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
    comp_ctxt_release(admin_queue, comp_ctx);
    return ret;
}

/*
 * Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which, in turn, checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
    struct ena_com_admin_queue *admin_queue;
    struct ena_admin_set_feat_cmd cmd;
    struct ena_admin_set_feat_resp resp;
    struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
    int ret;

    memset(&cmd, 0x0, sizeof(cmd));
    admin_queue = &ena_dev->admin_queue;

    cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
    cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

    cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
    cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
    cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
    cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

    cmd.u.llq.accel_mode.u.set.enabled_flags =
        BIT(ENA_ADMIN_DISABLE_META_CACHING) |
        BIT(ENA_ADMIN_LIMIT_TX_BURST);

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&cmd,
                                        sizeof(cmd),
                                        (struct ena_admin_acq_entry *)&resp,
                                        sizeof(resp));

    if (unlikely(ret))
        ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret);

    return ret;
}

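/* Negotiate the LLQ settings. For each knob (header location, descriptor
 * stride, entry size, descriptors before header) the requested default is
 * used when the device supports it; otherwise a supported value is chosen
 * as a fallback and the substitution is logged. The resulting entry size
 * must be a multiple of 8 bytes because the descriptor list is written
 * with __iowrite64_copy().
 */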
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
                                   struct ena_admin_feature_llq_desc *llq_features,
                                   struct ena_llq_configurations *llq_default_cfg)
{
    struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
    struct ena_admin_accel_mode_get llq_accel_mode_get;
    u16 supported_feat;
    int rc;

    memset(llq_info, 0, sizeof(*llq_info));

    supported_feat = llq_features->header_location_ctrl_supported;

    if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
        llq_info->header_location_ctrl =
            llq_default_cfg->llq_header_location;
    } else {
        ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n",
                    supported_feat);
        return ENA_COM_INVAL;
    }

    if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
        supported_feat = llq_features->descriptors_stride_ctrl_supported;
        if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
            llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
        } else {
            if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
                llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
            } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
                llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
            } else {
                ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n",
                            supported_feat);
                return ENA_COM_INVAL;
            }

            ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                        llq_default_cfg->llq_stride_ctrl,
                        supported_feat,
                        llq_info->desc_stride_ctrl);
        }
    } else {
        llq_info->desc_stride_ctrl = 0;
    }

    supported_feat = llq_features->entry_size_ctrl_supported;
    if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
        llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
        llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
    } else {
        if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
            llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
            llq_info->desc_list_entry_size = 128;
        } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
            llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
            llq_info->desc_list_entry_size = 192;
        } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
            llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
            llq_info->desc_list_entry_size = 256;
        } else {
            ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
                        supported_feat);
            return ENA_COM_INVAL;
        }

        ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                    llq_default_cfg->llq_ring_entry_size,
                    supported_feat,
                    llq_info->desc_list_entry_size);
    }
    if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
        /* The desc list entry size should be a whole multiple of 8.
         * This requirement comes from __iowrite64_copy()
         */
        ena_trc_err(ena_dev, "Illegal entry size %d\n",
                    llq_info->desc_list_entry_size);
        return ENA_COM_INVAL;
    }

    if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
        llq_info->descs_per_entry = llq_info->desc_list_entry_size /
                                    sizeof(struct ena_eth_io_tx_desc);
    else
        llq_info->descs_per_entry = 1;

    supported_feat = llq_features->desc_num_before_header_supported;
    if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
        llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
    } else {
        if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
        } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
        } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
        } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
            llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
        } else {
            ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
                        supported_feat);
            return ENA_COM_INVAL;
        }

        ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                    llq_default_cfg->llq_num_decs_before_header,
                    supported_feat,
                    llq_info->descs_num_before_header);
    }
    /* Check for accelerated queue supported */
    llq_accel_mode_get = llq_features->accel_mode.u.get;

    llq_info->disable_meta_caching =
        !!(llq_accel_mode_get.supported_flags &
           BIT(ENA_ADMIN_DISABLE_META_CACHING));

    if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
        llq_info->max_entries_in_tx_burst =
            llq_accel_mode_get.max_tx_burst_size /
            llq_default_cfg->llq_ring_entry_size_value;

    rc = ena_com_set_llq(ena_dev);
    if (rc)
        ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);

    return rc;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                                                        struct ena_com_admin_queue *admin_queue)
{
    unsigned long flags = 0;
    int ret;

    ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
                        admin_queue->completion_timeout);

    /* In case the command wasn't completed find out the root cause.
     * There might be 2 kinds of errors:
     * 1) No completion (timeout reached)
     * 2) There is a completion but the driver didn't receive an MSI-X
     *    interrupt.
     */
    if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        ena_com_handle_admin_completion(admin_queue);
        admin_queue->stats.no_completion++;
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

        if (comp_ctx->status == ENA_CMD_COMPLETED) {
            ena_trc_err(admin_queue->ena_dev,
                        "The ena device sent a completion but the driver didn't receive an MSI-X interrupt (cmd %d), autopolling mode is %s\n",
                        comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
            /* Check if fallback to polling is enabled */
            if (admin_queue->auto_polling)
                admin_queue->polling = true;
        } else {
            ena_trc_err(admin_queue->ena_dev,
                        "The ena device didn't send a completion for the admin cmd %d status %d\n",
                        comp_ctx->cmd_opcode, comp_ctx->status);
        }
        /* Check if shifted to polling mode.
         * This will happen if there is a completion without an interrupt
         * and autopolling mode is enabled. Continuing normal execution in such case
         */
        if (!admin_queue->polling) {
            admin_queue->running_state = false;
            ret = ENA_COM_TIMER_EXPIRED;
            goto err;
        }
    }

    ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
    comp_ctxt_release(admin_queue, comp_ctx);
    return ret;
}

/* This method reads a hardware device register by posting a write and
 * waiting for the device's response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
 */
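/* The readless protocol: the register offset and a fresh sequence number
 * are written to ENA_REGS_MMIO_REG_READ_OFF, and the device DMAs the
 * register value into the preallocated read_resp buffer. req_id is primed
 * with seq_num + 0xDEAD so the polling loop below cannot match on a stale
 * response; the device overwrites it with the real sequence number, and
 * the echoed register offset is verified before the value is trusted.
 */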
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
    struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
    volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
        mmio_read->read_resp;
    u32 mmio_read_reg, ret, i;
    unsigned long flags = 0;
    u32 timeout = mmio_read->reg_read_to;

    ENA_MIGHT_SLEEP();

    if (timeout == 0)
        timeout = ENA_REG_READ_TIMEOUT;

    /* If readless is disabled, perform regular read */
    if (!mmio_read->readless_supported)
        return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

    ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
    mmio_read->seq_num++;

    read_resp->req_id = mmio_read->seq_num + 0xDEAD;
    mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
                    ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
    mmio_read_reg |= mmio_read->seq_num &
                     ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

    ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
                    ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

    for (i = 0; i < timeout; i++) {
        if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
            break;

        ENA_UDELAY(1);
    }

    if (unlikely(i == timeout)) {
        ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
                    mmio_read->seq_num,
                    offset,
                    read_resp->req_id,
                    read_resp->reg_off);
        ret = ENA_MMIO_READ_TIMEOUT;
        goto err;
    }

    if (read_resp->reg_off != offset) {
        ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
        ret = ENA_MMIO_READ_TIMEOUT;
    } else {
        ret = read_resp->reg_val;
    }
err:
    ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

    return ret;
}

/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
                                             struct ena_com_admin_queue *admin_queue)
{
    if (admin_queue->polling)
        return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
                                                         admin_queue);

    return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
                                                        admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
                                 struct ena_com_io_sq *io_sq)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
    struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
    u8 direction;
    int ret;

    memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

    if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
        direction = ENA_ADMIN_SQ_DIRECTION_TX;
    else
        direction = ENA_ADMIN_SQ_DIRECTION_RX;

    destroy_cmd.sq.sq_identity |= (direction <<
                                   ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
                                  ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

    destroy_cmd.sq.sq_idx = io_sq->idx;
    destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&destroy_cmd,
                                        sizeof(destroy_cmd),
                                        (struct ena_admin_acq_entry *)&destroy_resp,
                                        sizeof(destroy_resp));

    if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
        ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);

    return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
                                  struct ena_com_io_sq *io_sq,
                                  struct ena_com_io_cq *io_cq)
{
    size_t size;

    if (io_cq->cdesc_addr.virt_addr) {
        size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              size,
                              io_cq->cdesc_addr.virt_addr,
                              io_cq->cdesc_addr.phys_addr,
                              io_cq->cdesc_addr.mem_handle);

        io_cq->cdesc_addr.virt_addr = NULL;
    }

    if (io_sq->desc_addr.virt_addr) {
        size = io_sq->desc_entry_size * io_sq->q_depth;

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              size,
                              io_sq->desc_addr.virt_addr,
                              io_sq->desc_addr.phys_addr,
                              io_sq->desc_addr.mem_handle);

        io_sq->desc_addr.virt_addr = NULL;
    }

    if (io_sq->bounce_buf_ctrl.base_buffer) {
        ENA_MEM_FREE(ena_dev->dmadev,
                     io_sq->bounce_buf_ctrl.base_buffer,
                     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
        io_sq->bounce_buf_ctrl.base_buffer = NULL;
    }
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
                                u16 exp_state)
{
    u32 val, exp = 0;
    ena_time_t timeout_stamp;

    /* Convert timeout from resolution of 100ms to us resolution. */
    timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);

    while (1) {
        val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

        if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
            ena_trc_err(ena_dev, "Reg read timeout occurred\n");
            return ENA_COM_TIMER_EXPIRED;
        }

        if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
            exp_state)
            return 0;

        if (ENA_TIME_EXPIRE(timeout_stamp))
            return ENA_COM_TIMER_EXPIRED;

        ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
    }
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
                                               enum ena_admin_aq_feature_id feature_id)
{
    u32 feature_mask = 1 << feature_id;

    /* Device attributes are always supported */
    if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
        !(ena_dev->supported_features & feature_mask))
        return false;

    return true;
}

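/* Issue an admin GET_FEATURE command. When a control buffer is supplied
 * (control_buff_size != 0) the command carries the buffer's DMA address
 * and the CTRL_DATA_INDIRECT flag so the feature payload is exchanged
 * through that buffer; otherwise the response arrives inline in get_resp.
 */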
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
                                  struct ena_admin_get_feat_resp *get_resp,
                                  enum ena_admin_aq_feature_id feature_id,
                                  dma_addr_t control_buf_dma_addr,
                                  u32 control_buff_size,
                                  u8 feature_ver)
{
    struct ena_com_admin_queue *admin_queue;
    struct ena_admin_get_feat_cmd get_cmd;
    int ret;

    if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
        ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
        return ENA_COM_UNSUPPORTED;
    }

    memset(&get_cmd, 0x0, sizeof(get_cmd));
    admin_queue = &ena_dev->admin_queue;

    get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

    if (control_buff_size)
        get_cmd.aq_common_descriptor.flags =
            ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
    else
        get_cmd.aq_common_descriptor.flags = 0;

    ret = ena_com_mem_addr_set(ena_dev,
                               &get_cmd.control_buffer.address,
                               control_buf_dma_addr);
    if (unlikely(ret)) {
        ena_trc_err(ena_dev, "Memory address set failed\n");
        return ret;
    }

    get_cmd.control_buffer.length = control_buff_size;
    get_cmd.feat_common.feature_version = feature_ver;
    get_cmd.feat_common.feature_id = feature_id;

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)
                                        &get_cmd,
                                        sizeof(get_cmd),
                                        (struct ena_admin_acq_entry *)
                                        get_resp,
                                        sizeof(*get_resp));

    if (unlikely(ret))
        ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
                    feature_id, ret);

    return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
                               struct ena_admin_get_feat_resp *get_resp,
                               enum ena_admin_aq_feature_id feature_id,
                               u8 feature_ver)
{
    return ena_com_get_feature_ex(ena_dev,
                                  get_resp,
                                  feature_id,
                                  0,
                                  0,
                                  feature_ver);
}

int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
{
    return ena_dev->rss.hash_func;
}

static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
{
    struct ena_admin_feature_rss_flow_hash_control *hash_key =
        ena_dev->rss.hash_key;

    ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
    /* The key buffer is stored in the device in an array of
     * uint32 elements.
     */
    hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;

    if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
        return ENA_COM_UNSUPPORTED;

    ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                           sizeof(*rss->hash_key),
                           rss->hash_key,
                           rss->hash_key_dma_addr,
                           rss->hash_key_mem_handle);

    if (unlikely(!rss->hash_key))
        return ENA_COM_NO_MEM;

    return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;

    if (rss->hash_key)
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              sizeof(*rss->hash_key),
                              rss->hash_key,
                              rss->hash_key_dma_addr,
                              rss->hash_key_mem_handle);
    rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;

    ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                           sizeof(*rss->hash_ctrl),
                           rss->hash_ctrl,
                           rss->hash_ctrl_dma_addr,
                           rss->hash_ctrl_mem_handle);

    if (unlikely(!rss->hash_ctrl))
        return ENA_COM_NO_MEM;

    return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;

    if (rss->hash_ctrl)
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              sizeof(*rss->hash_ctrl),
                              rss->hash_ctrl,
                              rss->hash_ctrl_dma_addr,
                              rss->hash_ctrl_mem_handle);
    rss->hash_ctrl = NULL;
}

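/* Allocate the RSS indirection table. The requested log2 size is first
 * validated against the min/max sizes the device reports. Two tables are
 * kept: the device-format table (DMA-coherent, in
 * ena_admin_rss_ind_table_entry units) and a host-side u16 mirror that
 * holds the queue ids before they are converted to device CQ indices.
 */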
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
                                           u16 log_size)
{
    struct ena_rss *rss = &ena_dev->rss;
    struct ena_admin_get_feat_resp get_resp;
    size_t tbl_size;
    int ret;

    ret = ena_com_get_feature(ena_dev, &get_resp,
                              ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
    if (unlikely(ret))
        return ret;

    if ((get_resp.u.ind_table.min_size > log_size) ||
        (get_resp.u.ind_table.max_size < log_size)) {
        ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d, min: %d, max: %d\n",
                    1 << log_size,
                    1 << get_resp.u.ind_table.min_size,
                    1 << get_resp.u.ind_table.max_size);
        return ENA_COM_INVAL;
    }

    tbl_size = (1ULL << log_size) *
               sizeof(struct ena_admin_rss_ind_table_entry);

    ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                           tbl_size,
                           rss->rss_ind_tbl,
                           rss->rss_ind_tbl_dma_addr,
                           rss->rss_ind_tbl_mem_handle);
    if (unlikely(!rss->rss_ind_tbl))
        goto mem_err1;

    tbl_size = (1ULL << log_size) * sizeof(u16);
    rss->host_rss_ind_tbl =
        ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
    if (unlikely(!rss->host_rss_ind_tbl))
        goto mem_err2;

    rss->tbl_log_size = log_size;

    return 0;

mem_err2:
    tbl_size = (1ULL << log_size) *
               sizeof(struct ena_admin_rss_ind_table_entry);

    ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                          tbl_size,
                          rss->rss_ind_tbl,
                          rss->rss_ind_tbl_dma_addr,
                          rss->rss_ind_tbl_mem_handle);
    rss->rss_ind_tbl = NULL;
mem_err1:
    rss->tbl_log_size = 0;
    return ENA_COM_NO_MEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;
    size_t tbl_size = (1ULL << rss->tbl_log_size) *
                      sizeof(struct ena_admin_rss_ind_table_entry);

    if (rss->rss_ind_tbl)
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              tbl_size,
                              rss->rss_ind_tbl,
                              rss->rss_ind_tbl_dma_addr,
                              rss->rss_ind_tbl_mem_handle);
    rss->rss_ind_tbl = NULL;

    if (rss->host_rss_ind_tbl)
        ENA_MEM_FREE(ena_dev->dmadev,
                     rss->host_rss_ind_tbl,
                     ((1ULL << rss->tbl_log_size) * sizeof(u16)));
    rss->host_rss_ind_tbl = NULL;
}

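/* Create an IO submission queue on the device. The CREATE_SQ admin
 * command encodes the direction (Tx/Rx), the placement policy and a
 * descriptor-based completion policy; on success the device returns the
 * SQ index and its doorbell offset and, for LLQ queues, the offset of
 * the descriptor window inside the device memory BAR.
 */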
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
                                struct ena_com_io_sq *io_sq, u16 cq_idx)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_admin_aq_create_sq_cmd create_cmd;
    struct ena_admin_acq_create_sq_resp_desc cmd_completion;
    u8 direction;
    int ret;

    memset(&create_cmd, 0x0, sizeof(create_cmd));

    create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

    if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
        direction = ENA_ADMIN_SQ_DIRECTION_TX;
    else
        direction = ENA_ADMIN_SQ_DIRECTION_RX;

    create_cmd.sq_identity |= (direction <<
                               ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
                              ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

    create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
                            ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

    create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
                             ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
                            ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

    create_cmd.sq_caps_3 |=
        ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

    create_cmd.cq_idx = cq_idx;
    create_cmd.sq_depth = io_sq->q_depth;

    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
        ret = ena_com_mem_addr_set(ena_dev,
                                   &create_cmd.sq_ba,
                                   io_sq->desc_addr.phys_addr);
        if (unlikely(ret)) {
            ena_trc_err(ena_dev, "Memory address set failed\n");
            return ret;
        }
    }

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&create_cmd,
                                        sizeof(create_cmd),
                                        (struct ena_admin_acq_entry *)&cmd_completion,
                                        sizeof(cmd_completion));
    if (unlikely(ret)) {
        ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
        return ret;
    }

    io_sq->idx = cmd_completion.sq_idx;

    io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                     (uintptr_t)cmd_completion.sq_doorbell_offset);

    if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
        io_sq->desc_addr.pbuf_dev_addr =
            (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
            cmd_completion.llq_descriptors_offset);
    }

    ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

    return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
    struct ena_rss *rss = &ena_dev->rss;
    struct ena_com_io_sq *io_sq;
    u16 qid;
    int i;

    for (i = 0; i < 1 << rss->tbl_log_size; i++) {
        qid = rss->host_rss_ind_tbl[i];
        if (qid >= ENA_TOTAL_NUM_QUEUES)
            return ENA_COM_INVAL;

        io_sq = &ena_dev->io_sq_queues[qid];

        if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
            return ENA_COM_INVAL;

        rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
    }

    return 0;
}

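/* Rescale the stored interrupt moderation intervals when the device's
 * delay resolution changes, so the effective time stays the same; e.g.
 * an interval of 4 units at a 2 usec resolution becomes 8 units when the
 * new resolution is 1 usec (4 * 2 / 1).
 */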
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
                                                 u16 intr_delay_resolution)
{
    u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

    if (unlikely(!intr_delay_resolution)) {
        ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
        intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
    }

    /* update Rx */
    ena_dev->intr_moder_rx_interval =
        ena_dev->intr_moder_rx_interval *
        prev_intr_delay_resolution /
        intr_delay_resolution;

    /* update Tx */
    ena_dev->intr_moder_tx_interval =
        ena_dev->intr_moder_tx_interval *
        prev_intr_delay_resolution /
        intr_delay_resolution;

    ena_dev->intr_delay_resolution = intr_delay_resolution;
}

/*****************************************************************************/
/*********************************** API *************************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
                                  struct ena_admin_aq_entry *cmd,
                                  size_t cmd_size,
                                  struct ena_admin_acq_entry *comp,
                                  size_t comp_size)
{
    struct ena_comp_ctx *comp_ctx;
    int ret;

    comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
                                        comp, comp_size);
    if (IS_ERR(comp_ctx)) {
        ret = PTR_ERR(comp_ctx);
        if (ret == ENA_COM_NO_DEVICE)
            ena_trc_dbg(admin_queue->ena_dev,
                        "Failed to submit command [%d]\n",
                        ret);
        else
            ena_trc_err(admin_queue->ena_dev,
                        "Failed to submit command [%d]\n",
                        ret);

        return ret;
    }

    ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
    if (unlikely(ret)) {
        if (admin_queue->running_state)
            ena_trc_err(admin_queue->ena_dev,
                        "Failed to process command. ret = %d\n", ret);
        else
            ena_trc_dbg(admin_queue->ena_dev,
                        "Failed to process command. ret = %d\n", ret);
    }
    return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
                         struct ena_com_io_cq *io_cq)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_admin_aq_create_cq_cmd create_cmd;
    struct ena_admin_acq_create_cq_resp_desc cmd_completion;
    int ret;

    memset(&create_cmd, 0x0, sizeof(create_cmd));

    create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

    create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
                            ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
    create_cmd.cq_caps_1 |=
        ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

    create_cmd.msix_vector = io_cq->msix_vector;
    create_cmd.cq_depth = io_cq->q_depth;

    ret = ena_com_mem_addr_set(ena_dev,
                               &create_cmd.cq_ba,
                               io_cq->cdesc_addr.phys_addr);
    if (unlikely(ret)) {
        ena_trc_err(ena_dev, "Memory address set failed\n");
        return ret;
    }

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&create_cmd,
                                        sizeof(create_cmd),
                                        (struct ena_admin_acq_entry *)&cmd_completion,
                                        sizeof(cmd_completion));
    if (unlikely(ret)) {
        ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
        return ret;
    }

    io_cq->idx = cmd_completion.cq_idx;

    io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.cq_interrupt_unmask_register_offset);

    if (cmd_completion.numa_node_register_offset)
        io_cq->numa_node_cfg_reg =
            (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
            cmd_completion.numa_node_register_offset);

    ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

    return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
                            struct ena_com_io_sq **io_sq,
                            struct ena_com_io_cq **io_cq)
{
    if (qid >= ENA_TOTAL_NUM_QUEUES) {
        ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
                    qid, ENA_TOTAL_NUM_QUEUES);
        return ENA_COM_INVAL;
    }

    *io_sq = &ena_dev->io_sq_queues[qid];
    *io_cq = &ena_dev->io_cq_queues[qid];

    return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_comp_ctx *comp_ctx;
    u16 i;

    if (!admin_queue->comp_ctx)
        return;

    for (i = 0; i < admin_queue->q_depth; i++) {
        comp_ctx = get_comp_ctxt(admin_queue, i, false);
        if (unlikely(!comp_ctx))
            break;

        comp_ctx->status = ENA_CMD_ABORTED;

        ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
    }
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    unsigned long flags = 0;
    u32 exp = 0;

    ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
    while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
        ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
    }
    ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
                          struct ena_com_io_cq *io_cq)
{
    struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
    struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
    struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
    int ret;

    memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

    destroy_cmd.cq_idx = io_cq->idx;
    destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

    ret = ena_com_execute_admin_command(admin_queue,
                                        (struct ena_admin_aq_entry *)&destroy_cmd,
                                        sizeof(destroy_cmd),
                                        (struct ena_admin_acq_entry *)&destroy_resp,
                                        sizeof(destroy_resp));

    if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
        ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);

    return ret;
}

ena_com_get_admin_running_state(struct ena_com_dev * ena_dev)1569 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1570 {
1571 return ena_dev->admin_queue.running_state;
1572 }
1573
ena_com_set_admin_running_state(struct ena_com_dev * ena_dev,bool state)1574 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1575 {
1576 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1577 unsigned long flags = 0;
1578
1579 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1580 ena_dev->admin_queue.running_state = state;
1581 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1582 }
1583
ena_com_admin_aenq_enable(struct ena_com_dev * ena_dev)1584 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1585 {
1586 u16 depth = ena_dev->aenq.q_depth;
1587
1588 ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");
1589
1590 /* Init head_db to mark that all entries in the queue
1591 * are initially available
1592 */
1593 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1594 }
1595
ena_com_set_aenq_config(struct ena_com_dev * ena_dev,u32 groups_flag)1596 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1597 {
1598 struct ena_com_admin_queue *admin_queue;
1599 struct ena_admin_set_feat_cmd cmd;
1600 struct ena_admin_set_feat_resp resp;
1601 struct ena_admin_get_feat_resp get_resp;
1602 int ret;
1603
1604 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1605 if (ret) {
1606 ena_trc_info(ena_dev, "Can't get aenq configuration\n");
1607 return ret;
1608 }
1609
1610 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1611 ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1612 get_resp.u.aenq.supported_groups,
1613 groups_flag);
1614 return ENA_COM_UNSUPPORTED;
1615 }
1616
1617 memset(&cmd, 0x0, sizeof(cmd));
1618 admin_queue = &ena_dev->admin_queue;
1619
1620 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1621 cmd.aq_common_descriptor.flags = 0;
1622 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1623 cmd.u.aenq.enabled_groups = groups_flag;
1624
1625 ret = ena_com_execute_admin_command(admin_queue,
1626 (struct ena_admin_aq_entry *)&cmd,
1627 sizeof(cmd),
1628 (struct ena_admin_acq_entry *)&resp,
1629 sizeof(resp));
1630
1631 if (unlikely(ret))
1632 ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);
1633
1634 return ret;
1635 }

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	u32 width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as new as the versions the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
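
/*
 * Worked example (illustrative only): the comparison above works because the
 * masked controller version is a single packed integer. With the shift values
 * from ena_regs_defs.h, a controller reporting major.minor.subminor = 0.0.1
 * packs to exactly MIN_ENA_CTRL_VER:
 *
 *	(0 << ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT) |
 *	(0 << ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT) | 1
 *
 * so any numerically smaller packed value is an older controller.
 */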

static void
ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
				      struct ena_com_admin_queue *admin_queue)
{
	if (!admin_queue->comp_ctx)
		return;

	ENA_WAIT_EVENTS_DESTROY(admin_queue);
	ENA_MEM_FREE(ena_dev->dmadev,
		     admin_queue->comp_ctx,
		     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));

	admin_queue->comp_ctx = NULL;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);

	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
				      sq->dma_addr, sq->mem_handle);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
				      cq->dma_addr, cq->mem_handle);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
				      aenq->dma_addr, aenq->mem_handle);
	aenq->entries = NULL;
	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mask_value,
			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.polling;
}

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}

bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
}

int ena_com_phc_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_phc_info *phc = &ena_dev->phc;

	memset(phc, 0x0, sizeof(*phc));

	/* Allocate shared memory holding the PHC timestamp retrieved from the device */
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*phc->virt_addr),
			       phc->virt_addr,
			       phc->phys_addr,
			       phc->mem_handle);
	if (unlikely(!phc->virt_addr))
		return ENA_COM_NO_MEM;

	ENA_SPINLOCK_INIT(phc->lock);

	phc->virt_addr->req_id = 0;
	phc->virt_addr->timestamp = 0;

	return 0;
}

int ena_com_phc_config(struct ena_com_dev *ena_dev)
{
	struct ena_com_phc_info *phc = &ena_dev->phc;
	struct ena_admin_get_feat_resp get_feat_resp;
	struct ena_admin_set_feat_resp set_feat_resp;
	struct ena_admin_set_feat_cmd set_feat_cmd;
	int ret = 0;

	/* Get device PHC default configuration */
	ret = ena_com_get_feature(ena_dev, &get_feat_resp, ENA_ADMIN_PHC_CONFIG, 0);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to get PHC feature configuration, error: %d\n", ret);
		return ret;
	}

	/* Supporting only readless PHC retrieval */
	if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) {
		ena_trc_err(ena_dev, "Unsupported PHC type, error: %d\n", ENA_COM_UNSUPPORTED);
		return ENA_COM_UNSUPPORTED;
	}

	/* Update PHC doorbell offset according to device value, used to write req_id to PHC bar */
	phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;

	/* Update PHC expire timeout according to device or default driver value */
	phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
				    get_feat_resp.u.phc.expire_timeout_usec :
				    ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;

	/* Update PHC block timeout according to device or default driver value */
	phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
				   get_feat_resp.u.phc.block_timeout_usec :
				   ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;

	/* Sanity check - expire timeout must not be above block timeout */
	if (phc->expire_timeout_usec > phc->block_timeout_usec)
		phc->expire_timeout_usec = phc->block_timeout_usec;

	/* Prepare PHC feature command with PHC output address */
	memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
	set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
	set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
	ret = ena_com_mem_addr_set(ena_dev, &set_feat_cmd.u.phc.output_address, phc->phys_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed setting PHC output address, error: %d\n", ret);
		return ret;
	}

	/* Send PHC feature command to the device */
	ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
					    (struct ena_admin_aq_entry *)&set_feat_cmd,
					    sizeof(set_feat_cmd),
					    (struct ena_admin_acq_entry *)&set_feat_resp,
					    sizeof(set_feat_resp));

	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to enable PHC, error: %d\n", ret);
		return ret;
	}

	phc->active = true;
	ena_trc_dbg(ena_dev, "PHC is active in the device\n");

	return ret;
}

void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_phc_info *phc = &ena_dev->phc;

	phc->active = false;

	/* In case PHC is not supported by the device, exit silently */
	if (!phc->virt_addr)
		return;

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*phc->virt_addr),
			      phc->virt_addr,
			      phc->phys_addr,
			      phc->mem_handle);
	phc->virt_addr = NULL;

	ENA_SPINLOCK_DESTROY(phc->lock);
}

int ena_com_phc_get(struct ena_com_dev *ena_dev, u64 *timestamp)
{
	volatile struct ena_admin_phc_resp *read_resp = ena_dev->phc.virt_addr;
	struct ena_com_phc_info *phc = &ena_dev->phc;
	ena_time_high_res_t initial_time = ENA_TIME_INIT_HIGH_RES();
	static ena_time_high_res_t start_time;
	unsigned long flags = 0;
	ena_time_high_res_t expire_time;
	ena_time_high_res_t block_time;
	int ret = ENA_COM_OK;

	if (!phc->active) {
		ena_trc_err(ena_dev, "PHC feature is not active in the device\n");
		return ENA_COM_UNSUPPORTED;
	}

	ENA_SPINLOCK_LOCK(phc->lock, flags);

	/* Check if PHC is in blocked state */
	if (unlikely(ENA_TIME_COMPARE_HIGH_RES(start_time, initial_time))) {
		/* Check if blocking time expired */
		block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(start_time, phc->block_timeout_usec);
		if (!ENA_TIME_EXPIRE_HIGH_RES(block_time)) {
			/* PHC is still in blocked state, skip PHC request */
			phc->stats.phc_skp++;
			ret = ENA_COM_DEVICE_BUSY;
			goto skip;
		}

		/* PHC is in active state, update statistics according to req_id and timestamp */
		if ((READ_ONCE16(read_resp->req_id) != phc->req_id) ||
		    (read_resp->timestamp == ENA_PHC_TIMESTAMP_ERROR)) {
			/* Device didn't update req_id during blocking time or the timestamp
			 * is invalid, which indicates a device error
			 */
			phc->stats.phc_err++;
		} else {
			/* Device updated req_id during blocking time with valid timestamp */
			phc->stats.phc_exp++;
		}
	}

	/* Setting relative timeouts */
	start_time = ENA_GET_SYSTEM_TIME_HIGH_RES();
	block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(start_time, phc->block_timeout_usec);
	expire_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(start_time, phc->expire_timeout_usec);

	/* We expect the device to return this req_id once the new PHC timestamp is updated */
	phc->req_id++;

	/* Initialize PHC shared memory with a different req_id value, so we can
	 * identify once the device changes it back to req_id
	 */
	read_resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;

	/* Writing req_id to PHC bar */
	ENA_REG_WRITE32(ena_dev->bus, phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);

	/* Stalling until the device updates req_id */
	while (1) {
		if (unlikely(ENA_TIME_EXPIRE_HIGH_RES(expire_time))) {
			/* Gave up waiting for updated req_id, PHC enters into blocked state until
			 * passing blocking time
			 */
			ret = ENA_COM_DEVICE_BUSY;
			break;
		}

		/* Check if req_id was updated by the device */
		if (READ_ONCE16(read_resp->req_id) != phc->req_id) {
			/* req_id was not updated by the device, check again on next loop */
			continue;
		}

		/* req_id was updated which indicates that PHC timestamp was updated too */
		*timestamp = read_resp->timestamp;

		/* PHC timestamp validity check */
		if (unlikely(*timestamp == ENA_PHC_TIMESTAMP_ERROR)) {
			/* Retrieved an invalid PHC timestamp, PHC enters into blocked state until
			 * passing blocking time
			 */
			ret = ENA_COM_DEVICE_BUSY;
			break;
		}

		/* Retrieved valid PHC timestamp */
		phc->stats.phc_cnt++;

		/* This indicates PHC state is active */
		start_time = initial_time;
		break;
	}

skip:
	ENA_SPINLOCK_UNLOCK(phc->lock, flags);

	return ret;
}
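
/*
 * Usage sketch (illustrative only): the expected call order is init, then
 * config once the admin queue is up, then ena_com_phc_get() whenever a fresh
 * device timestamp is wanted. ENA_COM_DEVICE_BUSY only means the sample was
 * skipped or timed out, so callers typically retry later rather than fail.
 *
 *	u64 ts;
 *
 *	if (!ena_com_phc_init(ena_dev) && !ena_com_phc_config(ena_dev)) {
 *		if (ena_com_phc_get(ena_dev, &ts) == ENA_COM_OK)
 *			; // ts now holds the device PHC timestamp
 *	}
 */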

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		goto err;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;

err:
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
	return ENA_COM_NO_MEM;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err(ena_dev, "Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err(ena_dev, "Device isn't ready, abort com init\n");
		return ENA_COM_NO_DEVICE;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->bus = ena_dev->bus;
	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
						  ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->ena_dev = ena_dev;
	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
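
/*
 * Bring-up order sketch (illustrative only): a probe path would typically set
 * up the readless MMIO machinery first, since ena_com_admin_init() reads
 * device registers through it, and only then create the admin queue:
 *
 *	if (ena_com_mmio_reg_read_request_init(ena_dev))
 *		goto fail;
 *	if (ena_com_validate_version(ena_dev))
 *		goto fail_mmio;
 *	if (ena_com_admin_init(ena_dev, &my_aenq_handlers))
 *		goto fail_mmio;
 *
 * where my_aenq_handlers is a hypothetical caller-supplied handlers table.
 */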

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
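
/*
 * Usage sketch (illustrative only): a caller fills one ena_com_create_io_ctx
 * per queue. The field names below match the ones read above; the values are
 * hypothetical, and ENA_ADMIN_PLACEMENT_POLICY_HOST is assumed to be the host
 * placement enum from ena_admin_defs.h.
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid		= 0,
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.msix_vector	= 0,
 *		.queue_size	= 1024,
 *	};
 *
 *	if (ena_com_create_io_queue(ena_dev, &ctx))
 *		; // handle error
 */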

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);

	return ret;
}

static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
{
	struct ena_customer_metrics *customer_metrics;
	struct ena_com_stats_ctx ctx;
	int ret;

	customer_metrics = &ena_dev->customer_metrics;
	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
		customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
		return;
	}

	memset(&ctx, 0x0, sizeof(ctx));
	ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
	if (likely(ret == 0))
		customer_metrics->supported_metrics =
			ctx.get_resp.u.customer_metrics.reported_metrics;
	else
		ena_trc_err(ena_dev, "Failed to query customer metrics support. error: %d\n", ret);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));

	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
	ena_dev->capabilities = get_resp.u.dev_attr.capabilities;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return ENA_COM_INVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;

		if (rc)
			return rc;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so in case the
	 * command isn't supported set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == ENA_COM_UNSUPPORTED)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	ena_com_set_supported_customer_metrics(ena_dev);

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u64 timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE8(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (u64)aenq_common->timestamp_low |
			    ((u64)aenq_common->timestamp_high << 32);

		ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
			    aenq_common->group,
			    aenq_common->syndrome,
			    timestamp);

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
				ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
	mmiowb();
}
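
/*
 * Handlers-table sketch (illustrative only): ena_com_aenq_intr_handler()
 * dispatches on aenq_common->group through the table registered at
 * ena_com_admin_init() time. A caller-side definition could look like the
 * following; the handler names are hypothetical, ENA_ADMIN_LINK_CHANGE is
 * assumed from ena_admin_defs.h, and the callback signature matches the
 * handler_cb(data, aenq_e) call above.
 *
 *	static void my_link_change_cb(void *data,
 *				      struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		// react to link up/down
 *	}
 *
 *	static void my_unimplemented_cb(void *data,
 *					struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		// ignore groups we didn't register
 *	}
 *
 *	static struct ena_aenq_handlers my_aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_cb,
 *		},
 *		.unimplemented_handler = my_unimplemented_cb,
 *	};
 */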
#ifdef ENA_EXTENDED_STATS
/*
 * Sets the function idx and queue idx to be used by
 * the get full statistics feature
 */
int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
					  u32 func_queue)
{
	/* Function & Queue are acquired from the user in the following format:
	 * Bottom half word: function
	 * Top half word: queue
	 */
	ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
	ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);

	return 0;
}

#endif /* ENA_EXTENDED_STATS */

int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
		return ENA_COM_INVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		ena_trc_err(ena_dev, "Invalid timeout value\n");
		return ENA_COM_INVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
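
/*
 * Worked example (illustrative only) for the admin timeout conversion above:
 * the CAPS field counts in units of 100 ms while completion_timeout is kept
 * in microseconds, so a field value of 5 yields 5 * 100000 us = 500 ms, and a
 * field value of 0 falls back to ADMIN_CMD_TIMEOUT_US (3 seconds).
 */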

int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
			  struct ena_admin_eni_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
		ena_trc_err(ena_dev, "Capability %d isn't supported\n", ENA_ADMIN_ENI_STATS);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.u.eni_stats,
		       sizeof(ctx.get_resp.u.eni_stats));

	return ret;
}

int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
			     struct ena_admin_ena_srd_info *info)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
		ena_trc_err(ena_dev, "Capability %d isn't supported\n", ENA_ADMIN_ENA_SRD_INFO);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
	if (likely(ret == 0))
		memcpy(info, &ctx.get_resp.u.ena_srd_info,
		       sizeof(ctx.get_resp.u.ena_srd_info));

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.u.basic_stats,
		       sizeof(ctx.get_resp.u.basic_stats));

	return ret;
}
#ifdef ENA_EXTENDED_STATS

int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
				   u32 len)
{
	struct ena_com_stats_ctx ctx;
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
	ena_mem_handle_t mem_handle;
	void *virt_addr;
	dma_addr_t phys_addr;
	int ret;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
			       virt_addr, phys_addr, mem_handle);
	if (!virt_addr) {
		ret = ENA_COM_NO_MEM;
		goto done;
	}
	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd->u.control_buffer.address,
				   phys_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		goto free_ext_stats_mem;
	}
	get_cmd->u.control_buffer.length = len;

	get_cmd->device_id = ena_dev->stats_func;
	get_cmd->queue_idx = ena_dev->stats_queue;

	ret = ena_get_dev_stats(ena_dev, &ctx,
				ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
	if (ret < 0)
		goto free_ext_stats_mem;

	ret = snprintf(buff, len, "%s", (char *)virt_addr);

free_ext_stats_mem:
	ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
			      mem_handle);
done:
	return ret;
}
#endif

int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd;
	struct ena_com_stats_ctx ctx;
	int ret;

	if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
		ena_trc_err(ena_dev, "Invalid buffer size %u. The given buffer is too big.\n", len);
		return ENA_COM_INVAL;
	}

	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
		ena_trc_err(ena_dev, "Capability %d not supported.\n", ENA_ADMIN_CUSTOMER_METRICS);
		return ENA_COM_UNSUPPORTED;
	}

	if (!ena_dev->customer_metrics.supported_metrics) {
		ena_trc_err(ena_dev, "No supported customer metrics.\n");
		return ENA_COM_UNSUPPORTED;
	}

	get_cmd = &ctx.get_cmd;
	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd->u.control_buffer.address,
				   ena_dev->customer_metrics.buffer_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed.\n");
		return ret;
	}

	get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
	get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
	if (likely(ret == 0))
		memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
	else
		ena_trc_err(ena_dev, "Failed to get customer metrics. error: %d\n", ret);

	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_UNSUPPORTED;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	struct ena_admin_get_feat_resp get_resp;
	enum ena_admin_hash_functions old_func;
	struct ena_rss *rss = &ena_dev->rss;
	int rc;

	hash_key = rss->hash_key;

	/* Make sure size is a multiple of DWs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func);
		return ENA_COM_UNSUPPORTED;
	}

	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
		if (key_len != sizeof(hash_key->key)) {
			ena_trc_err(ena_dev, "key len (%u) doesn't equal the supported size (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}
		memcpy(hash_key->key, key, key_len);
		hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
	}

	rss->hash_init_val = init_val;
	old_func = rss->hash_func;
	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		rss->hash_func = old_func;

	return rc;
}
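
/*
 * Usage sketch (illustrative only): programming a Toeplitz hash with a
 * caller-supplied key. The key must match sizeof(hash_key->key) and be a
 * multiple of 4 bytes, as enforced above. The key bytes and the zero init
 * value are hypothetical placeholders, and ENA_HASH_KEY_SIZE is assumed to be
 * the key-size definition from ena_com.h.
 *
 *	static const u8 toeplitz_key[ENA_HASH_KEY_SIZE] = { 0x6d, 0x5a, };
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					toeplitz_key, sizeof(toeplitz_key), 0);
 */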

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	if (unlikely(!func))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	/* ENA_FFS() returns 1 in case the lsb is set */
	rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
	if (rss->hash_func)
		rss->hash_func--;

	*func = rss->hash_func;

	return 0;
}

int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		ena_dev->rss.hash_key;

	if (key)
		memcpy(key, hash_key->key,
		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err(ena_dev, "Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_UNSUPPORTED;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
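
/*
 * Fill-pattern sketch (illustrative only): a common way to populate the host
 * indirection table is round-robin over the active queues and then push it to
 * the device. num_queues and indr_tbl_log_size (the value the caller passed
 * to ena_com_rss_init()) are hypothetical caller variables.
 *
 *	u16 i;
 *
 *	for (i = 0; i < (1 << indr_tbl_log_size); i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i, i % num_queues);
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */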

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
		ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
			    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
		return ENA_COM_UNSUPPORTED;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
				    sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		   sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	/* The following function might return unsupported in case the
	 * device doesn't support setting the key / hash function. We can safely
	 * ignore this error and have indirection table support only.
	 */
	rc = ena_com_hash_key_allocate(ena_dev);
	if (likely(!rc))
		ena_com_hash_key_fill_default_key(ena_dev);
	else if (rc != ENA_COM_UNSUPPORTED)
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}
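
/*
 * RSS bring-up sketch (illustrative only): a typical order is init, fill and
 * push the indirection table, then program the default hash inputs; teardown
 * is ena_com_rss_destroy(). The log size of 7 (a 128-entry table) is a
 * hypothetical example value.
 *
 *	if (ena_com_rss_init(ena_dev, 7))
 *		goto fail;
 *	// ena_com_indirect_table_fill_entry() per entry, then:
 *	ena_com_indirect_table_set(ena_dev);
 *	ena_com_set_default_hash_ctrl(ena_dev);
 */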
3166
ena_com_rss_destroy(struct ena_com_dev * ena_dev)3167 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
3168 {
3169 ena_com_indirect_table_destroy(ena_dev);
3170 ena_com_hash_key_destroy(ena_dev);
3171 ena_com_hash_ctrl_destroy(ena_dev);
3172
3173 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
3174 }
3175
ena_com_allocate_host_info(struct ena_com_dev * ena_dev)3176 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
3177 {
3178 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3179
3180 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
3181 SZ_4K,
3182 host_attr->host_info,
3183 host_attr->host_info_dma_addr,
3184 host_attr->host_info_dma_handle);
3185 if (unlikely(!host_attr->host_info))
3186 return ENA_COM_NO_MEM;
3187
3188 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
3189 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
3190 (ENA_COMMON_SPEC_VERSION_MINOR));
3191
3192 return 0;
3193 }
3194
ena_com_allocate_debug_area(struct ena_com_dev * ena_dev,u32 debug_area_size)3195 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
3196 u32 debug_area_size)
3197 {
3198 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3199
3200 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
3201 debug_area_size,
3202 host_attr->debug_area_virt_addr,
3203 host_attr->debug_area_dma_addr,
3204 host_attr->debug_area_dma_handle);
3205 if (unlikely(!host_attr->debug_area_virt_addr)) {
3206 host_attr->debug_area_size = 0;
3207 return ENA_COM_NO_MEM;
3208 }
3209
3210 host_attr->debug_area_size = debug_area_size;
3211
3212 return 0;
3213 }
3214
ena_com_allocate_customer_metrics_buffer(struct ena_com_dev * ena_dev)3215 int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
3216 {
3217 struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
3218
3219 customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
3220 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
3221 customer_metrics->buffer_len,
3222 customer_metrics->buffer_virt_addr,
3223 customer_metrics->buffer_dma_addr,
3224 customer_metrics->buffer_dma_handle);
3225 if (!customer_metrics->buffer_virt_addr)
3226 return ENA_COM_NO_MEM;
3227
3228 return 0;
3229 }
3230
ena_com_delete_host_info(struct ena_com_dev * ena_dev)3231 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
3232 {
3233 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3234
3235 if (host_attr->host_info) {
3236 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
3237 SZ_4K,
3238 host_attr->host_info,
3239 host_attr->host_info_dma_addr,
3240 host_attr->host_info_dma_handle);
3241 host_attr->host_info = NULL;
3242 }
3243 }
3244
ena_com_delete_debug_area(struct ena_com_dev * ena_dev)3245 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
3246 {
3247 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
3248
3249 if (host_attr->debug_area_virt_addr) {
3250 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
3251 host_attr->debug_area_size,
3252 host_attr->debug_area_virt_addr,
3253 host_attr->debug_area_dma_addr,
3254 host_attr->debug_area_dma_handle);
3255 host_attr->debug_area_virt_addr = NULL;
3256 }
3257 }
3258
ena_com_delete_customer_metrics_buffer(struct ena_com_dev * ena_dev)3259 void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
3260 {
3261 struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
3262
3263 if (customer_metrics->buffer_virt_addr) {
3264 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
3265 customer_metrics->buffer_len,
3266 customer_metrics->buffer_virt_addr,
3267 customer_metrics->buffer_dma_addr,
3268 customer_metrics->buffer_dma_handle);
3269 customer_metrics->buffer_virt_addr = NULL;
3270 }
3271 }
3272
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err(ena_dev, "Memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
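/* Returns true when the device advertises the INTERRUPT_MODERATION feature
 * in its supported-features bitmap.
 */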
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

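/* Convert a coalescing time in microseconds into device interval units.
 * The device reports its granularity as intr_delay_resolution; e.g. with a
 * (hypothetical) resolution of 4 us, a request of 64 us is stored as
 * 64 / 4 = 16 units. A zero resolution means the value was never fetched
 * from the device, so the conversion is refused.
 */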
static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
							   u32 coalesce_usecs,
							   u32 intr_delay_resolution,
							   u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}

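/* Public TX/RX wrappers around the conversion helper above; each stores the
 * converted value in the corresponding non-adaptive moderation field.
 */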
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
							      tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(ena_dev,
							      rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}

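/* Query the device for the INTERRUPT_MODERATION feature. Lack of support is
 * not an error: adaptive moderation is simply disabled and 0 is returned.
 * On success the reported delay resolution is stored for later conversions,
 * and adaptive moderation still starts disabled until the driver explicitly
 * enables it.
 */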
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == ENA_COM_UNSUPPORTED) {
			ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
				    ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err(ena_dev,
				    "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
		}

		/* No moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	/* Moderation is supported; store the device's delay resolution */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

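/* Plain accessors for the current non-adaptive TX/RX moderation intervals,
 * expressed in device interval units.
 */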
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_rx_interval;
}

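/* Choose between host-memory and LLQ (low-latency queue) TX placement.
 * If the device exposes no LLQ support (max_llq_num == 0) the host policy
 * is selected. Otherwise the LLQ is configured and the maximum pushed
 * header size is derived from the entry layout; for example, with a
 * (hypothetical) 128-byte descriptor list entry and 2 descriptors before
 * the header, 128 - 2 * sizeof(struct ena_eth_io_tx_desc) = 128 - 32 = 96
 * bytes remain for the packet header.
 */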
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n");
		return ENA_COM_INVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}
