1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * * Neither the name of copyright holder nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include "ena_com.h"
35
36 /*****************************************************************************/
37 /*****************************************************************************/
38
39 /* Timeout in micro-sec */
40 #define ADMIN_CMD_TIMEOUT_US (3000000)
41
42 #define ENA_ASYNC_QUEUE_DEPTH 16
43 #define ENA_ADMIN_QUEUE_DEPTH 32
44
45 #ifdef ENA_EXTENDED_STATS
46
47 #define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
48 #define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
49 #define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
50
51 #endif /* ENA_EXTENDED_STATS */
52
53 #define ENA_CTRL_MAJOR 0
54 #define ENA_CTRL_MINOR 0
55 #define ENA_CTRL_SUB_MINOR 1
56
57 #define MIN_ENA_CTRL_VER \
58 (((ENA_CTRL_MAJOR) << \
59 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
60 ((ENA_CTRL_MINOR) << \
61 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
62 (ENA_CTRL_SUB_MINOR))
63
64 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
65 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
66
67 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
68
69 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
70
71 #define ENA_REGS_ADMIN_INTR_MASK 1
72
73 #define ENA_MAX_BACKOFF_DELAY_EXP 16U
74
75 #define ENA_MIN_ADMIN_POLL_US 100
76
77 #define ENA_MAX_ADMIN_POLL_US 5000
78
79 /* PHC definitions */
80 #define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10
81 #define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
82 #define ENA_PHC_MAX_ERROR_BOUND 0xFFFFFFFF
83 #define ENA_PHC_REQ_ID_OFFSET 0xDEAD
84 #define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP | \
85 ENA_ADMIN_PHC_ERROR_FLAG_ERROR_BOUND)
86
87 /*****************************************************************************/
88 /*****************************************************************************/
89 /*****************************************************************************/
90
91 enum ena_cmd_status {
92 ENA_CMD_SUBMITTED,
93 ENA_CMD_COMPLETED,
94 /* Abort - canceled by the driver */
95 ENA_CMD_ABORTED,
96 };
97
98 struct ena_comp_ctx {
99 ena_wait_event_t wait_event;
100 struct ena_admin_acq_entry *user_cqe;
101 u32 comp_size;
102 enum ena_cmd_status status;
103 /* status from the device */
104 u8 comp_status;
105 u8 cmd_opcode;
106 bool occupied;
107 };
108
109 struct ena_com_stats_ctx {
110 struct ena_admin_aq_get_stats_cmd get_cmd;
111 struct ena_admin_acq_get_stats_resp get_resp;
112 };
113
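/* Validate that a DMA address fits within the device's supported address
 * width and split it into the low/high parts of ena_common_mem_addr.
 */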
114 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
115 struct ena_common_mem_addr *ena_addr,
116 dma_addr_t addr)
117 {
118 if (unlikely((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr)) {
119 ena_trc_err(ena_dev, "DMA address has more bits than the device supports\n");
120 return ENA_COM_INVAL;
121 }
122
123 ena_addr->mem_addr_low = lower_32_bits(addr);
124 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
125
126 return 0;
127 }
128
129 static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
130 {
131 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
132 struct ena_com_admin_sq *sq = &admin_queue->sq;
133 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
134
135 ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, sq->entries, sq->dma_addr,
136 sq->mem_handle);
137
138 if (unlikely(!sq->entries)) {
139 ena_trc_err(ena_dev, "Memory allocation failed\n");
140 return ENA_COM_NO_MEM;
141 }
142
143 sq->head = 0;
144 sq->tail = 0;
145 sq->phase = 1;
146
147 sq->db_addr = NULL;
148
149 return 0;
150 }
151
152 static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
153 {
154 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
155 struct ena_com_admin_cq *cq = &admin_queue->cq;
156 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
157
158 ENA_MEM_ALLOC_COHERENT(admin_queue->q_dmadev, size, cq->entries, cq->dma_addr,
159 cq->mem_handle);
160
161 if (unlikely(!cq->entries)) {
162 ena_trc_err(ena_dev, "Memory allocation failed\n");
163 return ENA_COM_NO_MEM;
164 }
165
166 cq->head = 0;
167 cq->phase = 1;
168
169 return 0;
170 }
171
172 static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
173 struct ena_aenq_handlers *aenq_handlers)
174 {
175 struct ena_com_aenq *aenq = &ena_dev->aenq;
176 u32 addr_low, addr_high, aenq_caps;
177 u16 size;
178
179 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
180 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
181 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, size,
182 aenq->entries,
183 aenq->dma_addr,
184 aenq->mem_handle);
185
186 if (unlikely(!aenq->entries)) {
187 ena_trc_err(ena_dev, "Memory allocation failed\n");
188 return ENA_COM_NO_MEM;
189 }
190
191 aenq->head = aenq->q_depth;
192 aenq->phase = 1;
193
194 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
195 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
196
197 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
198 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
199
200 aenq_caps = 0;
201 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
202 aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
203 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
204 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
205 ENA_REG_WRITE32(ena_dev->bus, aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
206
207 if (unlikely(!aenq_handlers)) {
208 ena_trc_err(ena_dev, "AENQ handlers pointer is NULL\n");
209 return ENA_COM_INVAL;
210 }
211
212 aenq->aenq_handlers = aenq_handlers;
213
214 return 0;
215 }
216
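/* Return a completion context to the pool and decrement the number of
 * outstanding admin commands.
 */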
217 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
218 struct ena_comp_ctx *comp_ctx)
219 {
220 comp_ctx->user_cqe = NULL;
221 comp_ctx->occupied = false;
222 ATOMIC32_DEC(&queue->outstanding_cmds);
223 }
224
225 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
226 u16 command_id, bool capture)
227 {
228 if (unlikely(command_id >= admin_queue->q_depth)) {
229 ena_trc_err(admin_queue->ena_dev,
230 "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
231 command_id, admin_queue->q_depth);
232 return NULL;
233 }
234
235 if (unlikely(!admin_queue->comp_ctx)) {
236 ena_trc_err(admin_queue->ena_dev,
237 "Completion context is NULL\n");
238 return NULL;
239 }
240
241 if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
242 ena_trc_err(admin_queue->ena_dev,
243 "Completion context is occupied\n");
244 return NULL;
245 }
246
247 if (capture) {
248 ATOMIC32_INC(&admin_queue->outstanding_cmds);
249 admin_queue->comp_ctx[command_id].occupied = true;
250 }
251
252 return &admin_queue->comp_ctx[command_id];
253 }
254
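/* Write a single admin command into the admin SQ (called with the admin
 * queue lock held): tag it with the current phase and command id, reserve a
 * completion context and ring the SQ doorbell.
 */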
255 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
256 struct ena_admin_aq_entry *cmd,
257 size_t cmd_size_in_bytes,
258 struct ena_admin_acq_entry *comp,
259 size_t comp_size_in_bytes)
260 {
261 struct ena_comp_ctx *comp_ctx;
262 u16 tail_masked, cmd_id;
263 u16 queue_size_mask;
264 u16 cnt;
265
266 queue_size_mask = admin_queue->q_depth - 1;
267
268 tail_masked = admin_queue->sq.tail & queue_size_mask;
269
270 /* In case of queue FULL */
271 cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
272 if (unlikely(cnt >= admin_queue->q_depth)) {
273 ena_trc_dbg(admin_queue->ena_dev, "Admin queue is full.\n");
274 admin_queue->stats.out_of_space++;
275 return ERR_PTR(ENA_COM_NO_SPACE);
276 }
277
278 cmd_id = admin_queue->curr_cmd_id;
279
280 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
281 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
282
283 cmd->aq_common_descriptor.command_id |= cmd_id &
284 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
285
286 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
287 if (unlikely(!comp_ctx))
288 return ERR_PTR(ENA_COM_INVAL);
289
290 comp_ctx->status = ENA_CMD_SUBMITTED;
291 comp_ctx->comp_size = (u32)comp_size_in_bytes;
292 comp_ctx->user_cqe = comp;
293 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
294
295 ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
296
297 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
298
299 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
300 queue_size_mask;
301
302 admin_queue->sq.tail++;
303 admin_queue->stats.submitted_cmd++;
304
305 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
306 admin_queue->sq.phase = !admin_queue->sq.phase;
307
308 ENA_DB_SYNC(&admin_queue->sq.mem_handle);
309 ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
310 admin_queue->sq.db_addr);
311
312 return comp_ctx;
313 }
314
315 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
316 {
317 struct ena_com_dev *ena_dev = admin_queue->ena_dev;
318 size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
319 struct ena_comp_ctx *comp_ctx;
320 u16 i;
321
322 admin_queue->comp_ctx = ENA_MEM_ALLOC(admin_queue->q_dmadev, size);
323 if (unlikely(!admin_queue->comp_ctx)) {
324 ena_trc_err(ena_dev, "Memory allocation failed\n");
325 return ENA_COM_NO_MEM;
326 }
327
328 for (i = 0; i < admin_queue->q_depth; i++) {
329 comp_ctx = get_comp_ctxt(admin_queue, i, false);
330 if (comp_ctx)
331 ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
332 }
333
334 return 0;
335 }
336
337 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
338 struct ena_admin_aq_entry *cmd,
339 size_t cmd_size_in_bytes,
340 struct ena_admin_acq_entry *comp,
341 size_t comp_size_in_bytes)
342 {
343 unsigned long flags = 0;
344 struct ena_comp_ctx *comp_ctx;
345
346 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
347 if (unlikely(!admin_queue->running_state)) {
348 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
349 return ERR_PTR(ENA_COM_NO_DEVICE);
350 }
351 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
352 cmd_size_in_bytes,
353 comp,
354 comp_size_in_bytes);
355 if (IS_ERR(comp_ctx))
356 admin_queue->running_state = false;
357 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
358
359 return comp_ctx;
360 }
361
362 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
363 struct ena_com_create_io_ctx *ctx,
364 struct ena_com_io_sq *io_sq)
365 {
366 size_t size;
367 int dev_node = 0;
368
369 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
370
371 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
372 io_sq->desc_entry_size =
373 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
374 sizeof(struct ena_eth_io_tx_desc) :
375 sizeof(struct ena_eth_io_rx_desc);
376
377 size = io_sq->desc_entry_size * io_sq->q_depth;
378 io_sq->bus = ena_dev->bus;
379
380 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
381 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
382 size,
383 io_sq->desc_addr.virt_addr,
384 io_sq->desc_addr.phys_addr,
385 io_sq->desc_addr.mem_handle,
386 ctx->numa_node,
387 dev_node);
388 if (!io_sq->desc_addr.virt_addr) {
389 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
390 size,
391 io_sq->desc_addr.virt_addr,
392 io_sq->desc_addr.phys_addr,
393 io_sq->desc_addr.mem_handle);
394 }
395
396 if (unlikely(!io_sq->desc_addr.virt_addr)) {
397 ena_trc_err(ena_dev, "Memory allocation failed\n");
398 return ENA_COM_NO_MEM;
399 }
400 }
401
402 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
403 /* Allocate bounce buffers */
404 io_sq->bounce_buf_ctrl.buffer_size =
405 ena_dev->llq_info.desc_list_entry_size;
406 io_sq->bounce_buf_ctrl.buffers_num =
407 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
408 io_sq->bounce_buf_ctrl.next_to_use = 0;
409
410 size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
411 io_sq->bounce_buf_ctrl.buffers_num;
412
413 ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
414 size,
415 io_sq->bounce_buf_ctrl.base_buffer,
416 ctx->numa_node,
417 dev_node);
418 if (!io_sq->bounce_buf_ctrl.base_buffer)
419 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
420
421 if (unlikely(!io_sq->bounce_buf_ctrl.base_buffer)) {
422 ena_trc_err(ena_dev, "Bounce buffer memory allocation failed\n");
423 return ENA_COM_NO_MEM;
424 }
425
426 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
427 sizeof(io_sq->llq_info));
428
429 /* Initiate the first bounce buffer */
430 io_sq->llq_buf_ctrl.curr_bounce_buf =
431 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
432 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
433 0x0, io_sq->llq_info.desc_list_entry_size);
434 io_sq->llq_buf_ctrl.descs_left_in_line =
435 io_sq->llq_info.descs_num_before_header;
436 io_sq->disable_meta_caching =
437 io_sq->llq_info.disable_meta_caching;
438
439 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
440 io_sq->entries_in_tx_burst_left =
441 io_sq->llq_info.max_entries_in_tx_burst;
442 }
443
444 io_sq->tail = 0;
445 io_sq->next_to_comp = 0;
446 io_sq->phase = 1;
447
448 return 0;
449 }
450
451 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
452 struct ena_com_create_io_ctx *ctx,
453 struct ena_com_io_cq *io_cq)
454 {
455 size_t size;
456 int prev_node = 0;
457
458 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
459
460 /* Use the basic completion descriptor for Rx */
461 io_cq->cdesc_entry_size_in_bytes =
462 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
463 sizeof(struct ena_eth_io_tx_cdesc) :
464 sizeof(struct ena_eth_io_rx_cdesc_base);
465
466 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
467 io_cq->bus = ena_dev->bus;
468
469 ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
470 size,
471 io_cq->cdesc_addr.virt_addr,
472 io_cq->cdesc_addr.phys_addr,
473 io_cq->cdesc_addr.mem_handle,
474 ctx->numa_node,
475 prev_node,
476 ENA_CDESC_RING_SIZE_ALIGNMENT);
477 if (!io_cq->cdesc_addr.virt_addr) {
478 ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
479 size,
480 io_cq->cdesc_addr.virt_addr,
481 io_cq->cdesc_addr.phys_addr,
482 io_cq->cdesc_addr.mem_handle,
483 ENA_CDESC_RING_SIZE_ALIGNMENT);
484 }
485
486 if (unlikely(!io_cq->cdesc_addr.virt_addr)) {
487 ena_trc_err(ena_dev, "Memory allocation failed\n");
488 return ENA_COM_NO_MEM;
489 }
490
491 io_cq->phase = 1;
492 io_cq->head = 0;
493
494 return 0;
495 }
496
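/* Match a single admin completion entry to its completion context, copy the
 * completion into the caller's buffer and wake up the waiter if needed.
 */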
497 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
498 struct ena_admin_acq_entry *cqe)
499 {
500 struct ena_comp_ctx *comp_ctx;
501 u16 cmd_id;
502
503 cmd_id = cqe->acq_common_descriptor.command &
504 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
505
506 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
507 if (unlikely(!comp_ctx)) {
508 ena_trc_err(admin_queue->ena_dev,
509 "comp_ctx is NULL. Changing the admin queue running state\n");
510 admin_queue->running_state = false;
511 return;
512 }
513
514 if (!comp_ctx->occupied)
515 return;
516
517 comp_ctx->status = ENA_CMD_COMPLETED;
518 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
519
520 if (comp_ctx->user_cqe)
521 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
522
523 if (!admin_queue->polling)
524 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
525 }
526
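/* Walk the admin CQ and process every entry whose phase bit matches the
 * expected phase, then advance the CQ/SQ head counters accordingly.
 */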
527 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
528 {
529 struct ena_admin_acq_entry *cqe = NULL;
530 u16 comp_num = 0;
531 u16 head_masked;
532 u8 phase;
533
534 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
535 phase = admin_queue->cq.phase;
536
537 cqe = &admin_queue->cq.entries[head_masked];
538
539 /* Go over all the completions */
540 while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
541 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
542 /* Do not read the rest of the completion entry before the
543 * phase bit has been validated
544 */
545 dma_rmb();
546 ena_com_handle_single_admin_completion(admin_queue, cqe);
547
548 head_masked++;
549 comp_num++;
550 if (unlikely(head_masked == admin_queue->q_depth)) {
551 head_masked = 0;
552 phase = !phase;
553 }
554
555 cqe = &admin_queue->cq.entries[head_masked];
556 }
557
558 admin_queue->cq.head += comp_num;
559 admin_queue->cq.phase = phase;
560 admin_queue->sq.head += comp_num;
561 admin_queue->stats.completed_cmd += comp_num;
562 }
563
564 static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
565 u8 comp_status)
566 {
567 if (unlikely(comp_status != 0))
568 ena_trc_err(admin_queue->ena_dev,
569 "Admin command failed[%u]\n", comp_status);
570
571 switch (comp_status) {
572 case ENA_ADMIN_SUCCESS:
573 return ENA_COM_OK;
574 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
575 return ENA_COM_NO_MEM;
576 case ENA_ADMIN_UNSUPPORTED_OPCODE:
577 return ENA_COM_UNSUPPORTED;
578 case ENA_ADMIN_BAD_OPCODE:
579 case ENA_ADMIN_MALFORMED_REQUEST:
580 case ENA_ADMIN_ILLEGAL_PARAMETER:
581 case ENA_ADMIN_UNKNOWN_ERROR:
582 return ENA_COM_INVAL;
583 case ENA_ADMIN_RESOURCE_BUSY:
584 return ENA_COM_TRY_AGAIN;
585 }
586
587 return ENA_COM_INVAL;
588 }
589
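/* Exponential backoff: sleep for delay_us scaled by 2^exp, bounded by the
 * minimum and maximum admin polling intervals.
 */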
590 static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
591 {
592 exp = ENA_MIN32(ENA_MAX_BACKOFF_DELAY_EXP, exp);
593 delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
594 delay_us = ENA_MIN32(ENA_MAX_ADMIN_POLL_US, delay_us * (1U << exp));
595 ENA_USLEEP(delay_us);
596 }
597
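/* Poll the admin CQ until the command completes, is aborted or the
 * completion timeout expires, releasing the completion context on exit.
 */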
598 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
599 struct ena_com_admin_queue *admin_queue)
600 {
601 unsigned long flags = 0;
602 ena_time_t timeout;
603 int ret;
604 u32 exp = 0;
605
606 timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
607
608 while (1) {
609 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
610 ena_com_handle_admin_completion(admin_queue);
611 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
612
613 if (comp_ctx->status != ENA_CMD_SUBMITTED)
614 break;
615
616 if (unlikely(ENA_TIME_EXPIRE(timeout))) {
617 ena_trc_err(admin_queue->ena_dev,
618 "Wait for completion (polling) timeout\n");
619 /* ENA didn't have any completion */
620 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
621 admin_queue->stats.no_completion++;
622 admin_queue->running_state = false;
623 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
624
625 ret = ENA_COM_TIMER_EXPIRED;
626 goto err;
627 }
628
629 ena_delay_exponential_backoff_us(exp++,
630 admin_queue->ena_dev->ena_min_poll_delay_us);
631 }
632
633 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
634 ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
635 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
636 admin_queue->stats.aborted_cmd++;
637 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
638 ret = ENA_COM_NO_DEVICE;
639 goto err;
640 }
641
642 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
643 admin_queue->ena_dev, "Invalid comp status %d\n",
644 comp_ctx->status);
645
646 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
647 err:
648 comp_ctxt_release(admin_queue, comp_ctx);
649 return ret;
650 }
651
652 /*
653 * Set the LLQ configurations of the firmware
654 *
655 * The driver provides only the enabled feature values to the device,
656 * which, in turn, checks if they are supported.
657 */
658 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
659 {
660 struct ena_com_admin_queue *admin_queue;
661 struct ena_admin_set_feat_cmd cmd;
662 struct ena_admin_set_feat_resp resp;
663 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
664 int ret;
665
666 memset(&cmd, 0x0, sizeof(cmd));
667 admin_queue = &ena_dev->admin_queue;
668
669 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
670 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
671
672 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
673 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
674 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
675 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
676
677 cmd.u.llq.accel_mode.u.set.enabled_flags =
678 BIT(ENA_ADMIN_DISABLE_META_CACHING) |
679 BIT(ENA_ADMIN_LIMIT_TX_BURST);
680
681 ret = ena_com_execute_admin_command(admin_queue,
682 (struct ena_admin_aq_entry *)&cmd,
683 sizeof(cmd),
684 (struct ena_admin_acq_entry *)&resp,
685 sizeof(resp));
686
687 if (unlikely(ret))
688 ena_trc_err(ena_dev, "Failed to set LLQ configurations: %d\n", ret);
689
690 return ret;
691 }
692
693 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
694 struct ena_admin_feature_llq_desc *llq_features,
695 struct ena_llq_configurations *llq_default_cfg)
696 {
697 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
698 struct ena_admin_accel_mode_get llq_accel_mode_get;
699 u16 supported_feat;
700 int rc;
701
702 memset(llq_info, 0, sizeof(*llq_info));
703
704 supported_feat = llq_features->header_location_ctrl_supported;
705
706 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
707 llq_info->header_location_ctrl =
708 llq_default_cfg->llq_header_location;
709 } else {
710 ena_trc_err(ena_dev, "Invalid header location control, supported: 0x%x\n",
711 supported_feat);
712 return ENA_COM_INVAL;
713 }
714
715 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
716 supported_feat = llq_features->descriptors_stride_ctrl_supported;
717 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
718 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
719 } else {
720 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
721 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
722 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
723 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
724 } else {
725 ena_trc_err(ena_dev, "Invalid desc_stride_ctrl, supported: 0x%x\n",
726 supported_feat);
727 return ENA_COM_INVAL;
728 }
729
730 ena_trc_err(ena_dev, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
731 llq_default_cfg->llq_stride_ctrl,
732 supported_feat,
733 llq_info->desc_stride_ctrl);
734 }
735 } else {
736 llq_info->desc_stride_ctrl = 0;
737 }
738
739 supported_feat = llq_features->entry_size_ctrl_supported;
740 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
741 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
742 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
743 } else {
744 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
745 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
746 llq_info->desc_list_entry_size = 128;
747 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
748 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
749 llq_info->desc_list_entry_size = 192;
750 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
751 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
752 llq_info->desc_list_entry_size = 256;
753 } else {
754 ena_trc_err(ena_dev, "Invalid entry_size_ctrl, supported: 0x%x\n",
755 supported_feat);
756 return ENA_COM_INVAL;
757 }
758
759 ena_trc_err(ena_dev, "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
760 llq_default_cfg->llq_ring_entry_size,
761 supported_feat,
762 llq_info->desc_list_entry_size);
763 }
764 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
765 /* The desc list entry size should be a whole multiple of 8
766 * This requirement comes from __iowrite64_copy()
767 */
768 ena_trc_err(ena_dev, "Illegal entry size %d\n",
769 llq_info->desc_list_entry_size);
770 return ENA_COM_INVAL;
771 }
772
773 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
774 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
775 sizeof(struct ena_eth_io_tx_desc);
776 else
777 llq_info->descs_per_entry = 1;
778
779 supported_feat = llq_features->desc_num_before_header_supported;
780 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
781 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
782 } else {
783 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
784 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
785 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
786 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
787 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
788 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
789 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
790 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
791 } else {
792 ena_trc_err(ena_dev, "Invalid descs_num_before_header, supported: 0x%x\n",
793 supported_feat);
794 return ENA_COM_INVAL;
795 }
796
797 ena_trc_err(ena_dev, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
798 llq_default_cfg->llq_num_decs_before_header,
799 supported_feat,
800 llq_info->descs_num_before_header);
801 }
802 /* Check for accelerated queue supported */
803 llq_accel_mode_get = llq_features->accel_mode.u.get;
804
805 llq_info->disable_meta_caching =
806 !!(llq_accel_mode_get.supported_flags &
807 BIT(ENA_ADMIN_DISABLE_META_CACHING));
808
809 if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
810 llq_info->max_entries_in_tx_burst =
811 llq_accel_mode_get.max_tx_burst_size /
812 llq_default_cfg->llq_ring_entry_size_value;
813
814 rc = ena_com_set_llq(ena_dev);
815 if (unlikely(rc))
816 ena_trc_err(ena_dev, "Cannot set LLQ configuration: %d\n", rc);
817
818 return rc;
819 }
820
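/* Wait for an interrupt-driven admin completion; on timeout, distinguish a
 * missing completion from a missed MSI-X interrupt and optionally fall back
 * to polling mode.
 */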
821 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
822 struct ena_com_admin_queue *admin_queue)
823 {
824 unsigned long flags = 0;
825 int ret;
826
827 ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
828 admin_queue->completion_timeout);
829
830 /* In case the command wasn't completed, find out the root cause.
831 * There might be 2 kinds of errors:
832 * 1) No completion (timeout reached)
833 * 2) There is a completion but the driver didn't get any MSI-X interrupt.
834 */
835 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
836 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
837 ena_com_handle_admin_completion(admin_queue);
838 admin_queue->stats.no_completion++;
839 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
840
841 if (comp_ctx->status == ENA_CMD_COMPLETED) {
842 admin_queue->is_missing_admin_interrupt = true;
843 ena_trc_err(admin_queue->ena_dev,
844 "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
845 comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
846 /* Check if fallback to polling is enabled */
847 if (admin_queue->auto_polling)
848 admin_queue->polling = true;
849 } else {
850 ena_trc_err(admin_queue->ena_dev,
851 "The ena device didn't send a completion for the admin cmd %d status %d\n",
852 comp_ctx->cmd_opcode, comp_ctx->status);
853 }
854 /* Check if shifted to polling mode.
855 * This will happen if there is a completion without an interrupt
856 * and autopolling mode is enabled. Normal execution continues in such a case.
857 */
858 if (!admin_queue->polling) {
859 admin_queue->running_state = false;
860 ret = ENA_COM_TIMER_EXPIRED;
861 goto err;
862 }
863 } else if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
864 ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
865 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
866 admin_queue->stats.aborted_cmd++;
867 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
868 ret = ENA_COM_NO_DEVICE;
869 goto err;
870 }
871
872 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
873 admin_queue->ena_dev, "Invalid comp status %d\n",
874 comp_ctx->status);
875
876 ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
877 err:
878 comp_ctxt_release(admin_queue, comp_ctx);
879 return ret;
880 }
881
882 /* This method reads a hardware device register by posting a write
883 * and waiting for the response.
884 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
885 */
886 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
887 {
888 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
889 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
890 mmio_read->read_resp;
891 u32 mmio_read_reg, ret, i;
892 unsigned long flags = 0;
893 u32 timeout = mmio_read->reg_read_to;
894
895 ENA_MIGHT_SLEEP();
896
897 if (timeout == 0)
898 timeout = ENA_REG_READ_TIMEOUT;
899
900 /* If readless is disabled, perform regular read */
901 if (!mmio_read->readless_supported)
902 return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
903
904 ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
905 mmio_read->seq_num++;
906
907 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
908 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
909 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
910 mmio_read_reg |= mmio_read->seq_num &
911 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
912
913 ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
914 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
915
916 for (i = 0; i < timeout; i++) {
917 if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
918 break;
919
920 ENA_UDELAY(1);
921 }
922
923 if (unlikely(i == timeout)) {
924 ena_trc_err(ena_dev, "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
925 mmio_read->seq_num,
926 offset,
927 read_resp->req_id,
928 read_resp->reg_off);
929 ret = ENA_MMIO_READ_TIMEOUT;
930 goto err;
931 }
932
933 if (unlikely(read_resp->reg_off != offset)) {
934 ena_trc_err(ena_dev, "Read failure: wrong offset provided\n");
935 ret = ENA_MMIO_READ_TIMEOUT;
936 } else {
937 ret = read_resp->reg_val;
938 }
939 err:
940 ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
941
942 return ret;
943 }
944
945 /* There are two ways to wait for completion.
946 * Polling mode - wait until the completion is available.
947 * Async mode - wait on wait queue until the completion is ready
948 * (or the timeout expired).
949 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
950 * to mark the completions.
951 */
952 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
953 struct ena_com_admin_queue *admin_queue)
954 {
955 if (admin_queue->polling)
956 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
957 admin_queue);
958
959 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
960 admin_queue);
961 }
962
963 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
964 struct ena_com_io_sq *io_sq)
965 {
966 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
967 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
968 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
969 u8 direction;
970 int ret;
971
972 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
973
974 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
975 direction = ENA_ADMIN_SQ_DIRECTION_TX;
976 else
977 direction = ENA_ADMIN_SQ_DIRECTION_RX;
978
979 destroy_cmd.sq.sq_identity |= (direction <<
980 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
981 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
982
983 destroy_cmd.sq.sq_idx = io_sq->idx;
984 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
985
986 ret = ena_com_execute_admin_command(admin_queue,
987 (struct ena_admin_aq_entry *)&destroy_cmd,
988 sizeof(destroy_cmd),
989 (struct ena_admin_acq_entry *)&destroy_resp,
990 sizeof(destroy_resp));
991
992 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
993 ena_trc_err(ena_dev, "Failed to destroy io sq error: %d\n", ret);
994
995 return ret;
996 }
997
998 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
999 struct ena_com_io_sq *io_sq,
1000 struct ena_com_io_cq *io_cq)
1001 {
1002 size_t size;
1003
1004 if (io_cq->cdesc_addr.virt_addr) {
1005 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
1006
1007 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1008 size,
1009 io_cq->cdesc_addr.virt_addr,
1010 io_cq->cdesc_addr.phys_addr,
1011 io_cq->cdesc_addr.mem_handle);
1012
1013 io_cq->cdesc_addr.virt_addr = NULL;
1014 }
1015
1016 if (io_sq->desc_addr.virt_addr) {
1017 size = io_sq->desc_entry_size * io_sq->q_depth;
1018
1019 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1020 size,
1021 io_sq->desc_addr.virt_addr,
1022 io_sq->desc_addr.phys_addr,
1023 io_sq->desc_addr.mem_handle);
1024
1025 io_sq->desc_addr.virt_addr = NULL;
1026 }
1027
1028 if (io_sq->bounce_buf_ctrl.base_buffer) {
1029 ENA_MEM_FREE(ena_dev->dmadev,
1030 io_sq->bounce_buf_ctrl.base_buffer,
1031 (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
1032 io_sq->bounce_buf_ctrl.base_buffer = NULL;
1033 }
1034 }
1035
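/* Poll the device status register until the reset-in-progress bit matches
 * exp_state or the (100 ms resolution) timeout expires.
 */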
1036 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
1037 u16 exp_state)
1038 {
1039 u32 val, exp = 0;
1040 ena_time_t timeout_stamp;
1041
1042 /* Convert timeout from resolution of 100ms to us resolution. */
1043 timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);
1044
1045 while (1) {
1046 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1047
1048 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
1049 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1050 return ENA_COM_TIMER_EXPIRED;
1051 }
1052
1053 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
1054 exp_state)
1055 return 0;
1056
1057 if (unlikely(ENA_TIME_EXPIRE(timeout_stamp)))
1058 return ENA_COM_TIMER_EXPIRED;
1059
1060 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1061 }
1062 }
1063
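/* Check whether the device advertised support for the given admin feature. */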
1064 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
1065 enum ena_admin_aq_feature_id feature_id)
1066 {
1067 u32 feature_mask = 1 << feature_id;
1068
1069 /* Device attributes are always supported */
1070 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
1071 !(ena_dev->supported_features & feature_mask))
1072 return false;
1073
1074 return true;
1075 }
1076
1077 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
1078 struct ena_admin_get_feat_resp *get_resp,
1079 enum ena_admin_aq_feature_id feature_id,
1080 dma_addr_t control_buf_dma_addr,
1081 u32 control_buff_size,
1082 u8 feature_ver)
1083 {
1084 struct ena_com_admin_queue *admin_queue;
1085 struct ena_admin_get_feat_cmd get_cmd;
1086 int ret;
1087
1088 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
1089 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", feature_id);
1090 return ENA_COM_UNSUPPORTED;
1091 }
1092
1093 memset(&get_cmd, 0x0, sizeof(get_cmd));
1094 admin_queue = &ena_dev->admin_queue;
1095
1096 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1097
1098 if (control_buff_size)
1099 get_cmd.aq_common_descriptor.flags =
1100 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1101 else
1102 get_cmd.aq_common_descriptor.flags = 0;
1103
1104 ret = ena_com_mem_addr_set(ena_dev,
1105 &get_cmd.control_buffer.address,
1106 control_buf_dma_addr);
1107 if (unlikely(ret)) {
1108 ena_trc_err(ena_dev, "Memory address set failed\n");
1109 return ret;
1110 }
1111
1112 get_cmd.control_buffer.length = control_buff_size;
1113 get_cmd.feat_common.feature_version = feature_ver;
1114 get_cmd.feat_common.feature_id = feature_id;
1115
1116 ret = ena_com_execute_admin_command(admin_queue,
1117 (struct ena_admin_aq_entry *)
1118 &get_cmd,
1119 sizeof(get_cmd),
1120 (struct ena_admin_acq_entry *)
1121 get_resp,
1122 sizeof(*get_resp));
1123
1124 if (unlikely(ret))
1125 ena_trc_err(ena_dev, "Failed to submit get_feature command %d error: %d\n",
1126 feature_id, ret);
1127
1128 return ret;
1129 }
1130
1131 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1132 struct ena_admin_get_feat_resp *get_resp,
1133 enum ena_admin_aq_feature_id feature_id,
1134 u8 feature_ver)
1135 {
1136 return ena_com_get_feature_ex(ena_dev,
1137 get_resp,
1138 feature_id,
1139 0,
1140 0,
1141 feature_ver);
1142 }
1143
1144 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1145 {
1146 return ena_dev->rss.hash_func;
1147 }
1148
1149 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1150 {
1151 struct ena_admin_feature_rss_flow_hash_control *hash_key =
1152 (ena_dev->rss).hash_key;
1153
1154 ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
1155 /* The key buffer is stored in the device in an array of
1156 * uint32 elements.
1157 */
1158 hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1159 }
1160
1161 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1162 {
1163 struct ena_rss *rss = &ena_dev->rss;
1164
1165 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
1166 return ENA_COM_UNSUPPORTED;
1167
1168 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1169 sizeof(*rss->hash_key),
1170 rss->hash_key,
1171 rss->hash_key_dma_addr,
1172 rss->hash_key_mem_handle);
1173
1174 if (unlikely(!rss->hash_key))
1175 return ENA_COM_NO_MEM;
1176
1177 return 0;
1178 }
1179
1180 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1181 {
1182 struct ena_rss *rss = &ena_dev->rss;
1183
1184 if (rss->hash_key)
1185 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1186 sizeof(*rss->hash_key),
1187 rss->hash_key,
1188 rss->hash_key_dma_addr,
1189 rss->hash_key_mem_handle);
1190 rss->hash_key = NULL;
1191 }
1192
1193 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1194 {
1195 struct ena_rss *rss = &ena_dev->rss;
1196
1197 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1198 sizeof(*rss->hash_ctrl),
1199 rss->hash_ctrl,
1200 rss->hash_ctrl_dma_addr,
1201 rss->hash_ctrl_mem_handle);
1202
1203 if (unlikely(!rss->hash_ctrl))
1204 return ENA_COM_NO_MEM;
1205
1206 return 0;
1207 }
1208
1209 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1210 {
1211 struct ena_rss *rss = &ena_dev->rss;
1212
1213 if (rss->hash_ctrl)
1214 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1215 sizeof(*rss->hash_ctrl),
1216 rss->hash_ctrl,
1217 rss->hash_ctrl_dma_addr,
1218 rss->hash_ctrl_mem_handle);
1219 rss->hash_ctrl = NULL;
1220 }
1221
1222 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1223 u16 log_size)
1224 {
1225 struct ena_rss *rss = &ena_dev->rss;
1226 struct ena_admin_get_feat_resp get_resp;
1227 size_t tbl_size;
1228 int ret;
1229
1230 ret = ena_com_get_feature(ena_dev, &get_resp,
1231 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
1232 if (unlikely(ret))
1233 return ret;
1234
1235 if ((get_resp.u.ind_table.min_size > log_size) ||
1236 (get_resp.u.ind_table.max_size < log_size)) {
1237 ena_trc_err(ena_dev, "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1238 1 << log_size,
1239 1 << get_resp.u.ind_table.min_size,
1240 1 << get_resp.u.ind_table.max_size);
1241 return ENA_COM_INVAL;
1242 }
1243
1244 tbl_size = (1ULL << log_size) *
1245 sizeof(struct ena_admin_rss_ind_table_entry);
1246
1247 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1248 tbl_size,
1249 rss->rss_ind_tbl,
1250 rss->rss_ind_tbl_dma_addr,
1251 rss->rss_ind_tbl_mem_handle);
1252 if (unlikely(!rss->rss_ind_tbl))
1253 goto mem_err1;
1254
1255 tbl_size = (1ULL << log_size) * sizeof(u16);
1256 rss->host_rss_ind_tbl =
1257 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
1258 if (unlikely(!rss->host_rss_ind_tbl))
1259 goto mem_err2;
1260
1261 rss->tbl_log_size = log_size;
1262
1263 return 0;
1264
1265 mem_err2:
1266 tbl_size = (1ULL << log_size) *
1267 sizeof(struct ena_admin_rss_ind_table_entry);
1268
1269 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1270 tbl_size,
1271 rss->rss_ind_tbl,
1272 rss->rss_ind_tbl_dma_addr,
1273 rss->rss_ind_tbl_mem_handle);
1274 rss->rss_ind_tbl = NULL;
1275 mem_err1:
1276 rss->tbl_log_size = 0;
1277 return ENA_COM_NO_MEM;
1278 }
1279
1280 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1281 {
1282 struct ena_rss *rss = &ena_dev->rss;
1283 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1284 sizeof(struct ena_admin_rss_ind_table_entry);
1285
1286 if (rss->rss_ind_tbl)
1287 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1288 tbl_size,
1289 rss->rss_ind_tbl,
1290 rss->rss_ind_tbl_dma_addr,
1291 rss->rss_ind_tbl_mem_handle);
1292 rss->rss_ind_tbl = NULL;
1293
1294 if (rss->host_rss_ind_tbl)
1295 ENA_MEM_FREE(ena_dev->dmadev,
1296 rss->host_rss_ind_tbl,
1297 ((1ULL << rss->tbl_log_size) * sizeof(u16)));
1298 rss->host_rss_ind_tbl = NULL;
1299 }
1300
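/* Issue an ENA_ADMIN_CREATE_SQ command for the given IO SQ and store the
 * returned SQ index, doorbell address and (for LLQ) device memory offset.
 */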
1301 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1302 struct ena_com_io_sq *io_sq, u16 cq_idx)
1303 {
1304 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1305 struct ena_admin_aq_create_sq_cmd create_cmd;
1306 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1307 u8 direction;
1308 int ret;
1309
1310 memset(&create_cmd, 0x0, sizeof(create_cmd));
1311
1312 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1313
1314 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1315 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1316 else
1317 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1318
1319 create_cmd.sq_identity |= (direction <<
1320 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1321 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1322
1323 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1324 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1325
1326 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1327 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1328 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1329
1330 create_cmd.sq_caps_3 |=
1331 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1332
1333 create_cmd.cq_idx = cq_idx;
1334 create_cmd.sq_depth = io_sq->q_depth;
1335
1336 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1337 ret = ena_com_mem_addr_set(ena_dev,
1338 &create_cmd.sq_ba,
1339 io_sq->desc_addr.phys_addr);
1340 if (unlikely(ret)) {
1341 ena_trc_err(ena_dev, "Memory address set failed\n");
1342 return ret;
1343 }
1344 }
1345
1346 ret = ena_com_execute_admin_command(admin_queue,
1347 (struct ena_admin_aq_entry *)&create_cmd,
1348 sizeof(create_cmd),
1349 (struct ena_admin_acq_entry *)&cmd_completion,
1350 sizeof(cmd_completion));
1351 if (unlikely(ret)) {
1352 ena_trc_err(ena_dev, "Failed to create IO SQ. error: %d\n", ret);
1353 return ret;
1354 }
1355
1356 io_sq->idx = cmd_completion.sq_idx;
1357
1358 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1359 (uintptr_t)cmd_completion.sq_doorbell_offset);
1360
1361 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1362 io_sq->desc_addr.pbuf_dev_addr =
1363 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1364 cmd_completion.llq_descriptors_offset);
1365 }
1366
1367 ena_trc_dbg(ena_dev, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1368
1369 return ret;
1370 }
1371
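/* Translate the host RSS indirection table entries (queue ids) into the
 * device representation (RX IO SQ indices).
 */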
1372 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1373 {
1374 struct ena_rss *rss = &ena_dev->rss;
1375 struct ena_com_io_sq *io_sq;
1376 u16 qid;
1377 int i;
1378
1379 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1380 qid = rss->host_rss_ind_tbl[i];
1381 if (qid >= ENA_TOTAL_NUM_QUEUES)
1382 return ENA_COM_INVAL;
1383
1384 io_sq = &ena_dev->io_sq_queues[qid];
1385
1386 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1387 return ENA_COM_INVAL;
1388
1389 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1390 }
1391
1392 return 0;
1393 }
1394
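/* Rescale the stored Rx/Tx interrupt moderation intervals when the device
 * reports a new interrupt delay resolution.
 */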
1395 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1396 u16 intr_delay_resolution)
1397 {
1398 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1399
1400 if (unlikely(!intr_delay_resolution)) {
1401 ena_trc_err(ena_dev, "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1402 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1403 }
1404
1405 /* update Rx */
1406 ena_dev->intr_moder_rx_interval =
1407 ena_dev->intr_moder_rx_interval *
1408 prev_intr_delay_resolution /
1409 intr_delay_resolution;
1410
1411 /* update Tx */
1412 ena_dev->intr_moder_tx_interval =
1413 ena_dev->intr_moder_tx_interval *
1414 prev_intr_delay_resolution /
1415 intr_delay_resolution;
1416
1417 ena_dev->intr_delay_resolution = intr_delay_resolution;
1418 }
1419
1420 /*****************************************************************************/
1421 /******************************* API ******************************/
1422 /*****************************************************************************/
1423
1424 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1425 struct ena_admin_aq_entry *cmd,
1426 size_t cmd_size,
1427 struct ena_admin_acq_entry *comp,
1428 size_t comp_size)
1429 {
1430 struct ena_comp_ctx *comp_ctx;
1431 int ret;
1432
1433 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1434 comp, comp_size);
1435 if (IS_ERR(comp_ctx)) {
1436 ret = PTR_ERR(comp_ctx);
1437 if (ret == ENA_COM_NO_DEVICE)
1438 ena_trc_dbg(admin_queue->ena_dev,
1439 "Failed to submit command [%d]\n",
1440 ret);
1441 else
1442 ena_trc_err(admin_queue->ena_dev,
1443 "Failed to submit command [%d]\n",
1444 ret);
1445
1446 return ret;
1447 }
1448
1449 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1450 if (unlikely(ret)) {
1451 if (admin_queue->running_state)
1452 ena_trc_err(admin_queue->ena_dev,
1453 "Failed to process command. ret = %d\n", ret);
1454 else
1455 ena_trc_dbg(admin_queue->ena_dev,
1456 "Failed to process command. ret = %d\n", ret);
1457 }
1458 return ret;
1459 }
1460
1461 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1462 struct ena_com_io_cq *io_cq)
1463 {
1464 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1465 struct ena_admin_aq_create_cq_cmd create_cmd;
1466 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1467 int ret;
1468
1469 memset(&create_cmd, 0x0, sizeof(create_cmd));
1470
1471 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1472
1473 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1474 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1475 create_cmd.cq_caps_1 |=
1476 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1477
1478 create_cmd.msix_vector = io_cq->msix_vector;
1479 create_cmd.cq_depth = io_cq->q_depth;
1480
1481 ret = ena_com_mem_addr_set(ena_dev,
1482 &create_cmd.cq_ba,
1483 io_cq->cdesc_addr.phys_addr);
1484 if (unlikely(ret)) {
1485 ena_trc_err(ena_dev, "Memory address set failed\n");
1486 return ret;
1487 }
1488
1489 ret = ena_com_execute_admin_command(admin_queue,
1490 (struct ena_admin_aq_entry *)&create_cmd,
1491 sizeof(create_cmd),
1492 (struct ena_admin_acq_entry *)&cmd_completion,
1493 sizeof(cmd_completion));
1494 if (unlikely(ret)) {
1495 ena_trc_err(ena_dev, "Failed to create IO CQ. error: %d\n", ret);
1496 return ret;
1497 }
1498
1499 io_cq->idx = cmd_completion.cq_idx;
1500
1501 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1502 cmd_completion.cq_interrupt_unmask_register_offset);
1503
1504 if (cmd_completion.numa_node_register_offset)
1505 io_cq->numa_node_cfg_reg =
1506 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1507 cmd_completion.numa_node_register_offset);
1508
1509 ena_trc_dbg(ena_dev, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1510
1511 return ret;
1512 }
1513
1514 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1515 struct ena_com_io_sq **io_sq,
1516 struct ena_com_io_cq **io_cq)
1517 {
1518 if (unlikely(qid >= ENA_TOTAL_NUM_QUEUES)) {
1519 ena_trc_err(ena_dev, "Invalid queue number %d but the max is %d\n",
1520 qid, ENA_TOTAL_NUM_QUEUES);
1521 return ENA_COM_INVAL;
1522 }
1523
1524 *io_sq = &ena_dev->io_sq_queues[qid];
1525 *io_cq = &ena_dev->io_cq_queues[qid];
1526
1527 return 0;
1528 }
1529
1530 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1531 {
1532 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1533 struct ena_comp_ctx *comp_ctx;
1534 u16 i;
1535
1536 if (!admin_queue->comp_ctx)
1537 return;
1538
1539 for (i = 0; i < admin_queue->q_depth; i++) {
1540 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1541 if (unlikely(!comp_ctx))
1542 break;
1543
1544 comp_ctx->status = ENA_CMD_ABORTED;
1545
1546 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
1547 }
1548 }
1549
1550 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1551 {
1552 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1553 unsigned long flags = 0;
1554 u32 exp = 0;
1555
1556 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1557 while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
1558 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1559 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1560 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1561 }
1562 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1563 }
1564
1565 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1566 struct ena_com_io_cq *io_cq)
1567 {
1568 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1569 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1570 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1571 int ret;
1572
1573 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1574
1575 destroy_cmd.cq_idx = io_cq->idx;
1576 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1577
1578 ret = ena_com_execute_admin_command(admin_queue,
1579 (struct ena_admin_aq_entry *)&destroy_cmd,
1580 sizeof(destroy_cmd),
1581 (struct ena_admin_acq_entry *)&destroy_resp,
1582 sizeof(destroy_resp));
1583
1584 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
1585 ena_trc_err(ena_dev, "Failed to destroy IO CQ. error: %d\n", ret);
1586
1587 return ret;
1588 }
1589
1590 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1591 {
1592 return ena_dev->admin_queue.running_state;
1593 }
1594
1595 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1596 {
1597 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1598 unsigned long flags = 0;
1599
1600 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1601 ena_dev->admin_queue.running_state = state;
1602 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1603 }
1604
1605 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1606 {
1607 u16 depth = ena_dev->aenq.q_depth;
1608
1609 ENA_WARN(ena_dev->aenq.head != depth, ena_dev, "Invalid AENQ state\n");
1610
1611 /* Init head_db to mark that all entries in the queue
1612 * are initially available
1613 */
1614 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1615 }
1616
1617 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1618 {
1619 struct ena_com_admin_queue *admin_queue;
1620 struct ena_admin_set_feat_cmd cmd;
1621 struct ena_admin_set_feat_resp resp;
1622 struct ena_admin_get_feat_resp get_resp;
1623 int ret;
1624
1625 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1626 if (unlikely(ret)) {
1627 ena_trc_info(ena_dev, "Can't get aenq configuration\n");
1628 return ret;
1629 }
1630
1631 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1632 ena_trc_warn(ena_dev, "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1633 get_resp.u.aenq.supported_groups,
1634 groups_flag);
1635 return ENA_COM_UNSUPPORTED;
1636 }
1637
1638 memset(&cmd, 0x0, sizeof(cmd));
1639 admin_queue = &ena_dev->admin_queue;
1640
1641 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1642 cmd.aq_common_descriptor.flags = 0;
1643 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1644 cmd.u.aenq.enabled_groups = groups_flag;
1645
1646 ret = ena_com_execute_admin_command(admin_queue,
1647 (struct ena_admin_aq_entry *)&cmd,
1648 sizeof(cmd),
1649 (struct ena_admin_acq_entry *)&resp,
1650 sizeof(resp));
1651
1652 if (unlikely(ret))
1653 ena_trc_err(ena_dev, "Failed to config AENQ ret: %d\n", ret);
1654
1655 return ret;
1656 }
1657
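/* Illustrative usage sketch for ena_com_set_aenq_config() (editorial example,
 * not part of the driver): the caller passes a bitmap of the AENQ groups it
 * wants enabled. ENA_ADMIN_KEEP_ALIVE is the only group name taken from this
 * file; other groups are enabled the same way.
 *
 *	u32 groups = BIT(ENA_ADMIN_KEEP_ALIVE);
 *	int rc;
 *
 *	rc = ena_com_set_aenq_config(ena_dev, groups);
 *	if (rc == ENA_COM_UNSUPPORTED)
 *		ena_trc_warn(ena_dev, "Requested AENQ group isn't supported\n");
 */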
1658 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1659 {
1660 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1661 u32 width;
1662
1663 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1664 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1665 return ENA_COM_TIMER_EXPIRED;
1666 }
1667
1668 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1669 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1670
1671 ena_trc_dbg(ena_dev, "ENA dma width: %d\n", width);
1672
1673 if (unlikely((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
1674 ena_trc_err(ena_dev, "DMA width illegal value: %d\n", width);
1675 return ENA_COM_INVAL;
1676 }
1677
1678 ena_dev->dma_addr_bits = width;
1679
1680 return width;
1681 }
1682
1683 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1684 {
1685 u32 ver;
1686 u32 ctrl_ver;
1687 u32 ctrl_ver_masked;
1688
1689 /* Make sure the ENA version and the controller version are at least
1690 * as high as the driver expects
1691 */
1692 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1693 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1694 ENA_REGS_CONTROLLER_VERSION_OFF);
1695
1696 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1697 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1698 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
1699 return ENA_COM_TIMER_EXPIRED;
1700 }
1701
1702 ena_trc_info(ena_dev, "ENA device version: %d.%d\n",
1703 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1704 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1705 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1706
1707 ena_trc_info(ena_dev, "ENA controller version: %d.%d.%d implementation version %d\n",
1708 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
1709 >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1710 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
1711 >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1712 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1713 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1714 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1715
1716 ctrl_ver_masked =
1717 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1718 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1719 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1720
1721 /* Validate the ctrl version without the implementation ID */
1722 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1723 ena_trc_err(ena_dev, "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1724 return -1;
1725 }
1726
1727 return 0;
1728 }
1729
1730 static void
1731 ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
1732 struct ena_com_admin_queue *admin_queue)
1733
1734 {
1735 if (!admin_queue->comp_ctx)
1736 return;
1737
1738 ENA_WAIT_EVENTS_DESTROY(admin_queue);
1739 ENA_MEM_FREE(ena_dev->dmadev,
1740 admin_queue->comp_ctx,
1741 (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
1742
1743 admin_queue->comp_ctx = NULL;
1744 }
1745
1746 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1747 {
1748 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1749 struct ena_com_admin_cq *cq = &admin_queue->cq;
1750 struct ena_com_admin_sq *sq = &admin_queue->sq;
1751 struct ena_com_aenq *aenq = &ena_dev->aenq;
1752 u16 size;
1753
1754 ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
1755
1756 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1757 if (sq->entries)
1758 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1759 sq->dma_addr, sq->mem_handle);
1760 sq->entries = NULL;
1761
1762 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1763 if (cq->entries)
1764 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1765 cq->dma_addr, cq->mem_handle);
1766 cq->entries = NULL;
1767
1768 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1769 if (ena_dev->aenq.entries)
1770 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1771 aenq->dma_addr, aenq->mem_handle);
1772 aenq->entries = NULL;
1773 ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
1774 }
1775
1776 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1777 {
1778 u32 mask_value = 0;
1779
1780 if (polling)
1781 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1782
1783 ENA_REG_WRITE32(ena_dev->bus, mask_value,
1784 ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1785 ena_dev->admin_queue.polling = polling;
1786 }
1787
1788 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
1789 {
1790 return ena_dev->admin_queue.polling;
1791 }
1792
1793 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1794 bool polling)
1795 {
1796 ena_dev->admin_queue.auto_polling = polling;
1797 }
1798
1799 bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
1800 {
1801 return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
1802 }
1803
1804 int ena_com_phc_init(struct ena_com_dev *ena_dev)
1805 {
1806 struct ena_com_phc_info *phc = &ena_dev->phc;
1807
1808 memset(phc, 0x0, sizeof(*phc));
1809
1810 /* Allocate shared memory to hold the PHC timestamp retrieved from the device */
1811 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1812 sizeof(*phc->virt_addr),
1813 phc->virt_addr,
1814 phc->phys_addr,
1815 phc->mem_handle);
1816 if (unlikely(!phc->virt_addr))
1817 return ENA_COM_NO_MEM;
1818
1819 ENA_SPINLOCK_INIT(phc->lock);
1820
1821 phc->virt_addr->req_id = 0;
1822 phc->virt_addr->timestamp = 0;
1823
1824 return 0;
1825 }
1826
1827 int ena_com_phc_config(struct ena_com_dev *ena_dev)
1828 {
1829 struct ena_com_phc_info *phc = &ena_dev->phc;
1830 struct ena_admin_get_feat_resp get_feat_resp;
1831 struct ena_admin_set_feat_resp set_feat_resp;
1832 struct ena_admin_set_feat_cmd set_feat_cmd;
1833 int ret = 0;
1834
1835 /* Get default device PHC configuration */
1836 ret = ena_com_get_feature(ena_dev,
1837 &get_feat_resp,
1838 ENA_ADMIN_PHC_CONFIG,
1839 ENA_ADMIN_PHC_FEATURE_VERSION_0);
1840 if (unlikely(ret)) {
1841 ena_trc_err(ena_dev, "Failed to get PHC feature configuration, error: %d\n", ret);
1842 return ret;
1843 }
1844
1845 /* Supporting only PHC V0 (readless mode with error bound) */
1846 if (get_feat_resp.u.phc.version != ENA_ADMIN_PHC_FEATURE_VERSION_0) {
1847 ena_trc_err(ena_dev, "Unsupported PHC version (0x%X), error: %d\n",
1848 get_feat_resp.u.phc.version,
1849 ENA_COM_UNSUPPORTED);
1850 return ENA_COM_UNSUPPORTED;
1851 }
1852
1853 /* Update PHC doorbell offset according to device value, used to write req_id to PHC bar */
1854 phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
1855
1856 /* Update PHC expire timeout according to device or default driver value */
1857 phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
1858 get_feat_resp.u.phc.expire_timeout_usec :
1859 ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;
1860
1861 /* Update PHC block timeout according to device or default driver value */
1862 phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
1863 get_feat_resp.u.phc.block_timeout_usec :
1864 ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
1865
1866 /* Sanity check - expire timeout must not exceed block timeout */
1867 if (phc->expire_timeout_usec > phc->block_timeout_usec)
1868 phc->expire_timeout_usec = phc->block_timeout_usec;
1869
1870 /* Prepare PHC config feature command */
1871 memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
1872 set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1873 set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
1874 set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
1875 ret = ena_com_mem_addr_set(ena_dev, &set_feat_cmd.u.phc.output_address, phc->phys_addr);
1876 if (unlikely(ret)) {
1877 ena_trc_err(ena_dev, "Failed setting PHC output address, error: %d\n", ret);
1878 return ret;
1879 }
1880
1881 /* Send PHC feature command to the device */
1882 ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
1883 (struct ena_admin_aq_entry *)&set_feat_cmd,
1884 sizeof(set_feat_cmd),
1885 (struct ena_admin_acq_entry *)&set_feat_resp,
1886 sizeof(set_feat_resp));
1887
1888 if (unlikely(ret)) {
1889 ena_trc_err(ena_dev, "Failed to enable PHC, error: %d\n", ret);
1890 return ret;
1891 }
1892
1893 phc->active = true;
1894 ena_trc_dbg(ena_dev, "PHC is active in the device\n");
1895
1896 return ret;
1897 }
1898
1899 void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
1900 {
1901 struct ena_com_phc_info *phc = &ena_dev->phc;
1902 unsigned long flags = 0;
1903
1904 /* If PHC is not supported by the device, exit silently */
1905 if (!phc->virt_addr)
1906 return;
1907
1908 ENA_SPINLOCK_LOCK(phc->lock, flags);
1909 phc->active = false;
1910 ENA_SPINLOCK_UNLOCK(phc->lock, flags);
1911
1912 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1913 sizeof(*phc->virt_addr),
1914 phc->virt_addr,
1915 phc->phys_addr,
1916 phc->mem_handle);
1917 phc->virt_addr = NULL;
1918
1919 ENA_SPINLOCK_DESTROY(phc->lock);
1920 }
1921
1922 int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
1923 {
1924 volatile struct ena_admin_phc_resp *read_resp = ena_dev->phc.virt_addr;
1925 const ena_time_high_res_t zero_system_time = ENA_TIME_INIT_HIGH_RES();
1926 struct ena_com_phc_info *phc = &ena_dev->phc;
1927 ena_time_high_res_t expire_time;
1928 ena_time_high_res_t block_time;
1929 unsigned long flags = 0;
1930 int ret = ENA_COM_OK;
1931
1932 if (!phc->active) {
1933 ena_trc_err(ena_dev, "PHC feature is not active in the device\n");
1934 return ENA_COM_UNSUPPORTED;
1935 }
1936
1937 ENA_SPINLOCK_LOCK(phc->lock, flags);
1938
1939 /* Check if PHC is in blocked state */
1940 if (unlikely(ENA_TIME_COMPARE_HIGH_RES(phc->system_time, zero_system_time))) {
1941 /* Check if blocking time expired */
1942 block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time,
1943 phc->block_timeout_usec);
1944 if (!ENA_TIME_EXPIRE_HIGH_RES(block_time)) {
1945 /* PHC is still in blocked state, skip PHC request */
1946 phc->stats.phc_skp++;
1947 ret = ENA_COM_DEVICE_BUSY;
1948 goto skip;
1949 }
1950
1951 /* PHC is in active state, update statistics according to req_id and error_flags */
1952 if ((READ_ONCE16(read_resp->req_id) != phc->req_id) ||
1953 (read_resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
1954 /* Device didn't update req_id during the blocking time or the timestamp is
1955 * invalid, which indicates a device error
1956 */
1957 phc->stats.phc_err++;
1958 } else {
1959 /* Device updated req_id during blocking time with valid timestamp */
1960 phc->stats.phc_exp++;
1961 }
1962 }
1963
1964 /* Setting relative timeouts */
1965 phc->system_time = ENA_GET_SYSTEM_TIME_HIGH_RES();
1966 block_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time, phc->block_timeout_usec);
1967 expire_time = ENA_GET_SYSTEM_TIMEOUT_HIGH_RES(phc->system_time, phc->expire_timeout_usec);
1968
1969 /* We expect the device to return this req_id once the new PHC timestamp is updated */
1970 phc->req_id++;
1971
1972 /* Initialize PHC shared memory with a different req_id value so we can identify
1973 * when the device changes it to req_id
1974 */
1975 read_resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
1976
1977 /* Writing req_id to PHC bar */
1978 ENA_REG_WRITE32(ena_dev->bus, phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
1979
1980 /* Stalling until the device updates req_id */
1981 while (1) {
1982 if (unlikely(ENA_TIME_EXPIRE_HIGH_RES(expire_time))) {
1983 /* Gave up waiting for an updated req_id; PHC enters a blocked state until
1984 * the blocking time passes. During this time any get PHC timestamp or
1985 * error bound request will fail with a device busy error
1986 */
1987 phc->error_bound = ENA_PHC_MAX_ERROR_BOUND;
1988 ret = ENA_COM_DEVICE_BUSY;
1989 break;
1990 }
1991
1992 /* Check if req_id was updated by the device */
1993 if (READ_ONCE16(read_resp->req_id) != phc->req_id) {
1994 /* req_id was not updated by the device yet, check again on next loop */
1995 continue;
1996 }
1997
1998 /* req_id was updated by the device, which indicates that PHC timestamp, error_bound
1999 * and error_flags are updated too; check for errors before retrieving the timestamp
2000 * and error_bound values
2001 */
2002 if (unlikely(read_resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
2003 /* The retrieved timestamp or error bound is erroneous; PHC enters a blocked
2004 * state until the blocking time passes. During this time any get PHC
2005 * timestamp or error bound request will fail with a device busy error
2006 */
2007 phc->error_bound = ENA_PHC_MAX_ERROR_BOUND;
2008 ret = ENA_COM_DEVICE_BUSY;
2009 break;
2010 }
2011
2012 /* PHC timestamp value is returned to the caller */
2013 *timestamp = read_resp->timestamp;
2014
2015 /* Error bound value is cached for future retrieval by caller */
2016 phc->error_bound = read_resp->error_bound;
2017
2018 /* Update statistic on valid PHC timestamp retrieval */
2019 phc->stats.phc_cnt++;
2020
2021 /* This indicates PHC state is active */
2022 phc->system_time = zero_system_time;
2023 break;
2024 }
2025
2026 skip:
2027 ENA_SPINLOCK_UNLOCK(phc->lock, flags);
2028
2029 return ret;
2030 }
2031
2032 int ena_com_phc_get_error_bound(struct ena_com_dev *ena_dev, u32 *error_bound)
2033 {
2034 struct ena_com_phc_info *phc = &ena_dev->phc;
2035 u32 local_error_bound = phc->error_bound;
2036
2037 if (!phc->active) {
2038 ena_trc_err(ena_dev, "PHC feature is not active in the device\n");
2039 return ENA_COM_UNSUPPORTED;
2040 }
2041
2042 if (local_error_bound == ENA_PHC_MAX_ERROR_BOUND)
2043 return ENA_COM_DEVICE_BUSY;
2044
2045 *error_bound = local_error_bound;
2046
2047 return ENA_COM_OK;
2048 }
2049
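/* Illustrative usage sketch for the PHC getters above (editorial example, not
 * part of the driver): retrieve a timestamp and then the error bound cached by
 * the same request. Return codes mirror the values used above.
 *
 *	u64 timestamp;
 *	u32 error_bound;
 *	int rc;
 *
 *	rc = ena_com_phc_get_timestamp(ena_dev, &timestamp);
 *	if (rc == ENA_COM_OK)
 *		rc = ena_com_phc_get_error_bound(ena_dev, &error_bound);
 *	if (rc == ENA_COM_DEVICE_BUSY) {
 *		// PHC is blocked; retry after the block timeout elapses
 *	}
 */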
2050 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
2051 {
2052 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2053
2054 ENA_SPINLOCK_INIT(mmio_read->lock);
2055 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2056 sizeof(*mmio_read->read_resp),
2057 mmio_read->read_resp,
2058 mmio_read->read_resp_dma_addr,
2059 mmio_read->read_resp_mem_handle);
2060 if (unlikely(!mmio_read->read_resp))
2061 goto err;
2062
2063 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2064
2065 mmio_read->read_resp->req_id = 0x0;
2066 mmio_read->seq_num = 0x0;
2067 mmio_read->readless_supported = true;
2068
2069 return 0;
2070
2071 err:
2072 ENA_SPINLOCK_DESTROY(mmio_read->lock);
2073 return ENA_COM_NO_MEM;
2074 }
2075
2076 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
2077 {
2078 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2079
2080 mmio_read->readless_supported = readless_supported;
2081 }
2082
2083 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
2084 {
2085 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2086
2087 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
2088 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
2089
2090 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2091 sizeof(*mmio_read->read_resp),
2092 mmio_read->read_resp,
2093 mmio_read->read_resp_dma_addr,
2094 mmio_read->read_resp_mem_handle);
2095
2096 mmio_read->read_resp = NULL;
2097 ENA_SPINLOCK_DESTROY(mmio_read->lock);
2098 }
2099
2100 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
2101 {
2102 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
2103 u32 addr_low, addr_high;
2104
2105 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
2106 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
2107
2108 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
2109 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
2110 }
2111
2112 int ena_com_admin_init(struct ena_com_dev *ena_dev,
2113 struct ena_aenq_handlers *aenq_handlers)
2114 {
2115 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2116 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
2117 int ret;
2118
2119 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2120
2121 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
2122 ena_trc_err(ena_dev, "Reg read timeout occurred\n");
2123 return ENA_COM_TIMER_EXPIRED;
2124 }
2125
2126 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
2127 ena_trc_err(ena_dev, "Device isn't ready, abort com init\n");
2128 return ENA_COM_NO_DEVICE;
2129 }
2130
2131 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
2132
2133 admin_queue->bus = ena_dev->bus;
2134 admin_queue->q_dmadev = ena_dev->dmadev;
2135 admin_queue->polling = false;
2136 admin_queue->curr_cmd_id = 0;
2137
2138 ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
2139
2140 ENA_SPINLOCK_INIT(admin_queue->q_lock);
2141
2142 ret = ena_com_init_comp_ctxt(admin_queue);
2143 if (unlikely(ret))
2144 goto error;
2145
2146 ret = ena_com_admin_init_sq(admin_queue);
2147 if (unlikely(ret))
2148 goto error;
2149
2150 ret = ena_com_admin_init_cq(admin_queue);
2151 if (unlikely(ret))
2152 goto error;
2153
2154 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
2155 ENA_REGS_AQ_DB_OFF);
2156
2157 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
2158 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
2159
2160 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
2161 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
2162
2163 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
2164 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
2165
2166 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
2167 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
2168
2169 aq_caps = 0;
2170 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
2171 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
2172 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
2173 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
2174
2175 acq_caps = 0;
2176 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
2177 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
2178 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
2179 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
2180
2181 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
2182 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
2183 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
2184 if (unlikely(ret))
2185 goto error;
2186
2187 admin_queue->ena_dev = ena_dev;
2188 admin_queue->running_state = true;
2189 admin_queue->is_missing_admin_interrupt = false;
2190
2191 return 0;
2192 error:
2193 ena_com_admin_destroy(ena_dev);
2194
2195 return ret;
2196 }
2197
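/* Illustrative bring-up sketch for ena_com_admin_init() (editorial example,
 * not part of the driver). The ordering assumed here follows the dependencies
 * visible in this file: readless MMIO must be set up before register reads,
 * and the caller supplies the AENQ handlers table (my_aenq_handlers is a
 * hypothetical name).
 *
 *	int rc;
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_validate_version(ena_dev);
 *	if (rc)
 *		goto err_mmio;
 *
 *	rc = ena_com_admin_init(ena_dev, &my_aenq_handlers);
 *	if (rc)
 *		goto err_mmio;
 *
 *	ena_com_set_admin_polling_mode(ena_dev, false);
 *	return 0;
 *
 * err_mmio:
 *	ena_com_mmio_reg_read_request_destroy(ena_dev);
 *	return rc;
 */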
2198 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
2199 struct ena_com_create_io_ctx *ctx)
2200 {
2201 struct ena_com_io_sq *io_sq;
2202 struct ena_com_io_cq *io_cq;
2203 int ret;
2204
2205 if (unlikely(ctx->qid >= ENA_TOTAL_NUM_QUEUES)) {
2206 ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
2207 ctx->qid, ENA_TOTAL_NUM_QUEUES);
2208 return ENA_COM_INVAL;
2209 }
2210
2211 io_sq = &ena_dev->io_sq_queues[ctx->qid];
2212 io_cq = &ena_dev->io_cq_queues[ctx->qid];
2213
2214 memset(io_sq, 0x0, sizeof(*io_sq));
2215 memset(io_cq, 0x0, sizeof(*io_cq));
2216
2217 /* Init CQ */
2218 io_cq->q_depth = ctx->queue_size;
2219 io_cq->direction = ctx->direction;
2220 io_cq->qid = ctx->qid;
2221
2222 io_cq->msix_vector = ctx->msix_vector;
2223
2224 io_sq->q_depth = ctx->queue_size;
2225 io_sq->direction = ctx->direction;
2226 io_sq->qid = ctx->qid;
2227
2228 io_sq->mem_queue_type = ctx->mem_queue_type;
2229
2230 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
2231 /* header length is limited to 8 bits */
2232 io_sq->tx_max_header_size =
2233 ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
2234
2235 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
2236 if (unlikely(ret))
2237 goto error;
2238 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
2239 if (unlikely(ret))
2240 goto error;
2241
2242 ret = ena_com_create_io_cq(ena_dev, io_cq);
2243 if (unlikely(ret))
2244 goto error;
2245
2246 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
2247 if (unlikely(ret))
2248 goto destroy_io_cq;
2249
2250 return 0;
2251
2252 destroy_io_cq:
2253 ena_com_destroy_io_cq(ena_dev, io_cq);
2254 error:
2255 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
2256 return ret;
2257 }
2258
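/* Illustrative usage sketch for ena_com_create_io_queue() (editorial example,
 * not part of the driver): fill the ena_com_create_io_ctx fields read above.
 * ENA_ADMIN_PLACEMENT_POLICY_HOST is an assumed placement-policy name, and the
 * queue size and MSI-X vector values are placeholders.
 *
 *	struct ena_com_create_io_ctx ctx;
 *	int rc;
 *
 *	memset(&ctx, 0x0, sizeof(ctx));
 *	ctx.qid = qid;
 *	ctx.queue_size = 1024;
 *	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
 *	ctx.msix_vector = msix_vector;
 *	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (rc)
 *		return rc;
 *
 *	... on teardown ...
 *	ena_com_destroy_io_queue(ena_dev, qid);
 */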
2259 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
2260 {
2261 struct ena_com_io_sq *io_sq;
2262 struct ena_com_io_cq *io_cq;
2263
2264 if (unlikely(qid >= ENA_TOTAL_NUM_QUEUES)) {
2265 ena_trc_err(ena_dev, "Qid (%d) is bigger than max num of queues (%d)\n",
2266 qid, ENA_TOTAL_NUM_QUEUES);
2267 return;
2268 }
2269
2270 io_sq = &ena_dev->io_sq_queues[qid];
2271 io_cq = &ena_dev->io_cq_queues[qid];
2272
2273 ena_com_destroy_io_sq(ena_dev, io_sq);
2274 ena_com_destroy_io_cq(ena_dev, io_cq);
2275
2276 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
2277 }
2278
2279 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
2280 struct ena_admin_get_feat_resp *resp)
2281 {
2282 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
2283 }
2284
2285 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2286 struct ena_com_stats_ctx *ctx,
2287 enum ena_admin_get_stats_type type)
2288 {
2289 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2290 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2291 struct ena_com_admin_queue *admin_queue;
2292 int ret;
2293
2294 admin_queue = &ena_dev->admin_queue;
2295
2296 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2297 get_cmd->aq_common_descriptor.flags = 0;
2298 get_cmd->type = type;
2299
2300 ret = ena_com_execute_admin_command(admin_queue,
2301 (struct ena_admin_aq_entry *)get_cmd,
2302 sizeof(*get_cmd),
2303 (struct ena_admin_acq_entry *)get_resp,
2304 sizeof(*get_resp));
2305
2306 if (unlikely(ret))
2307 ena_trc_err(ena_dev, "Failed to get stats. error: %d\n", ret);
2308
2309 return ret;
2310 }
2311
2312 static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
2313 {
2314 struct ena_customer_metrics *customer_metrics;
2315 struct ena_com_stats_ctx ctx;
2316 int ret;
2317
2318 customer_metrics = &ena_dev->customer_metrics;
2319 if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
2320 customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
2321 return;
2322 }
2323
2324 memset(&ctx, 0x0, sizeof(ctx));
2325 ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
2326 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
2327 if (likely(ret == 0))
2328 customer_metrics->supported_metrics =
2329 ctx.get_resp.u.customer_metrics.reported_metrics;
2330 else
2331 ena_trc_err(ena_dev, "Failed to query customer metrics support. error: %d\n", ret);
2332 }
2333
2334 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
2335 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2336 {
2337 struct ena_admin_get_feat_resp get_resp;
2338 int rc;
2339
2340 rc = ena_com_get_feature(ena_dev, &get_resp,
2341 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
2342 if (rc)
2343 return rc;
2344
2345 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
2346 sizeof(get_resp.u.dev_attr));
2347
2348 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
2349 ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
2350
2351 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2352 rc = ena_com_get_feature(ena_dev, &get_resp,
2353 ENA_ADMIN_MAX_QUEUES_EXT,
2354 ENA_FEATURE_MAX_QUEUE_EXT_VER);
2355 if (rc)
2356 return rc;
2357
2358 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
2359 return ENA_COM_INVAL;
2360
2361 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
2362 sizeof(get_resp.u.max_queue_ext));
2363 ena_dev->tx_max_header_size =
2364 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
2365 } else {
2366 rc = ena_com_get_feature(ena_dev, &get_resp,
2367 ENA_ADMIN_MAX_QUEUES_NUM, 0);
2368 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
2369 sizeof(get_resp.u.max_queue));
2370 ena_dev->tx_max_header_size =
2371 get_resp.u.max_queue.max_header_size;
2372
2373 if (rc)
2374 return rc;
2375 }
2376
2377 rc = ena_com_get_feature(ena_dev, &get_resp,
2378 ENA_ADMIN_AENQ_CONFIG, 0);
2379 if (rc)
2380 return rc;
2381
2382 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2383 sizeof(get_resp.u.aenq));
2384
2385 rc = ena_com_get_feature(ena_dev, &get_resp,
2386 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2387 if (rc)
2388 return rc;
2389
2390 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2391 sizeof(get_resp.u.offload));
2392
2393 /* Driver hints isn't a mandatory admin command, so in case the
2394 * command isn't supported, set driver hints to 0
2395 */
2396 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2397
2398 if (!rc)
2399 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2400 sizeof(get_resp.u.hw_hints));
2401 else if (rc == ENA_COM_UNSUPPORTED)
2402 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
2403 else
2404 return rc;
2405
2406 rc = ena_com_get_feature(ena_dev, &get_resp,
2407 ENA_ADMIN_LLQ, ENA_ADMIN_LLQ_FEATURE_VERSION_1);
2408 if (!rc)
2409 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2410 sizeof(get_resp.u.llq));
2411 else if (rc == ENA_COM_UNSUPPORTED)
2412 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2413 else
2414 return rc;
2415
2416 ena_com_set_supported_customer_metrics(ena_dev);
2417
2418 return 0;
2419 }
2420
2421 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2422 {
2423 ena_com_handle_admin_completion(&ena_dev->admin_queue);
2424 }
2425
2426 /* ena_com_get_specific_aenq_cb:
2427 * return the handler that is relevant to the specific event group
2428 */
2429 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2430 u16 group)
2431 {
2432 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2433
2434 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2435 return aenq_handlers->handlers[group];
2436
2437 return aenq_handlers->unimplemented_handler;
2438 }
2439
2440 /* ena_com_aenq_intr_handler:
2441 * handles incoming AENQ events.
2442 * Pop events from the queue and apply the specific handler
2443 */
2444 void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2445 {
2446 struct ena_admin_aenq_entry *aenq_e;
2447 struct ena_admin_aenq_common_desc *aenq_common;
2448 struct ena_com_aenq *aenq = &ena_dev->aenq;
2449 u64 timestamp;
2450 ena_aenq_handler handler_cb;
2451 u16 masked_head, processed = 0;
2452 u8 phase;
2453
2454 masked_head = aenq->head & (aenq->q_depth - 1);
2455 phase = aenq->phase;
2456 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2457 aenq_common = &aenq_e->aenq_common_desc;
2458
2459 /* Go over all the events */
2460 while ((READ_ONCE8(aenq_common->flags) &
2461 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2462 /* Make sure the device finished writing the rest of the descriptor
2463 * before reading it.
2464 */
2465 dma_rmb();
2466
2467 timestamp = (u64)aenq_common->timestamp_low |
2468 ((u64)aenq_common->timestamp_high << 32);
2469
2470 ena_trc_dbg(ena_dev, "AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
2471 aenq_common->group,
2472 aenq_common->syndrome,
2473 timestamp);
2474
2475 /* Handle specific event*/
2476 handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2477 aenq_common->group);
2478 handler_cb(data, aenq_e); /* call the actual event handler*/
2479
2480 /* Get next event entry */
2481 masked_head++;
2482 processed++;
2483
2484 if (unlikely(masked_head == aenq->q_depth)) {
2485 masked_head = 0;
2486 phase = !phase;
2487 }
2488 aenq_e = &aenq->entries[masked_head];
2489 aenq_common = &aenq_e->aenq_common_desc;
2490 }
2491
2492 aenq->head += processed;
2493 aenq->phase = phase;
2494
2495 /* Don't update aenq doorbell if there weren't any processed events */
2496 if (!processed)
2497 return;
2498
2499 /* write the aenq doorbell after all AENQ descriptors were read */
2500 mb();
2501 ENA_REG_WRITE32_RELAXED(ena_dev->bus, (u32)aenq->head,
2502 ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2503 mmiowb();
2504 }
2505
2506 bool ena_com_aenq_has_keep_alive(struct ena_com_dev *ena_dev)
2507 {
2508 struct ena_admin_aenq_common_desc *aenq_common;
2509 struct ena_com_aenq *aenq = &ena_dev->aenq;
2510 struct ena_admin_aenq_entry *aenq_e;
2511 u8 phase = aenq->phase;
2512 u16 masked_head;
2513
2514 masked_head = aenq->head & (aenq->q_depth - 1);
2515 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2516 aenq_common = &aenq_e->aenq_common_desc;
2517
2518 /* Go over all the events */
2519 while ((READ_ONCE8(aenq_common->flags) &
2520 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2521 /* Make sure the device finished writing the rest of the descriptor
2522 * before reading it.
2523 */
2524 dma_rmb();
2525
2526 if (aenq_common->group == ENA_ADMIN_KEEP_ALIVE)
2527 return true;
2528
2529 /* Get next event entry */
2530 masked_head++;
2531
2532 if (unlikely(masked_head == aenq->q_depth)) {
2533 masked_head = 0;
2534 phase = !phase;
2535 }
2536
2537 aenq_e = &aenq->entries[masked_head];
2538 aenq_common = &aenq_e->aenq_common_desc;
2539 }
2540
2541 return false;
2542 }
2543
2544 #ifdef ENA_EXTENDED_STATS
2545 /*
2546 * Sets the function Idx and Queue Idx to be used for the
2547 * get full statistics feature
2548 *
2549 */
2550 int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
2551 u32 func_queue)
2552 {
2553
2554 /* Function & queue are acquired from the user in the following format:
2555 * bottom half-word: function
2556 * top half-word: queue
2557 */
2558 ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
2559 ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
2560
2561 return 0;
2562 }
2563
2564 #endif /* ENA_EXTENDED_STATS */
2565
2566 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2567 enum ena_regs_reset_reason_types reset_reason)
2568 {
2569 u32 reset_reason_msb, reset_reason_lsb;
2570 u32 stat, timeout, cap, reset_val;
2571 int rc;
2572
2573 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2574 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2575
2576 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2577 (cap == ENA_MMIO_READ_TIMEOUT))) {
2578 ena_trc_err(ena_dev, "Reg read32 timeout occurred\n");
2579 return ENA_COM_TIMER_EXPIRED;
2580 }
2581
2582 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2583 ena_trc_err(ena_dev, "Device isn't ready, can't reset device\n");
2584 return ENA_COM_INVAL;
2585 }
2586
2587 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2588 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2589 if (timeout == 0) {
2590 ena_trc_err(ena_dev, "Invalid timeout value\n");
2591 return ENA_COM_INVAL;
2592 }
2593
2594 /* start reset */
2595 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2596
2597 /* For backward compatibility, device will interpret
2598 * bits 24-27 as MSB, bits 28-31 as LSB
2599 */
2600 reset_reason_lsb = ENA_FIELD_GET(reset_reason, ENA_RESET_REASON_LSB_MASK,
2601 ENA_RESET_REASON_LSB_OFFSET);
2602
2603 reset_reason_msb = ENA_FIELD_GET(reset_reason, ENA_RESET_REASON_MSB_MASK,
2604 ENA_RESET_REASON_MSB_OFFSET);
2605
2606 reset_val |= reset_reason_lsb << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT;
2607
2608 if (ena_com_get_cap(ena_dev, ENA_ADMIN_EXTENDED_RESET_REASONS))
2609 reset_val |= reset_reason_msb << ENA_REGS_DEV_CTL_RESET_REASON_EXT_SHIFT;
2610 else if (reset_reason_msb) {
2611 /* In case the device does not support the intended
2612 * extended reset reason, fall back to generic
2613 */
2614 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2615 reset_val |= (ENA_REGS_RESET_GENERIC << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2616 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2617 }
2618 ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2619
2620 /* Write again the MMIO read request address */
2621 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2622
2623 rc = wait_for_reset_state(ena_dev, timeout,
2624 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2625 if (unlikely(rc)) {
2626 ena_trc_err(ena_dev, "Reset indication didn't turn on\n");
2627 return rc;
2628 }
2629
2630 /* reset done */
2631 ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2632 rc = wait_for_reset_state(ena_dev, timeout, 0);
2633 if (unlikely(rc)) {
2634 ena_trc_err(ena_dev, "Reset indication didn't turn off\n");
2635 return rc;
2636 }
2637
2638 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2639 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2640 if (timeout)
2641 /* the resolution of timeout reg is 100ms */
2642 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2643 else
2644 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2645
2646 return 0;
2647 }
2648
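/* Illustrative usage sketch for ena_com_dev_reset() (editorial example, not
 * part of the driver): request a reset with the generic reason used above as
 * the fallback value. On success this function has already refreshed the
 * admin completion timeout from the CAPS register.
 *
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_GENERIC);
 *	if (unlikely(rc))
 *		ena_trc_err(ena_dev, "Device reset failed. error: %d\n", rc);
 */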
2649 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2650 struct ena_admin_eni_stats *stats)
2651 {
2652 struct ena_com_stats_ctx ctx;
2653 int ret;
2654
2655 if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
2656 ena_trc_err(ena_dev, "Capability %d isn't supported\n", ENA_ADMIN_ENI_STATS);
2657 return ENA_COM_UNSUPPORTED;
2658 }
2659
2660 memset(&ctx, 0x0, sizeof(ctx));
2661 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2662 if (likely(ret == 0))
2663 memcpy(stats, &ctx.get_resp.u.eni_stats,
2664 sizeof(ctx.get_resp.u.eni_stats));
2665
2666 return ret;
2667 }
2668
2669 int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
2670 struct ena_admin_ena_srd_info *info)
2671 {
2672 struct ena_com_stats_ctx ctx;
2673 int ret;
2674
2675 if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
2676 ena_trc_err(ena_dev, "Capability %d isn't supported\n", ENA_ADMIN_ENA_SRD_INFO);
2677 return ENA_COM_UNSUPPORTED;
2678 }
2679
2680 memset(&ctx, 0x0, sizeof(ctx));
2681 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
2682 if (likely(ret == 0))
2683 memcpy(info, &ctx.get_resp.u.ena_srd_info,
2684 sizeof(ctx.get_resp.u.ena_srd_info));
2685
2686 return ret;
2687 }
2688
2689 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2690 struct ena_admin_basic_stats *stats)
2691 {
2692 struct ena_com_stats_ctx ctx;
2693 int ret;
2694
2695 memset(&ctx, 0x0, sizeof(ctx));
2696 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2697 if (likely(ret == 0))
2698 memcpy(stats, &ctx.get_resp.u.basic_stats,
2699 sizeof(ctx.get_resp.u.basic_stats));
2700
2701 return ret;
2702 }
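/* Illustrative usage sketch for the stats getters (editorial example, not part
 * of the driver): basic stats need no capability check, while ENI stats are
 * gated on ENA_ADMIN_ENI_STATS as shown above.
 *
 *	struct ena_admin_basic_stats basic;
 *	struct ena_admin_eni_stats eni;
 *
 *	if (ena_com_get_dev_basic_stats(ena_dev, &basic) == 0)
 *		... consume the basic counters ...
 *
 *	if (ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS) &&
 *	    ena_com_get_eni_stats(ena_dev, &eni) == 0)
 *		... consume the ENI counters ...
 */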
2703 #ifdef ENA_EXTENDED_STATS
2704
2705 int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
2706 u32 len)
2707 {
2708 struct ena_com_stats_ctx ctx;
2709 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
2710 ena_mem_handle_t mem_handle;
2711 void *virt_addr;
2712 dma_addr_t phys_addr;
2713 int ret;
2714
2715 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
2716 virt_addr, phys_addr, mem_handle);
2717 if (!virt_addr) {
2718 ret = ENA_COM_NO_MEM;
2719 goto done;
2720 }
2721 memset(&ctx, 0x0, sizeof(ctx));
2722 ret = ena_com_mem_addr_set(ena_dev,
2723 &get_cmd->u.control_buffer.address,
2724 phys_addr);
2725 if (unlikely(ret)) {
2726 ena_trc_err(ena_dev, "Memory address set failed\n");
2727 goto free_ext_stats_mem;
2728 }
2729 get_cmd->u.control_buffer.length = len;
2730
2731 get_cmd->device_id = ena_dev->stats_func;
2732 get_cmd->queue_idx = ena_dev->stats_queue;
2733
2734 ret = ena_get_dev_stats(ena_dev, &ctx,
2735 ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
2736 if (unlikely(ret < 0))
2737 goto free_ext_stats_mem;
2738
2739 ret = snprintf(buff, len, "%s", (char *)virt_addr);
2740
2741 free_ext_stats_mem:
2742 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
2743 mem_handle);
2744 done:
2745 return ret;
2746 }
2747 #endif
2748
2749 int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
2750 {
2751 struct ena_admin_aq_get_stats_cmd *get_cmd;
2752 struct ena_com_stats_ctx ctx;
2753 int ret;
2754
2755 if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
2756 ena_trc_err(ena_dev, "Invalid buffer size %u. The given buffer is too big.\n", len);
2757 return ENA_COM_INVAL;
2758 }
2759
2760 if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
2761 ena_trc_err(ena_dev, "Capability %d not supported.\n", ENA_ADMIN_CUSTOMER_METRICS);
2762 return ENA_COM_UNSUPPORTED;
2763 }
2764
2765 if (!ena_dev->customer_metrics.supported_metrics) {
2766 ena_trc_err(ena_dev, "No supported customer metrics.\n");
2767 return ENA_COM_UNSUPPORTED;
2768 }
2769
2770 get_cmd = &ctx.get_cmd;
2771 memset(&ctx, 0x0, sizeof(ctx));
2772 ret = ena_com_mem_addr_set(ena_dev,
2773 &get_cmd->u.control_buffer.address,
2774 ena_dev->customer_metrics.buffer_dma_addr);
2775 if (unlikely(ret)) {
2776 ena_trc_err(ena_dev, "Memory address set failed.\n");
2777 return ret;
2778 }
2779
2780 get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
2781 get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
2782 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
2783 if (likely(ret == 0))
2784 memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
2785 else
2786 ena_trc_err(ena_dev, "Failed to get customer metrics. error: %d\n", ret);
2787
2788 return ret;
2789 }
2790
2791 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
2792 {
2793 struct ena_com_admin_queue *admin_queue;
2794 struct ena_admin_set_feat_cmd cmd;
2795 struct ena_admin_set_feat_resp resp;
2796 int ret;
2797
2798 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2799 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
2800 return ENA_COM_UNSUPPORTED;
2801 }
2802
2803 memset(&cmd, 0x0, sizeof(cmd));
2804 admin_queue = &ena_dev->admin_queue;
2805
2806 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2807 cmd.aq_common_descriptor.flags = 0;
2808 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2809 cmd.u.mtu.mtu = mtu;
2810
2811 ret = ena_com_execute_admin_command(admin_queue,
2812 (struct ena_admin_aq_entry *)&cmd,
2813 sizeof(cmd),
2814 (struct ena_admin_acq_entry *)&resp,
2815 sizeof(resp));
2816
2817 if (unlikely(ret))
2818 ena_trc_err(ena_dev, "Failed to set mtu %d. error: %d\n", mtu, ret);
2819
2820 return ret;
2821 }
2822
2823 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2824 struct ena_admin_feature_offload_desc *offload)
2825 {
2826 int ret;
2827 struct ena_admin_get_feat_resp resp;
2828
2829 ret = ena_com_get_feature(ena_dev, &resp,
2830 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2831 if (unlikely(ret)) {
2832 ena_trc_err(ena_dev, "Failed to get offload capabilities %d\n", ret);
2833 return ret;
2834 }
2835
2836 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2837
2838 return 0;
2839 }
2840
2841 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2842 {
2843 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2844 struct ena_rss *rss = &ena_dev->rss;
2845 struct ena_admin_set_feat_cmd cmd;
2846 struct ena_admin_set_feat_resp resp;
2847 struct ena_admin_get_feat_resp get_resp;
2848 int ret;
2849
2850 if (!ena_com_check_supported_feature_id(ena_dev,
2851 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2852 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
2853 ENA_ADMIN_RSS_HASH_FUNCTION);
2854 return ENA_COM_UNSUPPORTED;
2855 }
2856
2857 /* Validate hash function is supported */
2858 ret = ena_com_get_feature(ena_dev, &get_resp,
2859 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2860 if (unlikely(ret))
2861 return ret;
2862
2863 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2864 ena_trc_err(ena_dev, "Func hash %d isn't supported by device, abort\n",
2865 rss->hash_func);
2866 return ENA_COM_UNSUPPORTED;
2867 }
2868
2869 memset(&cmd, 0x0, sizeof(cmd));
2870
2871 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2872 cmd.aq_common_descriptor.flags =
2873 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2874 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2875 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2876 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2877
2878 ret = ena_com_mem_addr_set(ena_dev,
2879 &cmd.control_buffer.address,
2880 rss->hash_key_dma_addr);
2881 if (unlikely(ret)) {
2882 ena_trc_err(ena_dev, "Memory address set failed\n");
2883 return ret;
2884 }
2885
2886 cmd.control_buffer.length = sizeof(*rss->hash_key);
2887
2888 ret = ena_com_execute_admin_command(admin_queue,
2889 (struct ena_admin_aq_entry *)&cmd,
2890 sizeof(cmd),
2891 (struct ena_admin_acq_entry *)&resp,
2892 sizeof(resp));
2893 if (unlikely(ret)) {
2894 ena_trc_err(ena_dev, "Failed to set hash function %d. error: %d\n",
2895 rss->hash_func, ret);
2896 return ENA_COM_INVAL;
2897 }
2898
2899 return 0;
2900 }
2901
2902 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2903 enum ena_admin_hash_functions func,
2904 const u8 *key, u16 key_len, u32 init_val)
2905 {
2906 struct ena_admin_feature_rss_flow_hash_control *hash_key;
2907 struct ena_admin_get_feat_resp get_resp;
2908 enum ena_admin_hash_functions old_func;
2909 struct ena_rss *rss = &ena_dev->rss;
2910 int rc;
2911
2912 hash_key = rss->hash_key;
2913
2914 /* Make sure the key size is a multiple of DWORDs (4 bytes) */
2915 if (unlikely(key_len & 0x3))
2916 return ENA_COM_INVAL;
2917
2918 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2919 ENA_ADMIN_RSS_HASH_FUNCTION,
2920 rss->hash_key_dma_addr,
2921 sizeof(*rss->hash_key), 0);
2922 if (unlikely(rc))
2923 return rc;
2924
2925 if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2926 ena_trc_err(ena_dev, "Flow hash function %d isn't supported\n", func);
2927 return ENA_COM_UNSUPPORTED;
2928 }
2929
2930 if ((func == ENA_ADMIN_TOEPLITZ) && key) {
2931 if (key_len != sizeof(hash_key->key)) {
2932 ena_trc_err(ena_dev, "key len (%u) doesn't equal the supported size (%zu)\n",
2933 key_len, sizeof(hash_key->key));
2934 return ENA_COM_INVAL;
2935 }
2936 memcpy(hash_key->key, key, key_len);
2937 hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2938 }
2939
2940 rss->hash_init_val = init_val;
2941 old_func = rss->hash_func;
2942 rss->hash_func = func;
2943 rc = ena_com_set_hash_function(ena_dev);
2944
2945 /* Restore the old function */
2946 if (unlikely(rc))
2947 rss->hash_func = old_func;
2948
2949 return rc;
2950 }
2951
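/* Illustrative usage sketch for ena_com_fill_hash_function() (editorial
 * example, not part of the driver): program a Toeplitz key. The key length
 * must be a multiple of 4 bytes and match the size enforced above;
 * ENA_HASH_KEY_SIZE is an assumed placeholder for that size.
 *
 *	u8 key[ENA_HASH_KEY_SIZE];
 *	int rc;
 *
 *	... fill key with random bytes ...
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					key, sizeof(key), 0);
 *	if (rc == ENA_COM_UNSUPPORTED) {
 *		// the device keeps its current hash function
 *	}
 */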
2952 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2953 enum ena_admin_hash_functions *func)
2954 {
2955 struct ena_rss *rss = &ena_dev->rss;
2956 struct ena_admin_get_feat_resp get_resp;
2957 int rc;
2958
2959 if (unlikely(!func))
2960 return ENA_COM_INVAL;
2961
2962 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2963 ENA_ADMIN_RSS_HASH_FUNCTION,
2964 rss->hash_key_dma_addr,
2965 sizeof(*rss->hash_key), 0);
2966 if (unlikely(rc))
2967 return rc;
2968
2969 /* ENA_FFS() returns 1 in case the lsb is set */
2970 rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
2971 if (rss->hash_func)
2972 rss->hash_func--;
2973
2974 *func = rss->hash_func;
2975
2976 return 0;
2977 }
2978
2979 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2980 {
2981 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2982 ena_dev->rss.hash_key;
2983
2984 if (key)
2985 memcpy(key, hash_key->key,
2986 (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2987
2988 return 0;
2989 }
2990
2991 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2992 enum ena_admin_flow_hash_proto proto,
2993 u16 *fields)
2994 {
2995 struct ena_rss *rss = &ena_dev->rss;
2996 struct ena_admin_get_feat_resp get_resp;
2997 int rc;
2998
2999 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
3000 ENA_ADMIN_RSS_HASH_INPUT,
3001 rss->hash_ctrl_dma_addr,
3002 sizeof(*rss->hash_ctrl), 0);
3003 if (unlikely(rc))
3004 return rc;
3005
3006 if (fields)
3007 *fields = rss->hash_ctrl->selected_fields[proto].fields;
3008
3009 return 0;
3010 }
3011
3012 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
3013 {
3014 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
3015 struct ena_rss *rss = &ena_dev->rss;
3016 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
3017 struct ena_admin_set_feat_cmd cmd;
3018 struct ena_admin_set_feat_resp resp;
3019 int ret;
3020
3021 if (!ena_com_check_supported_feature_id(ena_dev,
3022 ENA_ADMIN_RSS_HASH_INPUT)) {
3023 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
3024 ENA_ADMIN_RSS_HASH_INPUT);
3025 return ENA_COM_UNSUPPORTED;
3026 }
3027
3028 memset(&cmd, 0x0, sizeof(cmd));
3029
3030 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
3031 cmd.aq_common_descriptor.flags =
3032 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
3033 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
3034 cmd.u.flow_hash_input.enabled_input_sort =
3035 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
3036 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
3037
3038 ret = ena_com_mem_addr_set(ena_dev,
3039 &cmd.control_buffer.address,
3040 rss->hash_ctrl_dma_addr);
3041 if (unlikely(ret)) {
3042 ena_trc_err(ena_dev, "Memory address set failed\n");
3043 return ret;
3044 }
3045 cmd.control_buffer.length = sizeof(*hash_ctrl);
3046
3047 ret = ena_com_execute_admin_command(admin_queue,
3048 (struct ena_admin_aq_entry *)&cmd,
3049 sizeof(cmd),
3050 (struct ena_admin_acq_entry *)&resp,
3051 sizeof(resp));
3052 if (unlikely(ret))
3053 ena_trc_err(ena_dev, "Failed to set hash input. error: %d\n", ret);
3054
3055 return ret;
3056 }
3057
3058 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
3059 {
3060 struct ena_rss *rss = &ena_dev->rss;
3061 struct ena_admin_feature_rss_hash_control *hash_ctrl =
3062 rss->hash_ctrl;
3063 u16 available_fields = 0;
3064 int rc, i;
3065
3066 /* Get the supported hash input */
3067 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
3068 if (unlikely(rc))
3069 return rc;
3070
3071 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
3072 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
3073 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
3074
3075 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
3076 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
3077 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
3078
3079 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
3080 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
3081 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
3082
3083 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
3084 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
3085 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
3086
3087 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
3088 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
3089
3090 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
3091 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
3092
3093 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
3094 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
3095
3096 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
3097 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
3098
3099 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
3100 available_fields = hash_ctrl->selected_fields[i].fields &
3101 hash_ctrl->supported_fields[i].fields;
3102 if (available_fields != hash_ctrl->selected_fields[i].fields) {
3103 ena_trc_err(ena_dev, "Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
3104 i, hash_ctrl->supported_fields[i].fields,
3105 hash_ctrl->selected_fields[i].fields);
3106 return ENA_COM_UNSUPPORTED;
3107 }
3108 }
3109
3110 rc = ena_com_set_hash_ctrl(ena_dev);
3111
3112 /* In case of failure, restore the old hash ctrl */
3113 if (unlikely(rc))
3114 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
3115
3116 return rc;
3117 }
3118
3119 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
3120 enum ena_admin_flow_hash_proto proto,
3121 u16 hash_fields)
3122 {
3123 struct ena_rss *rss = &ena_dev->rss;
3124 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
3125 u16 supported_fields;
3126 int rc;
3127
3128 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
3129 ena_trc_err(ena_dev, "Invalid proto num (%u)\n", proto);
3130 return ENA_COM_INVAL;
3131 }
3132
3133 /* Get the ctrl table */
3134 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
3135 if (unlikely(rc))
3136 return rc;
3137
3138 /* Make sure all the fields are supported */
3139 supported_fields = hash_ctrl->supported_fields[proto].fields;
3140 if ((hash_fields & supported_fields) != hash_fields) {
3141 ena_trc_err(ena_dev, "Proto %d doesn't support the required fields %x. supports only: %x\n",
3142 proto, hash_fields, supported_fields);
3143 }
3144
3145 hash_ctrl->selected_fields[proto].fields = hash_fields;
3146
3147 rc = ena_com_set_hash_ctrl(ena_dev);
3148
3149 /* In case of failure, restore the old hash ctrl */
3150 if (unlikely(rc))
3151 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
3152
3153 return 0;
3154 }
3155
3156 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
3157 u16 entry_idx, u16 entry_value)
3158 {
3159 struct ena_rss *rss = &ena_dev->rss;
3160
3161 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
3162 return ENA_COM_INVAL;
3163
3164 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
3165 return ENA_COM_INVAL;
3166
3167 rss->host_rss_ind_tbl[entry_idx] = entry_value;
3168
3169 return 0;
3170 }
3171
3172 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
3173 {
3174 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
3175 struct ena_rss *rss = &ena_dev->rss;
3176 struct ena_admin_set_feat_cmd cmd;
3177 struct ena_admin_set_feat_resp resp;
3178 int ret;
3179
3180 if (!ena_com_check_supported_feature_id(ena_dev,
3181 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
3182 ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
3183 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
3184 return ENA_COM_UNSUPPORTED;
3185 }
3186
3187 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
3188 if (ret) {
3189 ena_trc_err(ena_dev, "Failed to convert host indirection table to device table\n");
3190 return ret;
3191 }
3192
3193 memset(&cmd, 0x0, sizeof(cmd));
3194
3195 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
3196 cmd.aq_common_descriptor.flags =
3197 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
3198 cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
3199 cmd.u.ind_table.size = rss->tbl_log_size;
3200 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
3201
3202 ret = ena_com_mem_addr_set(ena_dev,
3203 &cmd.control_buffer.address,
3204 rss->rss_ind_tbl_dma_addr);
3205 if (unlikely(ret)) {
3206 ena_trc_err(ena_dev, "Memory address set failed\n");
3207 return ret;
3208 }
3209
3210 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
3211 sizeof(struct ena_admin_rss_ind_table_entry);
3212
3213 ret = ena_com_execute_admin_command(admin_queue,
3214 (struct ena_admin_aq_entry *)&cmd,
3215 sizeof(cmd),
3216 (struct ena_admin_acq_entry *)&resp,
3217 sizeof(resp));
3218
3219 if (unlikely(ret))
3220 ena_trc_err(ena_dev, "Failed to set indirect table. error: %d\n", ret);
3221
3222 return ret;
3223 }
3224
3225 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
3226 {
3227 struct ena_rss *rss = &ena_dev->rss;
3228 struct ena_admin_get_feat_resp get_resp;
3229 u32 tbl_size;
3230 int i, rc;
3231
3232 tbl_size = (1ULL << rss->tbl_log_size) *
3233 sizeof(struct ena_admin_rss_ind_table_entry);
3234
3235 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
3236 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
3237 rss->rss_ind_tbl_dma_addr,
3238 tbl_size, 0);
3239 if (unlikely(rc))
3240 return rc;
3241
3242 if (!ind_tbl)
3243 return 0;
3244
3245 for (i = 0; i < (1 << rss->tbl_log_size); i++)
3246 ind_tbl[i] = rss->host_rss_ind_tbl[i];
3247
3248 return 0;
3249 }
3250
3251 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
3252 {
3253 int rc;
3254
3255 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
3256
3257 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
3258 if (unlikely(rc))
3259 goto err_indr_tbl;
3260
3261 /* The following function might return unsupported in case the
3262 * device doesn't support setting the key / hash function. We can safely
3263 * ignore this error and have indirection table support only.
3264 */
3265 rc = ena_com_hash_key_allocate(ena_dev);
3266 if (likely(!rc))
3267 ena_com_hash_key_fill_default_key(ena_dev);
3268 else if (rc != ENA_COM_UNSUPPORTED)
3269 goto err_hash_key;
3270
3271 rc = ena_com_hash_ctrl_init(ena_dev);
3272 if (unlikely(rc))
3273 goto err_hash_ctrl;
3274
3275 return 0;
3276
3277 err_hash_ctrl:
3278 ena_com_hash_key_destroy(ena_dev);
3279 err_hash_key:
3280 ena_com_indirect_table_destroy(ena_dev);
3281 err_indr_tbl:
3282
3283 return rc;
3284 }
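
/*
 * Illustrative sketch, not part of the original ena_com code: RSS bring-up
 * pairs ena_com_rss_init() with ena_com_rss_destroy(). The 2^7 = 128 entry
 * table size is only an example; drivers pass their own configured size.
 */
static void example_rss_lifecycle(struct ena_com_dev *ena_dev)
{
        if (unlikely(ena_com_rss_init(ena_dev, 7)))
                return;

        /* Fill and push the indirection table as in the sketch above, and
         * optionally configure the hash key / hash function if the device
         * supports them.
         */

        /* On teardown, or if any configuration step fails, release every
         * RSS resource in one call.
         */
        ena_com_rss_destroy(ena_dev);
}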

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
        ena_com_indirect_table_destroy(ena_dev);
        ena_com_hash_key_destroy(ena_dev);
        ena_com_hash_ctrl_destroy(ena_dev);

        memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               SZ_4K,
                               host_attr->host_info,
                               host_attr->host_info_dma_addr,
                               host_attr->host_info_dma_handle);
        if (unlikely(!host_attr->host_info))
                return ENA_COM_NO_MEM;

        host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
                ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
                (ENA_COMMON_SPEC_VERSION_MINOR));

        return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
                                u32 debug_area_size)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               debug_area_size,
                               host_attr->debug_area_virt_addr,
                               host_attr->debug_area_dma_addr,
                               host_attr->debug_area_dma_handle);
        if (unlikely(!host_attr->debug_area_virt_addr)) {
                host_attr->debug_area_size = 0;
                return ENA_COM_NO_MEM;
        }

        host_attr->debug_area_size = debug_area_size;

        return 0;
}

int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
{
        struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;

        customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               customer_metrics->buffer_len,
                               customer_metrics->buffer_virt_addr,
                               customer_metrics->buffer_dma_addr,
                               customer_metrics->buffer_dma_handle);
        if (unlikely(!customer_metrics->buffer_virt_addr))
                return ENA_COM_NO_MEM;

        return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        if (host_attr->host_info) {
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      SZ_4K,
                                      host_attr->host_info,
                                      host_attr->host_info_dma_addr,
                                      host_attr->host_info_dma_handle);
                host_attr->host_info = NULL;
        }
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        if (host_attr->debug_area_virt_addr) {
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      host_attr->debug_area_size,
                                      host_attr->debug_area_virt_addr,
                                      host_attr->debug_area_dma_addr,
                                      host_attr->debug_area_dma_handle);
                host_attr->debug_area_virt_addr = NULL;
        }
}

void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
{
        struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;

        if (customer_metrics->buffer_virt_addr) {
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      customer_metrics->buffer_len,
                                      customer_metrics->buffer_virt_addr,
                                      customer_metrics->buffer_dma_addr,
                                      customer_metrics->buffer_dma_handle);
                customer_metrics->buffer_virt_addr = NULL;
        }
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;

        int ret;

        /* Host attribute config is called before ena_com_get_dev_attr_feat
         * so ena_com can't check if the feature is supported.
         */

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.debug_ba,
                                   host_attr->debug_area_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err(ena_dev, "Memory address set failed\n");
                return ret;
        }

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.os_info_ba,
                                   host_attr->host_info_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err(ena_dev, "Memory address set failed\n");
                return ret;
        }

        cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                ena_trc_err(ena_dev, "Failed to set host attributes: %d\n", ret);

        return ret;
}
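
/*
 * Illustrative sketch, not part of the original ena_com code: host attribute
 * setup allocates the host info page and (optionally) a debug area, lets the
 * driver fill ena_dev->host_attr.host_info, and only then hands both base
 * addresses to the device via ena_com_set_host_attributes(). The debug area
 * size below is an arbitrary example value.
 */
static int example_host_attr_setup(struct ena_com_dev *ena_dev)
{
        const u32 debug_area_size = 8192;       /* example size only */
        int rc;

        rc = ena_com_allocate_host_info(ena_dev);
        if (unlikely(rc))
                return rc;

        rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
        if (unlikely(rc))
                goto err_debug_area;

        /* The driver would populate host_attr.host_info (OS type, driver
         * version, supported features, ...) here.
         */
        rc = ena_com_set_host_attributes(ena_dev);
        if (unlikely(rc))
                goto err_set_attr;

        return 0;

err_set_attr:
        ena_com_delete_debug_area(ena_dev);
err_debug_area:
        ena_com_delete_host_info(ena_dev);

        return rc;
}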

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
        return ena_com_check_supported_feature_id(ena_dev,
                                                  ENA_ADMIN_INTERRUPT_MODERATION);
}

static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
                                                          u32 coalesce_usecs,
                                                          u32 intr_delay_resolution,
                                                          u32 *intr_moder_interval)
{
        if (!intr_delay_resolution) {
                ena_trc_err(ena_dev, "Illegal interrupt delay granularity value\n");
                return ENA_COM_FAULT;
        }

        *intr_moder_interval = coalesce_usecs / intr_delay_resolution;

        return 0;
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
                                                      u32 tx_coalesce_usecs)
{
        return ena_com_update_nonadaptive_moderation_interval(ena_dev,
                                                              tx_coalesce_usecs,
                                                              ena_dev->intr_delay_resolution,
                                                              &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
                                                      u32 rx_coalesce_usecs)
{
        return ena_com_update_nonadaptive_moderation_interval(ena_dev,
                                                              rx_coalesce_usecs,
                                                              ena_dev->intr_delay_resolution,
                                                              &ena_dev->intr_moder_rx_interval);
}
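
/*
 * Worked example, not part of the original ena_com code: with a
 * device-reported delay resolution of 4 us, requesting 64 us of coalescing
 * stores 64 / 4 = 16 in the corresponding interval field. The 64 us figure
 * is an arbitrary example, not a recommended setting.
 */
static int example_set_static_coalescing(struct ena_com_dev *ena_dev)
{
        int rc;

        rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, 64);
        if (unlikely(rc))
                return rc;

        return ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 64);
}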

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
        struct ena_admin_get_feat_resp get_resp;
        u16 delay_resolution;
        int rc;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_INTERRUPT_MODERATION, 0);

        if (rc) {
                if (rc == ENA_COM_UNSUPPORTED) {
                        ena_trc_dbg(ena_dev, "Feature %d isn't supported\n",
                                    ENA_ADMIN_INTERRUPT_MODERATION);
                        rc = 0;
                } else {
                        ena_trc_err(ena_dev,
                                    "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
                }

                /* no moderation supported, disable adaptive support */
                ena_com_disable_adaptive_moderation(ena_dev);
                return rc;
        }

        /* if moderation is supported by device we set adaptive moderation */
        delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
        ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

        /* Disable adaptive moderation by default - can be enabled later */
        ena_com_disable_adaptive_moderation(ena_dev);

        return 0;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
        return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
        return ena_dev->intr_moder_rx_interval;
}

int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
                            struct ena_admin_feature_llq_desc *llq_features,
                            struct ena_llq_configurations *llq_default_cfg)
{
        struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
        int rc;

        if (!llq_features->max_llq_num) {
                ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
                return 0;
        }

        rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
        if (unlikely(rc))
                return rc;

        ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
                (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

        if (unlikely(ena_dev->tx_max_header_size == 0)) {
                ena_trc_err(ena_dev, "The size of the LLQ entry is smaller than needed\n");
                return ENA_COM_INVAL;
        }

        ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

        return 0;
}
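
/*
 * Worked example, not part of the original ena_com code: with a 128-byte LLQ
 * descriptor list entry and two descriptors placed before the packet header
 * (both assumed here as typical example values), the space left for the
 * pushed header is 128 - 2 * sizeof(struct ena_eth_io_tx_desc) =
 * 128 - 2 * 16 = 96 bytes, which ena_com_config_dev_mode() stores in
 * ena_dev->tx_max_header_size above.
 */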