/*
 * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdint.h>
27 #include <string.h>
28 #include <unistd.h>
29 #include <errno.h>
30 #include <byteswap.h>
31 #include <ipxe/netdevice.h>
32 #include <ipxe/ethernet.h>
33 #include <ipxe/if_ether.h>
34 #include <ipxe/iobuf.h>
35 #include <ipxe/malloc.h>
36 #include <ipxe/pci.h>
37 #include "ena.h"
38
/** @file
 *
 * Amazon ENA network driver
 *
 */
44
45 /**
46 * Get direction name (for debugging)
47 *
48 * @v direction Direction
49 * @ret name Direction name
50 */
ena_direction(unsigned int direction)51 static const char * ena_direction ( unsigned int direction ) {
52
53 switch ( direction ) {
54 case ENA_SQ_TX: return "TX";
55 case ENA_SQ_RX: return "RX";
56 default: return "<UNKNOWN>";
57 }
58 }
59
/******************************************************************************
 *
 * Device reset
 *
 ******************************************************************************
 */
66
67 /**
68 * Reset hardware
69 *
70 * @v ena ENA device
71 * @ret rc Return status code
72 */
ena_reset(struct ena_nic * ena)73 static int ena_reset ( struct ena_nic *ena ) {
74 uint32_t stat;
75 unsigned int i;
76
77 /* Trigger reset */
78 writel ( ENA_CTRL_RESET, ( ena->regs + ENA_CTRL ) );
79
80 /* Wait for reset to complete */
81 for ( i = 0 ; i < ENA_RESET_MAX_WAIT_MS ; i++ ) {
82
83 /* Check if device is ready */
84 stat = readl ( ena->regs + ENA_STAT );
85 if ( stat & ENA_STAT_READY )
86 return 0;
87
88 /* Delay */
89 mdelay ( 1 );
90 }
91
92 DBGC ( ena, "ENA %p timed out waiting for reset (status %#08x)\n",
93 ena, stat );
94 return -ETIMEDOUT;
95 }
96
/******************************************************************************
 *
 * Admin queue
 *
 ******************************************************************************
 */
103
104 /**
105 * Set queue base address
106 *
107 * @v ena ENA device
108 * @v offset Register offset
109 * @v address Base address
110 */
ena_set_base(struct ena_nic * ena,unsigned int offset,void * base)111 static inline void ena_set_base ( struct ena_nic *ena, unsigned int offset,
112 void *base ) {
113 physaddr_t phys = virt_to_bus ( base );
114
115 /* Program base address registers */
116 writel ( ( phys & 0xffffffffUL ),
117 ( ena->regs + offset + ENA_BASE_LO ) );
118 if ( sizeof ( phys ) > sizeof ( uint32_t ) ) {
119 writel ( ( ( ( uint64_t ) phys ) >> 32 ),
120 ( ena->regs + offset + ENA_BASE_HI ) );
121 } else {
122 writel ( 0, ( ena->regs + offset + ENA_BASE_HI ) );
123 }
124 }
125
126 /**
127 * Set queue capabilities
128 *
129 * @v ena ENA device
130 * @v offset Register offset
131 * @v count Number of entries
132 * @v size Size of each entry
133 */
134 static inline __attribute__ (( always_inline )) void
ena_set_caps(struct ena_nic * ena,unsigned int offset,unsigned int count,size_t size)135 ena_set_caps ( struct ena_nic *ena, unsigned int offset, unsigned int count,
136 size_t size ) {
137
138 /* Program capabilities register */
139 writel ( ENA_CAPS ( count, size ), ( ena->regs + offset ) );
140 }
141
/**
 * Clear queue capabilities
 *
 * Writing zero to the capabilities register disables the queue.
 *
 * @v ena		ENA device
 * @v offset		Register offset
 */
static inline __attribute__ (( always_inline )) void
ena_clear_caps ( struct ena_nic *ena, unsigned int offset ) {

	/* Clear capabilities register */
	writel ( 0, ( ena->regs + offset ) );
}
154
/**
 * Create admin queues
 *
 * Allocates the admin submission queue (AQ) and admin completion
 * queue (ACQ) and programs their base addresses and capabilities into
 * device registers.  All subsequent device commands are issued via
 * these queues.
 *
 * @v ena		ENA device
 * @ret rc		Return status code
 */
static int ena_create_admin ( struct ena_nic *ena ) {
	size_t aq_len = ( ENA_AQ_COUNT * sizeof ( ena->aq.req[0] ) );
	size_t acq_len = ( ENA_ACQ_COUNT * sizeof ( ena->acq.rsp[0] ) );
	int rc;

	/* Allocate admin completion queue (aligned to its own length) */
	ena->acq.rsp = malloc_dma ( acq_len, acq_len );
	if ( ! ena->acq.rsp ) {
		rc = -ENOMEM;
		goto err_alloc_acq;
	}
	memset ( ena->acq.rsp, 0, acq_len );

	/* Allocate admin queue (aligned to its own length) */
	ena->aq.req = malloc_dma ( aq_len, aq_len );
	if ( ! ena->aq.req ) {
		rc = -ENOMEM;
		goto err_alloc_aq;
	}
	memset ( ena->aq.req, 0, aq_len );

	/* Program queue addresses and capabilities */
	ena_set_base ( ena, ENA_ACQ_BASE, ena->acq.rsp );
	ena_set_caps ( ena, ENA_ACQ_CAPS, ENA_ACQ_COUNT,
		       sizeof ( ena->acq.rsp[0] ) );
	ena_set_base ( ena, ENA_AQ_BASE, ena->aq.req );
	ena_set_caps ( ena, ENA_AQ_CAPS, ENA_AQ_COUNT,
		       sizeof ( ena->aq.req[0] ) );

	DBGC ( ena, "ENA %p AQ [%08lx,%08lx) ACQ [%08lx,%08lx)\n",
	       ena, virt_to_phys ( ena->aq.req ),
	       ( virt_to_phys ( ena->aq.req ) + aq_len ),
	       virt_to_phys ( ena->acq.rsp ),
	       ( virt_to_phys ( ena->acq.rsp ) + acq_len ) );
	return 0;

	/* NOTE: the following lines are unreachable on success; they
	 * deliberately document the full teardown order and fall
	 * through into the error-path labels (iPXE convention).
	 */
	ena_clear_caps ( ena, ENA_AQ_CAPS );
	ena_clear_caps ( ena, ENA_ACQ_CAPS );
	free_dma ( ena->aq.req, aq_len );
 err_alloc_aq:
	free_dma ( ena->acq.rsp, acq_len );
 err_alloc_acq:
	return rc;
}
205
206 /**
207 * Destroy admin queues
208 *
209 * @v ena ENA device
210 */
ena_destroy_admin(struct ena_nic * ena)211 static void ena_destroy_admin ( struct ena_nic *ena ) {
212 size_t aq_len = ( ENA_AQ_COUNT * sizeof ( ena->aq.req[0] ) );
213 size_t acq_len = ( ENA_ACQ_COUNT * sizeof ( ena->acq.rsp[0] ) );
214
215 /* Clear queue capabilities */
216 ena_clear_caps ( ena, ENA_AQ_CAPS );
217 ena_clear_caps ( ena, ENA_ACQ_CAPS );
218 wmb();
219
220 /* Free queues */
221 free_dma ( ena->aq.req, aq_len );
222 free_dma ( ena->acq.rsp, acq_len );
223 DBGC ( ena, "ENA %p AQ and ACQ destroyed\n", ena );
224 }
225
226 /**
227 * Get next available admin queue request
228 *
229 * @v ena ENA device
230 * @ret req Admin queue request
231 */
ena_admin_req(struct ena_nic * ena)232 static union ena_aq_req * ena_admin_req ( struct ena_nic *ena ) {
233 union ena_aq_req *req;
234 unsigned int index;
235
236 /* Get next request */
237 index = ( ena->aq.prod % ENA_AQ_COUNT );
238 req = &ena->aq.req[index];
239
240 /* Initialise request */
241 memset ( ( ( ( void * ) req ) + sizeof ( req->header ) ), 0,
242 ( sizeof ( *req ) - sizeof ( req->header ) ) );
243 req->header.id = ena->aq.prod;
244
245 /* Increment producer counter */
246 ena->aq.prod++;
247
248 return req;
249 }
250
/**
 * Issue admin queue request
 *
 * Hands the request to the device (by toggling its phase bit and
 * ringing the doorbell) and busy-waits up to ENA_ADMIN_MAX_WAIT_MS
 * milliseconds for the matching completion to appear in the admin
 * completion queue.
 *
 * @v ena		ENA device
 * @v req		Admin queue request
 * @v rsp		Admin queue response to fill in
 * @ret rc		Return status code
 */
static int ena_admin ( struct ena_nic *ena, union ena_aq_req *req,
		       union ena_acq_rsp **rsp ) {
	unsigned int index;
	unsigned int i;
	int rc;

	/* Locate response slot (responses arrive in order) */
	index = ( ena->acq.cons % ENA_ACQ_COUNT );
	*rsp = &ena->acq.rsp[index];

	/* Mark request as ready by toggling its phase bit; the write
	 * barrier ensures the request body is visible to the device
	 * before the phase change.
	 */
	req->header.flags ^= ENA_AQ_PHASE;
	wmb();
	DBGC2 ( ena, "ENA %p admin request %#x:\n",
		ena, le16_to_cpu ( req->header.id ) );
	DBGC2_HDA ( ena, virt_to_phys ( req ), req, sizeof ( *req ) );

	/* Ring doorbell */
	writel ( ena->aq.prod, ( ena->regs + ENA_AQ_DB ) );

	/* Wait for response */
	for ( i = 0 ; i < ENA_ADMIN_MAX_WAIT_MS ; i++ ) {

		/* Check for response: the entry is valid once its
		 * phase bit matches our expected phase.
		 */
		if ( ( (*rsp)->header.flags ^ ena->acq.phase ) & ENA_ACQ_PHASE){
			mdelay ( 1 );
			continue;
		}
		DBGC2 ( ena, "ENA %p admin response %#x:\n",
			ena, le16_to_cpu ( (*rsp)->header.id ) );
		DBGC2_HDA ( ena, virt_to_phys ( *rsp ), *rsp, sizeof ( **rsp ));

		/* Increment consumer counter; flip expected phase on
		 * each wrap of the circular queue.
		 */
		ena->acq.cons++;
		if ( ( ena->acq.cons % ENA_ACQ_COUNT ) == 0 )
			ena->acq.phase ^= ENA_ACQ_PHASE;

		/* Check command identifier matches the request */
		if ( (*rsp)->header.id != req->header.id ) {
			DBGC ( ena, "ENA %p admin response %#x mismatch:\n",
			       ena, le16_to_cpu ( (*rsp)->header.id ) );
			rc = -EILSEQ;
			goto err;
		}

		/* Check status reported by the device */
		if ( (*rsp)->header.status != 0 ) {
			DBGC ( ena, "ENA %p admin response %#x status %d:\n",
			       ena, le16_to_cpu ( (*rsp)->header.id ),
			       (*rsp)->header.status );
			rc = -EIO;
			goto err;
		}

		/* Success */
		return 0;
	}

	rc = -ETIMEDOUT;
	DBGC ( ena, "ENA %p timed out waiting for admin request %#x:\n",
	       ena, le16_to_cpu ( req->header.id ) );
 err:
	DBGC_HDA ( ena, virt_to_phys ( req ), req, sizeof ( *req ) );
	DBGC_HDA ( ena, virt_to_phys ( *rsp ), *rsp, sizeof ( **rsp ) );
	return rc;
}
325
/**
 * Create submission queue
 *
 * Allocates the submission queue entries and issues a CREATE_SQ admin
 * command to attach them to the given completion queue.
 *
 * @v ena		ENA device
 * @v sq		Submission queue
 * @v cq		Corresponding completion queue
 * @ret rc		Return status code
 */
static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq,
			   struct ena_cq *cq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Allocate submission queue entries */
	sq->sqe.raw = malloc_dma ( sq->len, ENA_ALIGN );
	if ( ! sq->sqe.raw ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( sq->sqe.raw, 0, sq->len );

	/* Construct request: host-resident, physically contiguous
	 * queue, linked to the completion queue by ID.
	 */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_CREATE_SQ;
	req->create_sq.direction = sq->direction;
	req->create_sq.policy = cpu_to_le16 ( ENA_SQ_HOST_MEMORY |
					      ENA_SQ_CONTIGUOUS );
	req->create_sq.cq_id = cpu_to_le16 ( cq->id );
	req->create_sq.count = cpu_to_le16 ( sq->count );
	req->create_sq.address = cpu_to_le64 ( virt_to_bus ( sq->sqe.raw ) );

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		goto err_admin;

	/* Parse response: device assigns the queue ID and doorbell
	 * register offset.
	 */
	sq->id = le16_to_cpu ( rsp->create_sq.id );
	sq->doorbell = le32_to_cpu ( rsp->create_sq.doorbell );

	/* Reset producer counter and phase */
	sq->prod = 0;
	sq->phase = ENA_SQE_PHASE;

	DBGC ( ena, "ENA %p %s SQ%d at [%08lx,%08lx) db +%04x CQ%d\n",
	       ena, ena_direction ( sq->direction ), sq->id,
	       virt_to_phys ( sq->sqe.raw ),
	       ( virt_to_phys ( sq->sqe.raw ) + sq->len ),
	       sq->doorbell, cq->id );
	return 0;

 err_admin:
	free_dma ( sq->sqe.raw, sq->len );
 err_alloc:
	return rc;
}
382
383 /**
384 * Destroy submission queue
385 *
386 * @v ena ENA device
387 * @v sq Submission queue
388 * @ret rc Return status code
389 */
ena_destroy_sq(struct ena_nic * ena,struct ena_sq * sq)390 static int ena_destroy_sq ( struct ena_nic *ena, struct ena_sq *sq ) {
391 union ena_aq_req *req;
392 union ena_acq_rsp *rsp;
393 int rc;
394
395 /* Construct request */
396 req = ena_admin_req ( ena );
397 req->header.opcode = ENA_DESTROY_SQ;
398 req->destroy_sq.id = cpu_to_le16 ( sq->id );
399 req->destroy_sq.direction = sq->direction;
400
401 /* Issue request */
402 if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
403 return rc;
404
405 /* Free submission queue entries */
406 free_dma ( sq->sqe.raw, sq->len );
407
408 DBGC ( ena, "ENA %p %s SQ%d destroyed\n",
409 ena, ena_direction ( sq->direction ), sq->id );
410 return 0;
411 }
412
/**
 * Create completion queue
 *
 * Allocates the completion queue entries and issues a CREATE_CQ admin
 * command.  The device may grant fewer entries than requested; the
 * actual count (and derived index mask) come from the response.
 *
 * @v ena		ENA device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
	union ena_aq_req *req;
	union ena_acq_rsp *rsp;
	int rc;

	/* Allocate completion queue entries */
	cq->cqe.raw = malloc_dma ( cq->len, ENA_ALIGN );
	if ( ! cq->cqe.raw ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	memset ( cq->cqe.raw, 0, cq->len );

	/* Construct request */
	req = ena_admin_req ( ena );
	req->header.opcode = ENA_CREATE_CQ;
	req->create_cq.size = cq->size;
	req->create_cq.count = cpu_to_le16 ( cq->requested );
	req->create_cq.address = cpu_to_le64 ( virt_to_bus ( cq->cqe.raw ) );

	/* Issue request */
	if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
		goto err_admin;

	/* Parse response; mask assumes the actual count is a power of
	 * two (index arithmetic elsewhere relies on this).
	 */
	cq->id = le16_to_cpu ( rsp->create_cq.id );
	cq->actual = le16_to_cpu ( rsp->create_cq.count );
	cq->doorbell = le32_to_cpu ( rsp->create_cq.doorbell );
	cq->mask = ( cq->actual - 1 );
	if ( cq->actual != cq->requested ) {
		DBGC ( ena, "ENA %p CQ%d requested %d actual %d\n",
		       ena, cq->id, cq->requested, cq->actual );
	}

	/* Reset consumer counter and phase */
	cq->cons = 0;
	cq->phase = ENA_CQE_PHASE;

	DBGC ( ena, "ENA %p CQ%d at [%08lx,%08lx) db +%04x\n",
	       ena, cq->id, virt_to_phys ( cq->cqe.raw ),
	       ( virt_to_phys ( cq->cqe.raw ) + cq->len ), cq->doorbell );
	return 0;

 err_admin:
	free_dma ( cq->cqe.raw, cq->len );
 err_alloc:
	return rc;
}
468
469 /**
470 * Destroy completion queue
471 *
472 * @v ena ENA device
473 * @v cq Completion queue
474 * @ret rc Return status code
475 */
ena_destroy_cq(struct ena_nic * ena,struct ena_cq * cq)476 static int ena_destroy_cq ( struct ena_nic *ena, struct ena_cq *cq ) {
477 union ena_aq_req *req;
478 union ena_acq_rsp *rsp;
479 int rc;
480
481 /* Construct request */
482 req = ena_admin_req ( ena );
483 req->header.opcode = ENA_DESTROY_CQ;
484 req->destroy_cq.id = cpu_to_le16 ( cq->id );
485
486 /* Issue request */
487 if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
488 return rc;
489
490 /* Free completion queue entries */
491 free_dma ( cq->cqe.raw, cq->len );
492
493 DBGC ( ena, "ENA %p CQ%d destroyed\n", ena, cq->id );
494 return 0;
495 }
496
/**
 * Create queue pair
 *
 * Creates the completion queue first, since the submission queue's
 * CREATE_SQ command refers to the completion queue by its
 * device-assigned ID.
 *
 * @v ena		ENA device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int ena_create_qp ( struct ena_nic *ena, struct ena_qp *qp ) {
	int rc;

	/* Create completion queue */
	if ( ( rc = ena_create_cq ( ena, &qp->cq ) ) != 0 )
		goto err_create_cq;

	/* Create submission queue */
	if ( ( rc = ena_create_sq ( ena, &qp->sq, &qp->cq ) ) != 0 )
		goto err_create_sq;

	return 0;

	/* NOTE: unreachable on success; documents the teardown order
	 * and falls through into the error-path labels (iPXE
	 * convention).
	 */
	ena_destroy_sq ( ena, &qp->sq );
 err_create_sq:
	ena_destroy_cq ( ena, &qp->cq );
 err_create_cq:
	return rc;
}
523
524 /**
525 * Destroy queue pair
526 *
527 * @v ena ENA device
528 * @v qp Queue pair
529 * @ret rc Return status code
530 */
ena_destroy_qp(struct ena_nic * ena,struct ena_qp * qp)531 static int ena_destroy_qp ( struct ena_nic *ena, struct ena_qp *qp ) {
532
533 /* Destroy submission queue */
534 ena_destroy_sq ( ena, &qp->sq );
535
536 /* Destroy completion queue */
537 ena_destroy_cq ( ena, &qp->cq );
538
539 return 0;
540 }
541
542 /**
543 * Get device attributes
544 *
545 * @v netdev Network device
546 * @ret rc Return status code
547 */
ena_get_device_attributes(struct net_device * netdev)548 static int ena_get_device_attributes ( struct net_device *netdev ) {
549 struct ena_nic *ena = netdev->priv;
550 union ena_aq_req *req;
551 union ena_acq_rsp *rsp;
552 union ena_feature *feature;
553 int rc;
554
555 /* Construct request */
556 req = ena_admin_req ( ena );
557 req->header.opcode = ENA_GET_FEATURE;
558 req->get_feature.id = ENA_DEVICE_ATTRIBUTES;
559
560 /* Issue request */
561 if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
562 return rc;
563
564 /* Parse response */
565 feature = &rsp->get_feature.feature;
566 memcpy ( netdev->hw_addr, feature->device.mac, ETH_ALEN );
567 netdev->max_pkt_len = le32_to_cpu ( feature->device.mtu );
568 netdev->mtu = ( netdev->max_pkt_len - ETH_HLEN );
569
570 DBGC ( ena, "ENA %p MAC %s MTU %zd\n",
571 ena, eth_ntoa ( netdev->hw_addr ), netdev->max_pkt_len );
572 return 0;
573 }
574
575 /**
576 * Get statistics (for debugging)
577 *
578 * @v ena ENA device
579 * @ret rc Return status code
580 */
ena_get_stats(struct ena_nic * ena)581 static int ena_get_stats ( struct ena_nic *ena ) {
582 union ena_aq_req *req;
583 union ena_acq_rsp *rsp;
584 struct ena_get_stats_rsp *stats;
585 int rc;
586
587 /* Do nothing unless debug messages are enabled */
588 if ( ! DBG_LOG )
589 return 0;
590
591 /* Construct request */
592 req = ena_admin_req ( ena );
593 req->header.opcode = ENA_GET_STATS;
594 req->get_stats.type = ENA_STATS_TYPE_BASIC;
595 req->get_stats.scope = ENA_STATS_SCOPE_ETH;
596 req->get_stats.device = ENA_DEVICE_MINE;
597
598 /* Issue request */
599 if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 )
600 return rc;
601
602 /* Parse response */
603 stats = &rsp->get_stats;
604 DBGC ( ena, "ENA %p TX bytes %#llx packets %#llx\n", ena,
605 ( ( unsigned long long ) le64_to_cpu ( stats->tx_bytes ) ),
606 ( ( unsigned long long ) le64_to_cpu ( stats->tx_packets ) ) );
607 DBGC ( ena, "ENA %p RX bytes %#llx packets %#llx drops %#llx\n", ena,
608 ( ( unsigned long long ) le64_to_cpu ( stats->rx_bytes ) ),
609 ( ( unsigned long long ) le64_to_cpu ( stats->rx_packets ) ),
610 ( ( unsigned long long ) le64_to_cpu ( stats->rx_drops ) ) );
611
612 return 0;
613 }
614
/******************************************************************************
 *
 * Network device interface
 *
 ******************************************************************************
 */
621
/**
 * Refill receive queue
 *
 * Posts fresh I/O buffers to the receive submission queue until it is
 * full (or buffer allocation fails), then rings the doorbell once for
 * the whole batch.
 *
 * @v netdev		Network device
 */
static void ena_refill_rx ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	struct io_buffer *iobuf;
	struct ena_rx_sqe *sqe;
	unsigned int index;
	physaddr_t address;
	size_t len = netdev->max_pkt_len;
	unsigned int refilled = 0;

	/* Refill queue until the ring holds ENA_RX_COUNT outstanding
	 * buffers
	 */
	while ( ( ena->rx.sq.prod - ena->rx.cq.cons ) < ENA_RX_COUNT ) {

		/* Allocate I/O buffer large enough for a full packet */
		iobuf = alloc_iob ( len );
		if ( ! iobuf ) {
			/* Out of memory: wait for next refill */
			break;
		}

		/* Get next submission queue entry */
		index = ( ena->rx.sq.prod % ENA_RX_COUNT );
		sqe = &ena->rx.sq.sqe.rx[index];

		/* Construct submission queue entry; the barrier
		 * ensures all fields are visible before the flags
		 * (containing the phase bit) are written.
		 */
		address = virt_to_bus ( iobuf->data );
		sqe->len = cpu_to_le16 ( len );
		sqe->id = cpu_to_le16 ( ena->rx.sq.prod );
		sqe->address = cpu_to_le64 ( address );
		wmb();
		sqe->flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL |
			       ena->rx.sq.phase );

		/* Increment producer counter; flip phase on each wrap */
		ena->rx.sq.prod++;
		if ( ( ena->rx.sq.prod % ENA_RX_COUNT ) == 0 )
			ena->rx.sq.phase ^= ENA_SQE_PHASE;

		/* Record I/O buffer so ena_poll_rx() can retrieve it */
		assert ( ena->rx_iobuf[index] == NULL );
		ena->rx_iobuf[index] = iobuf;

		DBGC2 ( ena, "ENA %p RX %d at [%08llx,%08llx)\n", ena, sqe->id,
			( ( unsigned long long ) address ),
			( ( unsigned long long ) address + len ) );
		refilled++;
	}

	/* Ring doorbell once for the whole batch, if anything was
	 * posted
	 */
	if ( refilled ) {
		wmb();
		writel ( ena->rx.sq.prod, ( ena->regs + ena->rx.sq.doorbell ) );
	}
}
680
681 /**
682 * Discard unused receive I/O buffers
683 *
684 * @v ena ENA device
685 */
ena_empty_rx(struct ena_nic * ena)686 static void ena_empty_rx ( struct ena_nic *ena ) {
687 unsigned int i;
688
689 for ( i = 0 ; i < ENA_RX_COUNT ; i++ ) {
690 if ( ena->rx_iobuf[i] )
691 free_iob ( ena->rx_iobuf[i] );
692 ena->rx_iobuf[i] = NULL;
693 }
694 }
695
/**
 * Open network device
 *
 * Creates the transmit and receive queue pairs and posts the initial
 * batch of receive buffers.
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int ena_open ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	int rc;

	/* Create transmit queue pair */
	if ( ( rc = ena_create_qp ( ena, &ena->tx ) ) != 0 )
		goto err_create_tx;

	/* Create receive queue pair */
	if ( ( rc = ena_create_qp ( ena, &ena->rx ) ) != 0 )
		goto err_create_rx;

	/* Refill receive queue */
	ena_refill_rx ( netdev );

	return 0;

	/* NOTE: unreachable on success; documents the teardown order
	 * and falls through into the error-path labels (iPXE
	 * convention).
	 */
	ena_destroy_qp ( ena, &ena->rx );
 err_create_rx:
	ena_destroy_qp ( ena, &ena->tx );
 err_create_tx:
	return rc;
}
725
726 /**
727 * Close network device
728 *
729 * @v netdev Network device
730 */
ena_close(struct net_device * netdev)731 static void ena_close ( struct net_device *netdev ) {
732 struct ena_nic *ena = netdev->priv;
733
734 /* Dump statistics (for debugging) */
735 ena_get_stats ( ena );
736
737 /* Destroy receive queue pair */
738 ena_destroy_qp ( ena, &ena->rx );
739
740 /* Discard any unused receive buffers */
741 ena_empty_rx ( ena );
742
743 /* Destroy transmit queue pair */
744 ena_destroy_qp ( ena, &ena->tx );
745 }
746
747 /**
748 * Transmit packet
749 *
750 * @v netdev Network device
751 * @v iobuf I/O buffer
752 * @ret rc Return status code
753 */
ena_transmit(struct net_device * netdev,struct io_buffer * iobuf)754 static int ena_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) {
755 struct ena_nic *ena = netdev->priv;
756 struct ena_tx_sqe *sqe;
757 unsigned int index;
758 physaddr_t address;
759 size_t len;
760
761 /* Get next submission queue entry */
762 if ( ( ena->tx.sq.prod - ena->tx.cq.cons ) >= ENA_TX_COUNT ) {
763 DBGC ( ena, "ENA %p out of transmit descriptors\n", ena );
764 return -ENOBUFS;
765 }
766 index = ( ena->tx.sq.prod % ENA_TX_COUNT );
767 sqe = &ena->tx.sq.sqe.tx[index];
768
769 /* Construct submission queue entry */
770 address = virt_to_bus ( iobuf->data );
771 len = iob_len ( iobuf );
772 sqe->len = cpu_to_le16 ( len );
773 sqe->id = ena->tx.sq.prod;
774 sqe->address = cpu_to_le64 ( address );
775 wmb();
776 sqe->flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL |
777 ena->tx.sq.phase );
778 wmb();
779
780 /* Increment producer counter */
781 ena->tx.sq.prod++;
782 if ( ( ena->tx.sq.prod % ENA_TX_COUNT ) == 0 )
783 ena->tx.sq.phase ^= ENA_SQE_PHASE;
784
785 /* Ring doorbell */
786 writel ( ena->tx.sq.prod, ( ena->regs + ena->tx.sq.doorbell ) );
787
788 DBGC2 ( ena, "ENA %p TX %d at [%08llx,%08llx)\n", ena, sqe->id,
789 ( ( unsigned long long ) address ),
790 ( ( unsigned long long ) address + len ) );
791 return 0;
792 }
793
/**
 * Poll for completed transmissions
 *
 * Walks the transmit completion queue, completing one pending packet
 * per valid entry, until an entry with a stale phase bit is found.
 *
 * @v netdev		Network device
 */
static void ena_poll_tx ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	struct ena_tx_cqe *cqe;
	unsigned int index;

	/* Check for completed packets (at most one per outstanding
	 * transmission)
	 */
	while ( ena->tx.cq.cons != ena->tx.sq.prod ) {

		/* Get next completion queue entry (mask derived from
		 * the actual queue size granted by the device)
		 */
		index = ( ena->tx.cq.cons & ena->tx.cq.mask );
		cqe = &ena->tx.cq.cqe.tx[index];

		/* Stop if completion queue entry is empty (phase bit
		 * does not yet match our expected phase)
		 */
		if ( ( cqe->flags ^ ena->tx.cq.phase ) & ENA_CQE_PHASE )
			return;
		DBGC2 ( ena, "ENA %p TX %d complete\n", ena,
			( le16_to_cpu ( cqe->id ) >> 2 /* Don't ask */ ) );

		/* Increment consumer counter; flip expected phase on
		 * each wrap
		 */
		ena->tx.cq.cons++;
		if ( ! ( ena->tx.cq.cons & ena->tx.cq.mask ) )
			ena->tx.cq.phase ^= ENA_CQE_PHASE;

		/* Complete transmit (in-order completion assumed) */
		netdev_tx_complete_next ( netdev );
	}
}
826
/**
 * Poll for received packets
 *
 * Walks the receive completion queue, handing each received buffer to
 * the network stack, until an entry with a stale phase bit is found.
 *
 * @v netdev		Network device
 */
static void ena_poll_rx ( struct net_device *netdev ) {
	struct ena_nic *ena = netdev->priv;
	struct ena_rx_cqe *cqe;
	struct io_buffer *iobuf;
	unsigned int index;
	size_t len;

	/* Check for received packets (at most one per posted buffer) */
	while ( ena->rx.cq.cons != ena->rx.sq.prod ) {

		/* Get next completion queue entry; index also locates
		 * the matching buffer recorded by ena_refill_rx()
		 */
		index = ( ena->rx.cq.cons % ENA_RX_COUNT );
		cqe = &ena->rx.cq.cqe.rx[index];

		/* Stop if completion queue entry is empty (phase bit
		 * does not yet match our expected phase)
		 */
		if ( ( cqe->flags ^ ena->rx.cq.phase ) & ENA_CQE_PHASE )
			return;

		/* Increment consumer counter; flip expected phase on
		 * each wrap
		 */
		ena->rx.cq.cons++;
		if ( ! ( ena->rx.cq.cons & ena->rx.cq.mask ) )
			ena->rx.cq.phase ^= ENA_CQE_PHASE;

		/* Retrieve the I/O buffer and set its length from the
		 * completion entry
		 */
		iobuf = ena->rx_iobuf[index];
		ena->rx_iobuf[index] = NULL;
		len = le16_to_cpu ( cqe->len );
		iob_put ( iobuf, len );

		/* Hand off to network stack (which takes ownership) */
		DBGC2 ( ena, "ENA %p RX %d complete (length %zd)\n",
			ena, le16_to_cpu ( cqe->id ), len );
		netdev_rx ( netdev, iobuf );
	}
}
867
/**
 * Poll for completed and received packets
 *
 * Reaps transmit completions and received packets, then tops up the
 * receive ring with fresh buffers.
 *
 * @v netdev		Network device
 */
static void ena_poll ( struct net_device *netdev ) {

	/* Transmit completions, then receive completions, then
	 * receive ring refill
	 */
	ena_poll_tx ( netdev );
	ena_poll_rx ( netdev );
	ena_refill_rx ( netdev );
}
884
/** ENA network device operations (registered via netdev_init()) */
static struct net_device_operations ena_operations = {
	.open		= ena_open,
	.close		= ena_close,
	.transmit	= ena_transmit,
	.poll		= ena_poll,
};
892
/******************************************************************************
 *
 * PCI interface
 *
 ******************************************************************************
 */
899
/**
 * Probe PCI device
 *
 * Allocates and initialises the network device, maps the BAR, resets
 * the NIC, brings up the admin queues, fetches the MAC address, and
 * registers the network device.
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int ena_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct ena_nic *ena;
	int rc;

	/* Allocate and initialise net device */
	netdev = alloc_etherdev ( sizeof ( *ena ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &ena_operations );
	ena = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( ena, 0, sizeof ( *ena ) );
	/* Initial expected admin completion phase */
	ena->acq.phase = ENA_ACQ_PHASE;
	/* Pre-initialise queue pair descriptors (queues themselves are
	 * created in ena_open())
	 */
	ena_cq_init ( &ena->tx.cq, ENA_TX_COUNT,
		      sizeof ( ena->tx.cq.cqe.tx[0] ) );
	ena_sq_init ( &ena->tx.sq, ENA_SQ_TX, ENA_TX_COUNT,
		      sizeof ( ena->tx.sq.sqe.tx[0] ) );
	ena_cq_init ( &ena->rx.cq, ENA_RX_COUNT,
		      sizeof ( ena->rx.cq.cqe.rx[0] ) );
	ena_sq_init ( &ena->rx.sq, ENA_SQ_RX, ENA_RX_COUNT,
		      sizeof ( ena->rx.sq.sqe.rx[0] ) );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	ena->regs = ioremap ( pci->membase, ENA_BAR_SIZE );
	if ( ! ena->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Reset the NIC */
	if ( ( rc = ena_reset ( ena ) ) != 0 )
		goto err_reset;

	/* Create admin queues */
	if ( ( rc = ena_create_admin ( ena ) ) != 0 )
		goto err_create_admin;

	/* Fetch MAC address */
	if ( ( rc = ena_get_device_attributes ( netdev ) ) != 0 )
		goto err_get_device_attributes;

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Mark as link up, since we have no way to test link state on
	 * this hardware.
	 */
	netdev_link_up ( netdev );

	return 0;

	/* NOTE: unreachable on success; documents the teardown order
	 * and falls through into the error-path labels (iPXE
	 * convention).
	 */
	unregister_netdev ( netdev );
 err_register_netdev:
 err_get_device_attributes:
	ena_destroy_admin ( ena );
 err_create_admin:
	ena_reset ( ena );
 err_reset:
	iounmap ( ena->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}
979
980 /**
981 * Remove PCI device
982 *
983 * @v pci PCI device
984 */
ena_remove(struct pci_device * pci)985 static void ena_remove ( struct pci_device *pci ) {
986 struct net_device *netdev = pci_get_drvdata ( pci );
987 struct ena_nic *ena = netdev->priv;
988
989 /* Unregister network device */
990 unregister_netdev ( netdev );
991
992 /* Destroy admin queues */
993 ena_destroy_admin ( ena );
994
995 /* Reset card */
996 ena_reset ( ena );
997
998 /* Free network device */
999 iounmap ( ena->regs );
1000 netdev_nullify ( netdev );
1001 netdev_put ( netdev );
1002 }
1003
/** ENA PCI device IDs (Amazon vendor ID 0x1d0f) */
static struct pci_device_id ena_nics[] = {
	PCI_ROM ( 0x1d0f, 0xec20, "ena-vf", "ENA VF", 0 ),
	PCI_ROM ( 0x1d0f, 0xec21, "ena-vf-llq", "ENA VF (LLQ)", 0 ),
};
1009
/** ENA PCI driver */
struct pci_driver ena_driver __pci_driver = {
	.ids = ena_nics,
	.id_count = ( sizeof ( ena_nics ) / sizeof ( ena_nics[0] ) ),
	.probe = ena_probe,
	.remove = ena_remove,
};
1017