1 /*
2 * Copyright (C) 2016 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdint.h>
27 #include <string.h>
28 #include <strings.h>
29 #include <stdio.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <assert.h>
33 #include <byteswap.h>
34 #include <ipxe/netdevice.h>
35 #include <ipxe/ethernet.h>
36 #include <ipxe/if_ether.h>
37 #include <ipxe/iobuf.h>
38 #include <ipxe/malloc.h>
39 #include <ipxe/pci.h>
40 #include <ipxe/pciea.h>
41 #include <ipxe/umalloc.h>
42 #include "thunderx.h"
43 #include "thunderxcfg.h"
44
45 /** @file
46 *
47 * Cavium ThunderX Ethernet driver
48 *
49 */
50
/** List of BGX Ethernet interfaces */
static LIST_HEAD ( txnic_bgxs );

/** List of physical functions */
static LIST_HEAD ( txnic_pfs );

/** Debug colour for physical function and BGX messages
 *
 * Derives a stable per-object debug "colour" pointer from the
 * object's list node, so PF/BGX messages are coloured distinctly
 * from the per-vNIC pointers used elsewhere in this file.
 */
#define TXNICCOL(x) ( &txnic_pfs + (x)->node )

/** Board configuration protocol
 *
 * Located at startup via EFI_REQUEST_PROTOCOL; provides
 * board-specific configuration (see thunderxcfg.h).
 */
static EFI_THUNDER_CONFIG_PROTOCOL *txcfg;
EFI_REQUEST_PROTOCOL ( EFI_THUNDER_CONFIG_PROTOCOL, &txcfg );
63
64 /******************************************************************************
65 *
66 * Diagnostics
67 *
68 ******************************************************************************
69 */
70
/**
 * Show virtual NIC diagnostics (for debugging)
 *
 * @v vnic		Virtual NIC
 *
 * Dumps, for queue set 0, the software producer/consumer byte
 * offsets alongside the hardware tail/head/status registers for the
 * send queue, receive buffer descriptor ring, and completion queue.
 */
static __attribute__ (( unused )) void txnic_diag ( struct txnic *vnic ) {

	/* Send queue: sw prod(hw tail)/sw cons(hw head) and status */
	DBGC ( vnic, "TXNIC %s SQ %05zx(%05llx)/%05zx(%05llx) %08llx\n",
	       vnic->name,
	       ( ( vnic->sq.prod % TXNIC_SQES ) * TXNIC_SQ_STRIDE ),
	       readq ( vnic->regs + TXNIC_QS_SQ_TAIL(0) ),
	       ( ( vnic->sq.cons % TXNIC_SQES ) * TXNIC_SQ_STRIDE ),
	       readq ( vnic->regs + TXNIC_QS_SQ_HEAD(0) ),
	       readq ( vnic->regs + TXNIC_QS_SQ_STATUS(0) ) );
	/* Receive buffer descriptor ring: same layout as above */
	DBGC ( vnic, "TXNIC %s RQ %05zx(%05llx)/%05zx(%05llx) %016llx\n",
	       vnic->name,
	       ( ( vnic->rq.prod % TXNIC_RQES ) * TXNIC_RQ_STRIDE ),
	       readq ( vnic->regs + TXNIC_QS_RBDR_TAIL(0) ),
	       ( ( vnic->rq.cons % TXNIC_RQES ) * TXNIC_RQ_STRIDE ),
	       readq ( vnic->regs + TXNIC_QS_RBDR_HEAD(0) ),
	       readq ( vnic->regs + TXNIC_QS_RBDR_STATUS0(0) ) );
	/* Completion queue: software tracks only the consumer side */
	DBGC ( vnic, "TXNIC %s CQ xxxxx(%05llx)/%05x(%05llx) %08llx:%08llx\n",
	       vnic->name, readq ( vnic->regs + TXNIC_QS_CQ_TAIL(0) ),
	       ( ( vnic->cq.cons % TXNIC_CQES ) * TXNIC_CQ_STRIDE ),
	       readq ( vnic->regs + TXNIC_QS_CQ_HEAD(0) ),
	       readq ( vnic->regs + TXNIC_QS_CQ_STATUS(0) ),
	       readq ( vnic->regs + TXNIC_QS_CQ_STATUS2(0) ) );
}
99
100 /******************************************************************************
101 *
102 * Send queue
103 *
104 ******************************************************************************
105 */
106
/**
 * Create send queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 *
 * Resets the software producer/consumer counters and the hardware
 * queue, then programs the ring base address and enables the queue.
 */
static int txnic_create_sq ( struct txnic *vnic ) {

	/* Reset send queue (software counters first, then hardware) */
	vnic->sq.prod = 0;
	vnic->sq.cons = 0;
	writeq ( TXNIC_QS_SQ_CFG_RESET, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );

	/* Configure and enable send queue.  QSIZE_1K presumably
	 * corresponds to TXNIC_SQES entries — TODO confirm against
	 * thunderx.h.
	 */
	writeq ( user_to_phys ( vnic->sq.sqe, 0 ),
		 ( vnic->regs + TXNIC_QS_SQ_BASE(0) ) );
	writeq ( ( TXNIC_QS_SQ_CFG_ENA | TXNIC_QS_SQ_CFG_QSIZE_1K ),
		 ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );

	DBGC ( vnic, "TXNIC %s SQ at [%08lx,%08lx)\n",
	       vnic->name, user_to_phys ( vnic->sq.sqe, 0 ),
	       user_to_phys ( vnic->sq.sqe, TXNIC_SQ_SIZE ) );
	return 0;
}
131
132 /**
133 * Disable send queue
134 *
135 * @v vnic Virtual NIC
136 * @ret rc Return status code
137 */
txnic_disable_sq(struct txnic * vnic)138 static int txnic_disable_sq ( struct txnic *vnic ) {
139 uint64_t status;
140 unsigned int i;
141
142 /* Disable send queue */
143 writeq ( 0, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );
144
145 /* Wait for send queue to be stopped */
146 for ( i = 0 ; i < TXNIC_SQ_STOP_MAX_WAIT_MS ; i++ ) {
147
148 /* Check if send queue is stopped */
149 status = readq ( vnic->regs + TXNIC_QS_SQ_STATUS(0) );
150 if ( status & TXNIC_QS_SQ_STATUS_STOPPED )
151 return 0;
152
153 /* Delay */
154 mdelay ( 1 );
155 }
156
157 DBGC ( vnic, "TXNIC %s SQ disable timed out\n", vnic->name );
158 return -ETIMEDOUT;
159 }
160
161 /**
162 * Destroy send queue
163 *
164 * @v vnic Virtual NIC
165 */
txnic_destroy_sq(struct txnic * vnic)166 static void txnic_destroy_sq ( struct txnic *vnic ) {
167 int rc;
168
169 /* Disable send queue */
170 if ( ( rc = txnic_disable_sq ( vnic ) ) != 0 ) {
171 /* Nothing else we can do */
172 return;
173 }
174
175 /* Reset send queue */
176 writeq ( TXNIC_QS_SQ_CFG_RESET, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) );
177 }
178
/**
 * Send packet
 *
 * @v vnic		Virtual NIC
 * @v iobuf		I/O buffer
 * @ret rc		Return status code (-ENOBUFS if the ring is full)
 *
 * Posts a single send queue entry (header plus one gather
 * subdescriptor) describing the buffer, then rings the doorbell.
 * Completion is reported asynchronously via txnic_complete_sqe().
 */
static int txnic_send ( struct txnic *vnic, struct io_buffer *iobuf ) {
	struct txnic_sqe sqe;
	unsigned int sq_idx;
	size_t offset;
	size_t len;

	/* Get next send queue entry; refuse if TXNIC_SQ_FILL entries
	 * are already outstanding
	 */
	if ( ( vnic->sq.prod - vnic->sq.cons ) >= TXNIC_SQ_FILL ) {
		DBGC ( vnic, "TXNIC %s out of send queue entries\n",
		       vnic->name );
		return -ENOBUFS;
	}
	sq_idx = ( vnic->sq.prod++ % TXNIC_SQES );
	offset = ( sq_idx * TXNIC_SQ_STRIDE );

	/* Populate send descriptor.  The total length is rounded up
	 * to ETH_ZLEN while the gather entry covers only the real
	 * data; presumably the hardware pads the frame on the wire —
	 * TODO confirm.
	 */
	len = iob_len ( iobuf );
	memset ( &sqe, 0, sizeof ( sqe ) );
	sqe.hdr.total = cpu_to_le32 ( ( len >= ETH_ZLEN ) ? len : ETH_ZLEN );
	sqe.hdr.subdcnt = ( TXNIC_SQE_SUBDESCS - 1 );
	sqe.hdr.flags = TXNIC_SEND_HDR_FLAGS;
	sqe.gather.size = cpu_to_le16 ( len );
	sqe.gather.flags = TXNIC_SEND_GATHER_FLAGS;
	sqe.gather.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
	DBGC2 ( vnic, "TXNIC %s SQE %#03x is [%08lx,%08lx)\n",
		vnic->name, sq_idx, virt_to_bus ( iobuf->data ),
		( virt_to_bus ( iobuf->data ) + len ) );

	/* Copy send descriptor to ring */
	copy_to_user ( vnic->sq.sqe, offset, &sqe, sizeof ( sqe ) );

	/* Ring doorbell; the write barrier ensures the descriptor is
	 * visible to the device before the doorbell write
	 */
	wmb();
	writeq ( TXNIC_SQE_SUBDESCS, ( vnic->regs + TXNIC_QS_SQ_DOOR(0) ) );

	return 0;
}
223
224 /**
225 * Complete send queue entry
226 *
227 * @v vnic Virtual NIC
228 * @v cqe Send completion queue entry
229 */
txnic_complete_sqe(struct txnic * vnic,struct txnic_cqe_send * cqe)230 static void txnic_complete_sqe ( struct txnic *vnic,
231 struct txnic_cqe_send *cqe ) {
232 struct net_device *netdev = vnic->netdev;
233 unsigned int sq_idx;
234 unsigned int status;
235
236 /* Parse completion */
237 sq_idx = ( le16_to_cpu ( cqe->sqe_ptr ) / TXNIC_SQE_SUBDESCS );
238 status = cqe->send_status;
239
240 /* Sanity check */
241 assert ( sq_idx == ( vnic->sq.cons % TXNIC_SQES ) );
242
243 /* Free send queue entry */
244 vnic->sq.cons++;
245
246 /* Complete transmission */
247 if ( status ) {
248 DBGC ( vnic, "TXNIC %s SQE %#03x complete (status %#02x)\n",
249 vnic->name, sq_idx, status );
250 netdev_tx_complete_next_err ( netdev, -EIO );
251 } else {
252 DBGC2 ( vnic, "TXNIC %s SQE %#03x complete\n",
253 vnic->name, sq_idx );
254 netdev_tx_complete_next ( netdev );
255 }
256 }
257
258 /******************************************************************************
259 *
260 * Receive queue
261 *
262 ******************************************************************************
263 */
264
/**
 * Create receive queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 *
 * Resets the software counters and the receive buffer descriptor
 * ring, programs and enables the ring, then enables the receive
 * queue itself.
 */
static int txnic_create_rq ( struct txnic *vnic ) {

	/* Reset receive buffer descriptor ring */
	vnic->rq.prod = 0;
	vnic->rq.cons = 0;
	writeq ( TXNIC_QS_RBDR_CFG_RESET,
		 ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Configure and enable receive buffer descriptor ring; LINES
	 * encodes the per-buffer size in cache lines
	 */
	writeq ( user_to_phys ( vnic->rq.rqe, 0 ),
		 ( vnic->regs + TXNIC_QS_RBDR_BASE(0) ) );
	writeq ( ( TXNIC_QS_RBDR_CFG_ENA | TXNIC_QS_RBDR_CFG_QSIZE_8K |
		   TXNIC_QS_RBDR_CFG_LINES ( TXNIC_RQE_SIZE /
					     TXNIC_LINE_SIZE ) ),
		 ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Enable receive queue */
	writeq ( TXNIC_QS_RQ_CFG_ENA, ( vnic->regs + TXNIC_QS_RQ_CFG(0) ) );

	DBGC ( vnic, "TXNIC %s RQ at [%08lx,%08lx)\n",
	       vnic->name, user_to_phys ( vnic->rq.rqe, 0 ),
	       user_to_phys ( vnic->rq.rqe, TXNIC_RQ_SIZE ) );
	return 0;
}
295
296 /**
297 * Disable receive queue
298 *
299 * @v vnic Virtual NIC
300 * @ret rc Return status code
301 */
txnic_disable_rq(struct txnic * vnic)302 static int txnic_disable_rq ( struct txnic *vnic ) {
303 uint64_t cfg;
304 unsigned int i;
305
306 /* Disable receive queue */
307 writeq ( 0, ( vnic->regs + TXNIC_QS_RQ_CFG(0) ) );
308
309 /* Wait for receive queue to be disabled */
310 for ( i = 0 ; i < TXNIC_RQ_DISABLE_MAX_WAIT_MS ; i++ ) {
311
312 /* Check if receive queue is disabled */
313 cfg = readq ( vnic->regs + TXNIC_QS_RQ_CFG(0) );
314 if ( ! ( cfg & TXNIC_QS_RQ_CFG_ENA ) )
315 return 0;
316
317 /* Delay */
318 mdelay ( 1 );
319 }
320
321 DBGC ( vnic, "TXNIC %s RQ disable timed out\n", vnic->name );
322 return -ETIMEDOUT;
323 }
324
/**
 * Destroy receive queue
 *
 * @v vnic		Virtual NIC
 *
 * Disables the queue, disables and resets the receive buffer
 * descriptor ring, and frees any buffers still posted to the ring.
 */
static void txnic_destroy_rq ( struct txnic *vnic ) {
	unsigned int i;
	int rc;

	/* Disable receive queue */
	if ( ( rc = txnic_disable_rq ( vnic ) ) != 0 ) {
		/* Leak memory; there's nothing else we can do, since
		 * the hardware presumably still owns the buffers
		 */
		return;
	}

	/* Disable receive buffer descriptor ring */
	writeq ( 0, ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Reset receive buffer descriptor ring */
	writeq ( TXNIC_QS_RBDR_CFG_RESET,
		 ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) );

	/* Free any unused I/O buffers still recorded in the ring */
	for ( i = 0 ; i < TXNIC_RQ_FILL ; i++ ) {
		if ( vnic->rq.iobuf[i] )
			free_iob ( vnic->rq.iobuf[i] );
		vnic->rq.iobuf[i] = NULL;
	}
}
354
/**
 * Refill receive queue
 *
 * @v vnic		Virtual NIC
 *
 * Allocates I/O buffers and posts receive buffer descriptors until
 * TXNIC_RQ_FILL buffers are outstanding, then rings the doorbell
 * with the number of newly posted descriptors.
 */
static void txnic_refill_rq ( struct txnic *vnic ) {
	struct io_buffer *iobuf;
	struct txnic_rqe rqe;
	unsigned int rq_idx;
	unsigned int rq_iobuf_idx;
	unsigned int refilled = 0;
	size_t offset;

	/* Refill ring */
	while ( ( vnic->rq.prod - vnic->rq.cons ) < TXNIC_RQ_FILL ) {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( TXNIC_RQE_SIZE );
		if ( ! iobuf ) {
			/* Out of memory: wait for next refill attempt */
			break;
		}

		/* Get next receive descriptor */
		rq_idx = ( vnic->rq.prod++ % TXNIC_RQES );
		offset = ( rq_idx * TXNIC_RQ_STRIDE );

		/* Populate receive descriptor with the buffer's bus address */
		rqe.rbdre.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) );
		DBGC2 ( vnic, "TXNIC %s RQE %#03x is [%08lx,%08lx)\n",
			vnic->name, rq_idx, virt_to_bus ( iobuf->data ),
			( virt_to_bus ( iobuf->data ) + TXNIC_RQE_SIZE ) );

		/* Copy receive descriptor to ring */
		copy_to_user ( vnic->rq.rqe, offset, &rqe, sizeof ( rqe ) );
		refilled++;

		/* Record I/O buffer so completion can retrieve it */
		rq_iobuf_idx = ( rq_idx % TXNIC_RQ_FILL );
		assert ( vnic->rq.iobuf[rq_iobuf_idx] == NULL );
		vnic->rq.iobuf[rq_iobuf_idx] = iobuf;
	}

	/* Ring doorbell; the write barrier ensures all descriptors
	 * are visible to the device first.  (A zero-count doorbell
	 * write is presumably harmless — TODO confirm.)
	 */
	wmb();
	writeq ( refilled, ( vnic->regs + TXNIC_QS_RBDR_DOOR(0) ) );
}
402
403 /**
404 * Complete receive queue entry
405 *
406 * @v vnic Virtual NIC
407 * @v cqe Receive completion queue entry
408 */
txnic_complete_rqe(struct txnic * vnic,struct txnic_cqe_rx * cqe)409 static void txnic_complete_rqe ( struct txnic *vnic,
410 struct txnic_cqe_rx *cqe ) {
411 struct net_device *netdev = vnic->netdev;
412 struct io_buffer *iobuf;
413 unsigned int errop;
414 unsigned int rq_idx;
415 unsigned int rq_iobuf_idx;
416 size_t apad_len;
417 size_t len;
418
419 /* Parse completion */
420 errop = cqe->errop;
421 apad_len = TXNIC_CQE_RX_APAD_LEN ( cqe->apad );
422 len = le16_to_cpu ( cqe->len );
423
424 /* Get next receive I/O buffer */
425 rq_idx = ( vnic->rq.cons++ % TXNIC_RQES );
426 rq_iobuf_idx = ( rq_idx % TXNIC_RQ_FILL );
427 iobuf = vnic->rq.iobuf[rq_iobuf_idx];
428 vnic->rq.iobuf[rq_iobuf_idx] = NULL;
429
430 /* Populate I/O buffer */
431 iob_reserve ( iobuf, apad_len );
432 iob_put ( iobuf, len );
433
434 /* Hand off to network stack */
435 if ( errop ) {
436 DBGC ( vnic, "TXNIC %s RQE %#03x error (length %zd, errop "
437 "%#02x)\n", vnic->name, rq_idx, len, errop );
438 netdev_rx_err ( netdev, iobuf, -EIO );
439 } else {
440 DBGC2 ( vnic, "TXNIC %s RQE %#03x complete (length %zd)\n",
441 vnic->name, rq_idx, len );
442 netdev_rx ( netdev, iobuf );
443 }
444 }
445
446 /******************************************************************************
447 *
448 * Completion queue
449 *
450 ******************************************************************************
451 */
452
/**
 * Create completion queue
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 *
 * Resets the software consumer counter and the hardware queue, then
 * programs the ring base address and enables the queue.  (There is
 * no software producer counter; the hardware produces entries.)
 */
static int txnic_create_cq ( struct txnic *vnic ) {

	/* Reset completion queue */
	vnic->cq.cons = 0;
	writeq ( TXNIC_QS_CQ_CFG_RESET, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );

	/* Configure and enable completion queue.  QSIZE_256
	 * presumably corresponds to TXNIC_CQES entries — TODO confirm
	 * against thunderx.h.
	 */
	writeq ( user_to_phys ( vnic->cq.cqe, 0 ),
		 ( vnic->regs + TXNIC_QS_CQ_BASE(0) ) );
	writeq ( ( TXNIC_QS_CQ_CFG_ENA | TXNIC_QS_CQ_CFG_QSIZE_256 ),
		 ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );

	DBGC ( vnic, "TXNIC %s CQ at [%08lx,%08lx)\n",
	       vnic->name, user_to_phys ( vnic->cq.cqe, 0 ),
	       user_to_phys ( vnic->cq.cqe, TXNIC_CQ_SIZE ) );
	return 0;
}
476
477 /**
478 * Disable completion queue
479 *
480 * @v vnic Virtual NIC
481 * @ret rc Return status code
482 */
txnic_disable_cq(struct txnic * vnic)483 static int txnic_disable_cq ( struct txnic *vnic ) {
484 uint64_t cfg;
485 unsigned int i;
486
487 /* Disable completion queue */
488 writeq ( 0, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );
489
490 /* Wait for completion queue to be disabled */
491 for ( i = 0 ; i < TXNIC_CQ_DISABLE_MAX_WAIT_MS ; i++ ) {
492
493 /* Check if completion queue is disabled */
494 cfg = readq ( vnic->regs + TXNIC_QS_CQ_CFG(0) );
495 if ( ! ( cfg & TXNIC_QS_CQ_CFG_ENA ) )
496 return 0;
497
498 /* Delay */
499 mdelay ( 1 );
500 }
501
502 DBGC ( vnic, "TXNIC %s CQ disable timed out\n", vnic->name );
503 return -ETIMEDOUT;
504 }
505
506 /**
507 * Destroy completion queue
508 *
509 * @v vnic Virtual NIC
510 */
txnic_destroy_cq(struct txnic * vnic)511 static void txnic_destroy_cq ( struct txnic *vnic ) {
512 int rc;
513
514 /* Disable completion queue */
515 if ( ( rc = txnic_disable_cq ( vnic ) ) != 0 ) {
516 /* Leak memory; there's nothing else we can do */
517 return;
518 }
519
520 /* Reset completion queue */
521 writeq ( TXNIC_QS_CQ_CFG_RESET, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) );
522 }
523
/**
 * Poll completion queue
 *
 * @v vnic		Virtual NIC
 *
 * Reads the hardware's pending-completion count, dispatches each
 * entry to the send or receive completion handler, then acknowledges
 * the processed entries via the doorbell.
 */
static void txnic_poll_cq ( struct txnic *vnic ) {
	union txnic_cqe cqe;
	uint64_t status;
	size_t offset;
	unsigned int qcount;
	unsigned int cq_idx;
	unsigned int i;

	/* Get number of completions pending in hardware */
	status = readq ( vnic->regs + TXNIC_QS_CQ_STATUS(0) );
	qcount = TXNIC_QS_CQ_STATUS_QCOUNT ( status );
	if ( ! qcount )
		return;

	/* Process completion queue entries */
	for ( i = 0 ; i < qcount ; i++ ) {

		/* Copy next completion queue entry out of the ring */
		cq_idx = ( vnic->cq.cons++ % TXNIC_CQES );
		offset = ( cq_idx * TXNIC_CQ_STRIDE );
		copy_from_user ( &cqe, vnic->cq.cqe, offset, sizeof ( cqe ) );

		/* Dispatch by completion type */
		switch ( cqe.common.cqe_type ) {
		case TXNIC_CQE_TYPE_SEND:
			txnic_complete_sqe ( vnic, &cqe.send );
			break;
		case TXNIC_CQE_TYPE_RX:
			txnic_complete_rqe ( vnic, &cqe.rx );
			break;
		default:
			/* Unknown type: log and hex-dump the entry,
			 * but still consume it
			 */
			DBGC ( vnic, "TXNIC %s unknown completion type %d\n",
			       vnic->name, cqe.common.cqe_type );
			DBGC_HDA ( vnic, user_to_phys ( vnic->cq.cqe, offset ),
				   &cqe, sizeof ( cqe ) );
			break;
		}
	}

	/* Acknowledge the processed completions */
	writeq ( qcount, ( vnic->regs + TXNIC_QS_CQ_DOOR(0) ) );
}
571
572 /******************************************************************************
573 *
574 * Virtual NIC
575 *
576 ******************************************************************************
577 */
578
/**
 * Open virtual NIC
 *
 * @v vnic		Virtual NIC
 * @ret rc		Return status code
 *
 * Creates the completion, send, and receive queues (in that order)
 * and pre-fills the receive ring.
 */
static int txnic_open ( struct txnic *vnic ) {
	int rc;

	/* Create completion queue */
	if ( ( rc = txnic_create_cq ( vnic ) ) != 0 )
		goto err_create_cq;

	/* Create send queue */
	if ( ( rc = txnic_create_sq ( vnic ) ) != 0 )
		goto err_create_sq;

	/* Create receive queue */
	if ( ( rc = txnic_create_rq ( vnic ) ) != 0 )
		goto err_create_rq;

	/* Refill receive queue */
	txnic_refill_rq ( vnic );

	return 0;

	/* Error unwind path: each teardown call sits (unreachably on
	 * success) immediately before its goto label, per iPXE
	 * convention
	 */
	txnic_destroy_rq ( vnic );
 err_create_rq:
	txnic_destroy_sq ( vnic );
 err_create_sq:
	txnic_destroy_cq ( vnic );
 err_create_cq:
	return rc;
}
613
/**
 * Close virtual NIC
 *
 * @v vnic		Virtual NIC
 *
 * Destroys the queues in the reverse of the order in which
 * txnic_open() created them.
 */
static void txnic_close ( struct txnic *vnic ) {

	/* Destroy receive queue */
	txnic_destroy_rq ( vnic );

	/* Destroy send queue */
	txnic_destroy_sq ( vnic );

	/* Destroy completion queue */
	txnic_destroy_cq ( vnic );
}
630
/**
 * Poll virtual NIC
 *
 * @v vnic		Virtual NIC
 *
 * Processes any pending completions, then tops up the receive ring
 * with fresh buffers.
 */
static void txnic_poll ( struct txnic *vnic ) {

	/* Poll completion queue */
	txnic_poll_cq ( vnic );

	/* Refill receive queue */
	txnic_refill_rq ( vnic );
}
644
/**
 * Allocate virtual NIC
 *
 * @v dev		Underlying device
 * @v membase		Register base address
 * @ret vnic		Virtual NIC, or NULL on failure
 *
 * Allocates the network device, the three descriptor rings, and the
 * register mapping.  On failure everything is unwound and NULL is
 * returned.
 */
static struct txnic * txnic_alloc ( struct device *dev,
				    unsigned long membase ) {
	struct net_device *netdev;
	struct txnic *vnic;

	/* Allocate network device, with the vNIC structure carried
	 * (initially) in netdev->priv
	 */
	netdev = alloc_etherdev ( sizeof ( *vnic ) );
	if ( ! netdev )
		goto err_alloc_netdev;
	netdev->dev = dev;
	vnic = netdev->priv;
	vnic->netdev = netdev;
	vnic->name = dev->name;

	/* Allow caller to reuse netdev->priv.  (The generic virtual
	 * NIC code never assumes that netdev->priv==vnic.)
	 */
	netdev->priv = NULL;

	/* Allocate completion queue */
	vnic->cq.cqe = umalloc ( TXNIC_CQ_SIZE );
	if ( ! vnic->cq.cqe )
		goto err_alloc_cq;

	/* Allocate send queue */
	vnic->sq.sqe = umalloc ( TXNIC_SQ_SIZE );
	if ( ! vnic->sq.sqe )
		goto err_alloc_sq;

	/* Allocate receive queue */
	vnic->rq.rqe = umalloc ( TXNIC_RQ_SIZE );
	if ( ! vnic->rq.rqe )
		goto err_alloc_rq;

	/* Map registers */
	vnic->regs = ioremap ( membase, TXNIC_VF_BAR_SIZE );
	if ( ! vnic->regs )
		goto err_ioremap;

	return vnic;

	/* Error unwind path (first line unreachable on success, per
	 * iPXE convention)
	 */
	iounmap ( vnic->regs );
 err_ioremap:
	ufree ( vnic->rq.rqe );
 err_alloc_rq:
	ufree ( vnic->sq.sqe );
 err_alloc_sq:
	ufree ( vnic->cq.cqe );
 err_alloc_cq:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc_netdev:
	return NULL;
}
706
/**
 * Free virtual NIC
 *
 * @v vnic		Virtual NIC
 *
 * Releases everything txnic_alloc() acquired, in reverse order of
 * acquisition.  The caller must already have closed the vNIC.
 */
static void txnic_free ( struct txnic *vnic ) {
	struct net_device *netdev = vnic->netdev;

	/* Unmap registers */
	iounmap ( vnic->regs );

	/* Free receive queue */
	ufree ( vnic->rq.rqe );

	/* Free send queue */
	ufree ( vnic->sq.sqe );

	/* Free completion queue */
	ufree ( vnic->cq.cqe );

	/* Free network device (vnic lives inside it, so this must be last) */
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}
731
732 /******************************************************************************
733 *
734 * Logical MAC virtual NICs
735 *
736 ******************************************************************************
737 */
738
739 /**
740 * Show LMAC diagnostics (for debugging)
741 *
742 * @v lmac Logical MAC
743 */
744 static __attribute__ (( unused )) void
txnic_lmac_diag(struct txnic_lmac * lmac)745 txnic_lmac_diag ( struct txnic_lmac *lmac ) {
746 struct txnic *vnic = lmac->vnic;
747 uint64_t status1;
748 uint64_t status2;
749 uint64_t br_status1;
750 uint64_t br_status2;
751 uint64_t br_algn_status;
752 uint64_t br_pmd_status;
753 uint64_t an_status;
754
755 /* Read status (clearing latching bits) */
756 writeq ( BGX_SPU_STATUS1_RCV_LNK, ( lmac->regs + BGX_SPU_STATUS1 ) );
757 writeq ( BGX_SPU_STATUS2_RCVFLT, ( lmac->regs + BGX_SPU_STATUS2 ) );
758 status1 = readq ( lmac->regs + BGX_SPU_STATUS1 );
759 status2 = readq ( lmac->regs + BGX_SPU_STATUS2 );
760 DBGC ( vnic, "TXNIC %s SPU %02llx:%04llx%s%s%s\n",
761 vnic->name, status1, status2,
762 ( ( status1 & BGX_SPU_STATUS1_FLT ) ? " FLT" : "" ),
763 ( ( status1 & BGX_SPU_STATUS1_RCV_LNK ) ? " RCV_LNK" : "" ),
764 ( ( status2 & BGX_SPU_STATUS2_RCVFLT ) ? " RCVFLT" : "" ) );
765
766 /* Read BASE-R status (clearing latching bits) */
767 writeq ( ( BGX_SPU_BR_STATUS2_LATCHED_LOCK |
768 BGX_SPU_BR_STATUS2_LATCHED_BER ),
769 ( lmac->regs + BGX_SPU_BR_STATUS2 ) );
770 br_status1 = readq ( lmac->regs + BGX_SPU_BR_STATUS1 );
771 br_status2 = readq ( lmac->regs + BGX_SPU_BR_STATUS2 );
772 DBGC ( vnic, "TXNIC %s BR %04llx:%04llx%s%s%s%s%s\n",
773 vnic->name, br_status2, br_status2,
774 ( ( br_status1 & BGX_SPU_BR_STATUS1_RCV_LNK ) ? " RCV_LNK" : ""),
775 ( ( br_status1 & BGX_SPU_BR_STATUS1_HI_BER ) ? " HI_BER" : "" ),
776 ( ( br_status1 & BGX_SPU_BR_STATUS1_BLK_LOCK ) ?
777 " BLK_LOCK" : "" ),
778 ( ( br_status2 & BGX_SPU_BR_STATUS2_LATCHED_LOCK ) ?
779 " LATCHED_LOCK" : "" ),
780 ( ( br_status2 & BGX_SPU_BR_STATUS2_LATCHED_BER ) ?
781 " LATCHED_BER" : "" ) );
782
783 /* Read BASE-R alignment status */
784 br_algn_status = readq ( lmac->regs + BGX_SPU_BR_ALGN_STATUS );
785 DBGC ( vnic, "TXNIC %s BR ALGN %016llx%s\n", vnic->name, br_algn_status,
786 ( ( br_algn_status & BGX_SPU_BR_ALGN_STATUS_ALIGND ) ?
787 " ALIGND" : "" ) );
788
789 /* Read BASE-R link training status */
790 br_pmd_status = readq ( lmac->regs + BGX_SPU_BR_PMD_STATUS );
791 DBGC ( vnic, "TXNIC %s BR PMD %04llx\n", vnic->name, br_pmd_status );
792
793 /* Read autonegotiation status (clearing latching bits) */
794 writeq ( ( BGX_SPU_AN_STATUS_PAGE_RX | BGX_SPU_AN_STATUS_LINK_STATUS ),
795 ( lmac->regs + BGX_SPU_AN_STATUS ) );
796 an_status = readq ( lmac->regs + BGX_SPU_AN_STATUS );
797 DBGC ( vnic, "TXNIC %s BR AN %04llx%s%s%s%s%s\n", vnic->name, an_status,
798 ( ( an_status & BGX_SPU_AN_STATUS_XNP_STAT ) ? " XNP_STAT" : ""),
799 ( ( an_status & BGX_SPU_AN_STATUS_PAGE_RX ) ? " PAGE_RX" : "" ),
800 ( ( an_status & BGX_SPU_AN_STATUS_AN_COMPLETE ) ?
801 " AN_COMPLETE" : "" ),
802 ( ( an_status & BGX_SPU_AN_STATUS_LINK_STATUS ) ?
803 " LINK_STATUS" : "" ),
804 ( ( an_status & BGX_SPU_AN_STATUS_LP_AN_ABLE ) ?
805 " LP_AN_ABLE" : "" ) );
806
807 /* Read transmit statistics */
808 DBGC ( vnic, "TXNIC %s TXF xc %#llx xd %#llx mc %#llx sc %#llx ok "
809 "%#llx bc %#llx mc %#llx un %#llx pa %#llx\n", vnic->name,
810 readq ( lmac->regs + BGX_CMR_TX_STAT0 ),
811 readq ( lmac->regs + BGX_CMR_TX_STAT1 ),
812 readq ( lmac->regs + BGX_CMR_TX_STAT2 ),
813 readq ( lmac->regs + BGX_CMR_TX_STAT3 ),
814 readq ( lmac->regs + BGX_CMR_TX_STAT5 ),
815 readq ( lmac->regs + BGX_CMR_TX_STAT14 ),
816 readq ( lmac->regs + BGX_CMR_TX_STAT15 ),
817 readq ( lmac->regs + BGX_CMR_TX_STAT16 ),
818 readq ( lmac->regs + BGX_CMR_TX_STAT17 ) );
819 DBGC ( vnic, "TXNIC %s TXB ok %#llx hist %#llx:%#llx:%#llx:%#llx:"
820 "%#llx:%#llx:%#llx:%#llx\n", vnic->name,
821 readq ( lmac->regs + BGX_CMR_TX_STAT4 ),
822 readq ( lmac->regs + BGX_CMR_TX_STAT6 ),
823 readq ( lmac->regs + BGX_CMR_TX_STAT7 ),
824 readq ( lmac->regs + BGX_CMR_TX_STAT8 ),
825 readq ( lmac->regs + BGX_CMR_TX_STAT9 ),
826 readq ( lmac->regs + BGX_CMR_TX_STAT10 ),
827 readq ( lmac->regs + BGX_CMR_TX_STAT11 ),
828 readq ( lmac->regs + BGX_CMR_TX_STAT12 ),
829 readq ( lmac->regs + BGX_CMR_TX_STAT13 ) );
830
831 /* Read receive statistics */
832 DBGC ( vnic, "TXNIC %s RXF ok %#llx pa %#llx nm %#llx ov %#llx er "
833 "%#llx nc %#llx\n", vnic->name,
834 readq ( lmac->regs + BGX_CMR_RX_STAT0 ),
835 readq ( lmac->regs + BGX_CMR_RX_STAT2 ),
836 readq ( lmac->regs + BGX_CMR_RX_STAT4 ),
837 readq ( lmac->regs + BGX_CMR_RX_STAT6 ),
838 readq ( lmac->regs + BGX_CMR_RX_STAT8 ),
839 readq ( lmac->regs + BGX_CMR_RX_STAT9 ) );
840 DBGC ( vnic, "TXNIC %s RXB ok %#llx pa %#llx nm %#llx ov %#llx nc "
841 "%#llx\n", vnic->name,
842 readq ( lmac->regs + BGX_CMR_RX_STAT1 ),
843 readq ( lmac->regs + BGX_CMR_RX_STAT3 ),
844 readq ( lmac->regs + BGX_CMR_RX_STAT5 ),
845 readq ( lmac->regs + BGX_CMR_RX_STAT7 ),
846 readq ( lmac->regs + BGX_CMR_RX_STAT10 ) );
847 }
848
849 /**
850 * Update LMAC link state
851 *
852 * @v lmac Logical MAC
853 */
txnic_lmac_update_link(struct txnic_lmac * lmac)854 static void txnic_lmac_update_link ( struct txnic_lmac *lmac ) {
855 struct txnic *vnic = lmac->vnic;
856 struct net_device *netdev = vnic->netdev;
857 uint64_t status1;
858
859 /* Read status (clearing latching bits) */
860 writeq ( BGX_SPU_STATUS1_RCV_LNK, ( lmac->regs + BGX_SPU_STATUS1 ) );
861 status1 = readq ( lmac->regs + BGX_SPU_STATUS1 );
862
863 /* Report link status */
864 if ( status1 & BGX_SPU_STATUS1_RCV_LNK ) {
865 netdev_link_up ( netdev );
866 } else {
867 netdev_link_down ( netdev );
868 }
869 }
870
/**
 * Poll LMAC link state
 *
 * @v lmac		Logical MAC
 *
 * Reads the SPU interrupt status; if any interrupt bits are set,
 * logs them, acknowledges them (write-one-to-clear), and refreshes
 * the reported link state.
 */
static void txnic_lmac_poll_link ( struct txnic_lmac *lmac ) {
	struct txnic *vnic = lmac->vnic;
	uint64_t intr;

	/* Get interrupt status; nothing to do if no bits are set */
	intr = readq ( lmac->regs + BGX_SPU_INT );
	if ( ! intr )
		return;
	DBGC ( vnic, "TXNIC %s INT %04llx%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
	       vnic->name, intr,
	       ( ( intr & BGX_SPU_INT_TRAINING_FAIL ) ? " TRAINING_FAIL" : "" ),
	       ( ( intr & BGX_SPU_INT_TRAINING_DONE ) ? " TRAINING_DONE" : "" ),
	       ( ( intr & BGX_SPU_INT_AN_COMPLETE ) ? " AN_COMPLETE" : "" ),
	       ( ( intr & BGX_SPU_INT_AN_LINK_GOOD ) ? " AN_LINK_GOOD" : "" ),
	       ( ( intr & BGX_SPU_INT_AN_PAGE_RX ) ? " AN_PAGE_RX" : "" ),
	       ( ( intr & BGX_SPU_INT_FEC_UNCORR ) ? " FEC_UNCORR" : "" ),
	       ( ( intr & BGX_SPU_INT_FEC_CORR ) ? " FEC_CORR" : "" ),
	       ( ( intr & BGX_SPU_INT_BIP_ERR ) ? " BIP_ERR" : "" ),
	       ( ( intr & BGX_SPU_INT_DBG_SYNC ) ? " DBG_SYNC" : "" ),
	       ( ( intr & BGX_SPU_INT_ALGNLOS ) ? " ALGNLOS" : "" ),
	       ( ( intr & BGX_SPU_INT_SYNLOS ) ? " SYNLOS" : "" ),
	       ( ( intr & BGX_SPU_INT_BITLCKLS ) ? " BITLCKLS" : "" ),
	       ( ( intr & BGX_SPU_INT_ERR_BLK ) ? " ERR_BLK" : "" ),
	       ( ( intr & BGX_SPU_INT_RX_LINK_DOWN ) ? " RX_LINK_DOWN" : "" ),
	       ( ( intr & BGX_SPU_INT_RX_LINK_UP ) ? " RX_LINK_UP" : "" ) );

	/* Acknowledge interrupt status (write-one-to-clear) */
	writeq ( intr, ( lmac->regs + BGX_SPU_INT ) );

	/* Update link state */
	txnic_lmac_update_link ( lmac );
}
908
/**
 * Reset LMAC
 *
 * @v lmac		Logical MAC
 *
 * Restores the physical-function queue set registers for this LMAC
 * to power-on defaults by writing zero to each.
 */
static void txnic_lmac_reset ( struct txnic_lmac *lmac ) {
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	void *qsregs = ( pf->regs + TXNIC_PF_QS ( lmac->idx ) );

	/* There is no reset available for the physical function
	 * aspects of a virtual NIC; we have to explicitly reload a
	 * sensible set of default values.
	 */
	writeq ( 0, ( qsregs + TXNIC_PF_QS_CFG ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_RQ_CFG(0) ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_RQ_DROP_CFG(0) ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_RQ_BP_CFG(0) ) );
	writeq ( 0, ( qsregs + TXNIC_PF_QS_SQ_CFG(0) ) );
}
929
/**
 * Open network device
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 *
 * Programs the physical-function side of the virtual NIC (parse
 * indices, LMAC limits, channels, traffic limiters, and queue set
 * wiring), enables the queue set, then opens the virtual NIC itself.
 */
static int txnic_lmac_open ( struct net_device *netdev ) {
	struct txnic_lmac *lmac = netdev->priv;
	struct txnic_bgx *bgx = lmac->bgx;
	struct txnic_pf *pf = bgx->pf;
	struct txnic *vnic = lmac->vnic;
	unsigned int vnic_idx = lmac->idx;
	unsigned int chan_idx = TXNIC_CHAN_IDX ( vnic_idx );
	unsigned int tl4_idx = TXNIC_TL4_IDX ( vnic_idx );
	unsigned int tl3_idx = TXNIC_TL3_IDX ( vnic_idx );
	unsigned int tl2_idx = TXNIC_TL2_IDX ( vnic_idx );
	void *lmregs = ( pf->regs + TXNIC_PF_LMAC ( vnic_idx ) );
	void *chregs = ( pf->regs + TXNIC_PF_CHAN ( chan_idx ) );
	void *qsregs = ( pf->regs + TXNIC_PF_QS ( vnic_idx ) );
	size_t max_pkt_size;
	int rc;

	/* Configure channel/match parse indices to map this vNIC
	 * index to itself
	 */
	writeq ( ( TXNIC_PF_MPI_CFG_VNIC ( vnic_idx ) |
		   TXNIC_PF_MPI_CFG_RSSI_BASE ( vnic_idx ) ),
		 ( TXNIC_PF_MPI_CFG ( vnic_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_RSSI_RQ_RQ_QS ( vnic_idx ) ),
		 ( TXNIC_PF_RSSI_RQ ( vnic_idx ) + pf->regs ) );

	/* Configure LMAC packet size limits and credits */
	max_pkt_size = ( netdev->max_pkt_len + 4 /* possible VLAN */ );
	writeq ( ( TXNIC_PF_LMAC_CFG_ADJUST_DEFAULT |
		   TXNIC_PF_LMAC_CFG_MIN_PKT_SIZE ( ETH_ZLEN ) ),
		 ( TXNIC_PF_LMAC_CFG + lmregs ) );
	writeq ( ( TXNIC_PF_LMAC_CFG2_MAX_PKT_SIZE ( max_pkt_size ) ),
		 ( TXNIC_PF_LMAC_CFG2 + lmregs ) );
	writeq ( ( TXNIC_PF_LMAC_CREDIT_CC_UNIT_CNT_DEFAULT |
		   TXNIC_PF_LMAC_CREDIT_CC_PACKET_CNT_DEFAULT |
		   TXNIC_PF_LMAC_CREDIT_CC_ENABLE ),
		 ( TXNIC_PF_LMAC_CREDIT + lmregs ) );

	/* Configure channels (TX backpressure, RX parse/backpressure) */
	writeq ( ( TXNIC_PF_CHAN_TX_CFG_BP_ENA ),
		 ( TXNIC_PF_CHAN_TX_CFG + chregs ) );
	writeq ( ( TXNIC_PF_CHAN_RX_CFG_CPI_BASE ( vnic_idx ) ),
		 ( TXNIC_PF_CHAN_RX_CFG + chregs ) );
	writeq ( ( TXNIC_PF_CHAN_RX_BP_CFG_ENA |
		   TXNIC_PF_CHAN_RX_BP_CFG_BPID ( vnic_idx ) ),
		 ( TXNIC_PF_CHAN_RX_BP_CFG + chregs ) );

	/* Configure traffic limiters (TL2 -> TL3 -> TL4 hierarchy) */
	writeq ( ( TXNIC_PF_TL2_CFG_RR_QUANTUM_DEFAULT ),
		 ( TXNIC_PF_TL2_CFG ( tl2_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_TL3_CFG_RR_QUANTUM_DEFAULT ),
		 ( TXNIC_PF_TL3_CFG ( tl3_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_TL3_CHAN_CHAN ( chan_idx ) ),
		 ( TXNIC_PF_TL3_CHAN ( tl3_idx ) + pf->regs ) );
	writeq ( ( TXNIC_PF_TL4_CFG_SQ_QS ( vnic_idx ) |
		   TXNIC_PF_TL4_CFG_RR_QUANTUM_DEFAULT ),
		 ( TXNIC_PF_TL4_CFG ( tl4_idx ) + pf->regs ) );

	/* Configure send queue */
	writeq ( ( TXNIC_PF_QS_SQ_CFG_CQ_QS ( vnic_idx ) ),
		 ( TXNIC_PF_QS_SQ_CFG(0) + qsregs ) );
	writeq ( ( TXNIC_PF_QS_SQ_CFG2_TL4 ( tl4_idx ) ),
		 ( TXNIC_PF_QS_SQ_CFG2(0) + qsregs ) );

	/* Configure receive queue */
	writeq ( ( TXNIC_PF_QS_RQ_CFG_CACHING_ALL |
		   TXNIC_PF_QS_RQ_CFG_CQ_QS ( vnic_idx ) |
		   TXNIC_PF_QS_RQ_CFG_RBDR_CONT_QS ( vnic_idx ) |
		   TXNIC_PF_QS_RQ_CFG_RBDR_STRT_QS ( vnic_idx ) ),
		 ( TXNIC_PF_QS_RQ_CFG(0) + qsregs ) );
	writeq ( ( TXNIC_PF_QS_RQ_BP_CFG_RBDR_BP_ENA |
		   TXNIC_PF_QS_RQ_BP_CFG_CQ_BP_ENA |
		   TXNIC_PF_QS_RQ_BP_CFG_BPID ( vnic_idx ) ),
		 ( TXNIC_PF_QS_RQ_BP_CFG(0) + qsregs ) );

	/* Enable queue set */
	writeq ( ( TXNIC_PF_QS_CFG_ENA | TXNIC_PF_QS_CFG_VNIC ( vnic_idx ) ),
		 ( TXNIC_PF_QS_CFG + qsregs ) );

	/* Open virtual NIC */
	if ( ( rc = txnic_open ( vnic ) ) != 0 )
		goto err_open;

	/* Update link state */
	txnic_lmac_update_link ( lmac );

	return 0;

	/* Error unwind path (first line unreachable on success, per
	 * iPXE convention)
	 */
	txnic_close ( vnic );
 err_open:
	writeq ( 0, ( qsregs + TXNIC_PF_QS_CFG ) );
	return rc;
}
1026
1027 /**
1028 * Close network device
1029 *
1030 * @v netdev Network device
1031 */
txnic_lmac_close(struct net_device * netdev)1032 static void txnic_lmac_close ( struct net_device *netdev ) {
1033 struct txnic_lmac *lmac = netdev->priv;
1034 struct txnic_bgx *bgx = lmac->bgx;
1035 struct txnic_pf *pf = bgx->pf;
1036 struct txnic *vnic = lmac->vnic;
1037 void *qsregs = ( pf->regs + TXNIC_PF_QS ( lmac->idx ) );
1038
1039 /* Close virtual NIC */
1040 txnic_close ( vnic );
1041
1042 /* Disable queue set */
1043 writeq ( 0, ( qsregs + TXNIC_PF_QS_CFG ) );
1044 }
1045
1046 /**
1047 * Transmit packet
1048 *
1049 * @v netdev Network device
1050 * @v iobuf I/O buffer
1051 * @ret rc Return status code
1052 */
txnic_lmac_transmit(struct net_device * netdev,struct io_buffer * iobuf)1053 static int txnic_lmac_transmit ( struct net_device *netdev,
1054 struct io_buffer *iobuf ) {
1055 struct txnic_lmac *lmac = netdev->priv;
1056 struct txnic *vnic = lmac->vnic;
1057
1058 return txnic_send ( vnic, iobuf );
1059 }
1060
1061 /**
1062 * Poll network device
1063 *
1064 * @v netdev Network device
1065 */
txnic_lmac_poll(struct net_device * netdev)1066 static void txnic_lmac_poll ( struct net_device *netdev ) {
1067 struct txnic_lmac *lmac = netdev->priv;
1068 struct txnic *vnic = lmac->vnic;
1069
1070 /* Poll virtual NIC */
1071 txnic_poll ( vnic );
1072
1073 /* Poll link state */
1074 txnic_lmac_poll_link ( lmac );
1075 }
1076
1077 /** Network device operations */
static struct net_device_operations txnic_lmac_operations = {
	.open = txnic_lmac_open,		/* defined earlier in this file */
	.close = txnic_lmac_close,		/* close VNIC, disable queue set */
	.transmit = txnic_lmac_transmit,	/* delegate to txnic_send() */
	.poll = txnic_lmac_poll,		/* poll VNIC and link state */
};
1084
1085 /**
1086 * Probe logical MAC virtual NIC
1087 *
1088 * @v lmac Logical MAC
1089 * @ret rc Return status code
1090 */
txnic_lmac_probe(struct txnic_lmac * lmac)1091 static int txnic_lmac_probe ( struct txnic_lmac *lmac ) {
1092 struct txnic_bgx *bgx = lmac->bgx;
1093 struct txnic_pf *pf = bgx->pf;
1094 struct txnic *vnic;
1095 struct net_device *netdev;
1096 unsigned long membase;
1097 int rc;
1098
1099 /* Sanity check */
1100 assert ( lmac->vnic == NULL );
1101
1102 /* Calculate register base address */
1103 membase = ( pf->vf_membase + ( lmac->idx * pf->vf_stride ) );
1104
1105 /* Allocate and initialise network device */
1106 vnic = txnic_alloc ( &bgx->pci->dev, membase );
1107 if ( ! vnic ) {
1108 rc = -ENOMEM;
1109 goto err_alloc;
1110 }
1111 netdev = vnic->netdev;
1112 netdev_init ( netdev, &txnic_lmac_operations );
1113 netdev->priv = lmac;
1114 lmac->vnic = vnic;
1115
1116 /* Reset device */
1117 txnic_lmac_reset ( lmac );
1118
1119 /* Set MAC address */
1120 memcpy ( netdev->hw_addr, lmac->mac.raw, ETH_ALEN );
1121
1122 /* Register network device */
1123 if ( ( rc = register_netdev ( netdev ) ) != 0 )
1124 goto err_register;
1125 vnic->name = netdev->name;
1126 DBGC ( TXNICCOL ( pf ), "TXNIC %d/%d/%d is %s (%s)\n", pf->node,
1127 bgx->idx, lmac->idx, vnic->name, eth_ntoa ( lmac->mac.raw ) );
1128
1129 /* Update link state */
1130 txnic_lmac_update_link ( lmac );
1131
1132 return 0;
1133
1134 unregister_netdev ( netdev );
1135 err_register:
1136 txnic_lmac_reset ( lmac );
1137 txnic_free ( vnic );
1138 lmac->vnic = NULL;
1139 err_alloc:
1140 return rc;
1141 }
1142
1143 /**
1144 * Remove logical MAC virtual NIC
1145 *
1146 * @v lmac Logical MAC
1147 */
txnic_lmac_remove(struct txnic_lmac * lmac)1148 static void txnic_lmac_remove ( struct txnic_lmac *lmac ) {
1149 uint64_t config;
1150
1151 /* Sanity check */
1152 assert ( lmac->vnic != NULL );
1153
1154 /* Disable packet receive and transmit */
1155 config = readq ( lmac->regs + BGX_CMR_CONFIG );
1156 config &= ~( BGX_CMR_CONFIG_DATA_PKT_TX_EN |
1157 BGX_CMR_CONFIG_DATA_PKT_RX_EN );
1158 writeq ( config, ( lmac->regs + BGX_CMR_CONFIG ) );
1159
1160 /* Unregister network device */
1161 unregister_netdev ( lmac->vnic->netdev );
1162
1163 /* Reset device */
1164 txnic_lmac_reset ( lmac );
1165
1166 /* Free virtual NIC */
1167 txnic_free ( lmac->vnic );
1168 lmac->vnic = NULL;
1169 }
1170
1171 /**
1172 * Probe all LMACs on a BGX Ethernet interface
1173 *
1174 * @v pf Physical function
1175 * @v bgx BGX Ethernet interface
1176 * @ret rc Return status code
1177 */
static int txnic_lmac_probe_all ( struct txnic_pf *pf, struct txnic_bgx *bgx ) {
	unsigned int bgx_idx;
	int lmac_idx;
	int count;
	int rc;

	/* Sanity checks: BGX must be on the same node and not yet
	 * associated with any physical function
	 */
	bgx_idx = bgx->idx;
	assert ( pf->node == bgx->node );
	assert ( pf->bgx[bgx_idx] == NULL );
	assert ( bgx->pf == NULL );

	/* Associate BGX with physical function */
	pf->bgx[bgx_idx] = bgx;
	bgx->pf = pf;

	/* Probe all LMACs; on failure, fall into the unwind ladder
	 * with lmac_idx at the index that failed
	 */
	count = bgx->count;
	for ( lmac_idx = 0 ; lmac_idx < count ; lmac_idx++ ) {
		if ( ( rc = txnic_lmac_probe ( &bgx->lmac[lmac_idx] ) ) != 0 )
			goto err_probe;
	}

	return 0;

	/* Deliberately unreachable: full-unwind entry point (nothing
	 * can currently fail after the probe loop)
	 */
	lmac_idx = count;
 err_probe:
	/* Remove only the LMACs that were successfully probed */
	for ( lmac_idx-- ; lmac_idx >= 0 ; lmac_idx-- )
		txnic_lmac_remove ( &bgx->lmac[lmac_idx] );
	pf->bgx[bgx_idx] = NULL;
	bgx->pf = NULL;
	return rc;
}
1211
1212 /**
1213 * Remove all LMACs on a BGX Ethernet interface
1214 *
1215 * @v pf Physical function
1216 * @v bgx BGX Ethernet interface
1217 */
txnic_lmac_remove_all(struct txnic_pf * pf,struct txnic_bgx * bgx)1218 static void txnic_lmac_remove_all ( struct txnic_pf *pf,
1219 struct txnic_bgx *bgx ) {
1220 unsigned int lmac_idx;
1221
1222 /* Sanity checks */
1223 assert ( pf->bgx[bgx->idx] == bgx );
1224 assert ( bgx->pf == pf );
1225
1226 /* Remove all LMACs */
1227 for ( lmac_idx = 0 ; lmac_idx < bgx->count ; lmac_idx++ )
1228 txnic_lmac_remove ( &bgx->lmac[lmac_idx] );
1229
1230 /* Disassociate BGX from physical function */
1231 pf->bgx[bgx->idx] = NULL;
1232 bgx->pf = NULL;
1233 }
1234
1235 /******************************************************************************
1236 *
1237 * NIC physical function interface
1238 *
1239 ******************************************************************************
1240 */
1241
1242 /**
1243 * Probe PCI device
1244 *
1245 * @v pci PCI device
1246 * @ret rc Return status code
1247 */
static int txnic_pf_probe ( struct pci_device *pci ) {
	struct txnic_pf *pf;
	struct txnic_bgx *bgx;
	unsigned long membase;
	unsigned int i;
	int rc;

	/* Allocate and initialise structure */
	pf = zalloc ( sizeof ( *pf ) );
	if ( ! pf ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	pf->pci = pci;
	pci_set_drvdata ( pci, pf );

	/* Get base addresses (PF BAR0 plus the per-VF BAR0 window and
	 * stride) via PCI Enhanced Allocation
	 */
	membase = pciea_bar_start ( pci, PCIEA_BEI_BAR_0 );
	pf->vf_membase = pciea_bar_start ( pci, PCIEA_BEI_VF_BAR_0 );
	pf->vf_stride = pciea_bar_size ( pci, PCIEA_BEI_VF_BAR_0 );

	/* Calculate node ID (derived from the BAR address) */
	pf->node = txnic_address_node ( membase );
	DBGC ( TXNICCOL ( pf ), "TXNIC %d/*/* PF %s at %#lx (VF %#lx+%#lx)\n",
	       pf->node, pci->dev.name, membase, pf->vf_membase, pf->vf_stride);

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	pf->regs = ioremap ( membase, TXNIC_PF_BAR_SIZE );
	if ( ! pf->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Configure physical function: enable PF and backpressure
	 * polling, then set per-BGX-interface send and backpressure
	 * configuration
	 */
	writeq ( TXNIC_PF_CFG_ENA, ( pf->regs + TXNIC_PF_CFG ) );
	writeq ( ( TXNIC_PF_BP_CFG_BP_POLL_ENA |
		   TXNIC_PF_BP_CFG_BP_POLL_DLY_DEFAULT ),
		 ( pf->regs + TXNIC_PF_BP_CFG ) );
	for ( i = 0 ; i < TXNIC_NUM_BGX ; i++ ) {
		writeq ( ( TXNIC_PF_INTF_SEND_CFG_BLOCK_BGX |
			   TXNIC_PF_INTF_SEND_CFG_BLOCK ( i ) ),
			 ( pf->regs + TXNIC_PF_INTF_SEND_CFG ( i ) ) );
		writeq ( ( TXNIC_PF_INTF_BP_CFG_BP_ENA |
			   TXNIC_PF_INTF_BP_CFG_BP_ID_BGX |
			   TXNIC_PF_INTF_BP_CFG_BP_ID ( i ) ),
			 ( pf->regs + TXNIC_PF_INTF_BP_CFG ( i ) ) );
	}
	/* Port kind 0: macro names suggest length-error reporting is
	 * enabled while min/max length checks are disabled —
	 * NOTE(review): confirm against ThunderX NIC PF documentation
	 */
	writeq ( ( TXNIC_PF_PKIND_CFG_LENERR_EN |
		   TXNIC_PF_PKIND_CFG_MAXLEN_DISABLE |
		   TXNIC_PF_PKIND_CFG_MINLEN_DISABLE ),
		 ( pf->regs + TXNIC_PF_PKIND_CFG(0) ) );

	/* Add to list of physical functions */
	list_add_tail ( &pf->list, &txnic_pfs );

	/* Probe LMACs on any already-registered BGX devices that share
	 * this PF's node
	 */
	list_for_each_entry ( bgx, &txnic_bgxs, list ) {
		if ( bgx->node != pf->node )
			continue;
		if ( ( rc = txnic_lmac_probe_all ( pf, bgx ) ) != 0 )
			goto err_probe;
	}

	return 0;

 err_probe:
	/* Undo every txnic_lmac_probe_all() that succeeded */
	for ( i = 0 ; i < TXNIC_NUM_BGX ; i++ ) {
		if ( pf->bgx[i] )
			txnic_lmac_remove_all ( pf, pf->bgx[i] );
	}
	list_del ( &pf->list );
	writeq ( 0, ( pf->regs + TXNIC_PF_CFG ) );
	iounmap ( pf->regs );
 err_ioremap:
	free ( pf );
 err_alloc:
	return rc;
}
1329
1330 /**
1331 * Remove PCI device
1332 *
1333 * @v pci PCI device
1334 */
txnic_pf_remove(struct pci_device * pci)1335 static void txnic_pf_remove ( struct pci_device *pci ) {
1336 struct txnic_pf *pf = pci_get_drvdata ( pci );
1337 unsigned int i;
1338
1339 /* Remove all LMACs, if applicable */
1340 for ( i = 0 ; i < TXNIC_NUM_BGX ; i++ ) {
1341 if ( pf->bgx[i] )
1342 txnic_lmac_remove_all ( pf, pf->bgx[i] );
1343 }
1344
1345 /* Remove from list of physical functions */
1346 list_del ( &pf->list );
1347
1348 /* Unmap registers */
1349 iounmap ( pf->regs );
1350
1351 /* Free physical function */
1352 free ( pf );
1353 }
1354
1355 /** NIC physical function PCI device IDs */
static struct pci_device_id txnic_pf_ids[] = {
	/* Cavium (vendor 0x177d) ThunderX NIC physical function */
	PCI_ROM ( 0x177d, 0xa01e, "thunder-pf", "ThunderX NIC PF", 0 ),
};
1359
1360 /** NIC physical function PCI driver */
struct pci_driver txnic_pf_driver __pci_driver = {
	.ids = txnic_pf_ids,
	/* Number of entries in the ID table */
	.id_count = ( sizeof ( txnic_pf_ids ) / sizeof ( txnic_pf_ids[0] ) ),
	.probe = txnic_pf_probe,
	.remove = txnic_pf_remove,
};
1367
1368 /******************************************************************************
1369 *
1370 * BGX interface
1371 *
1372 ******************************************************************************
1373 */
1374
1375 /** LMAC types */
static struct txnic_lmac_type txnic_lmac_types[] = {
	/* NOTE(review): lane_to_sds is consumed one byte per LMAC by
	 * txnic_bgx_init(), which shifts it right by 8 after each
	 * LMAC; exact lane-mapping semantics should be confirmed
	 * against the BGX hardware documentation.
	 */
	[TXNIC_LMAC_XAUI] = {
		.name = "XAUI",
		.count = 1,		/* single LMAC per interface */
		.lane_to_sds = 0xe4,
	},
	[TXNIC_LMAC_RXAUI] = {
		.name = "RXAUI",
		.count = 2,		/* two LMACs per interface */
		.lane_to_sds = 0x0e04,
	},
	[TXNIC_LMAC_10G_R] = {
		.name = "10GBASE-R",
		.count = 4,		/* four LMACs per interface */
		.lane_to_sds = 0x00000000,
	},
	[TXNIC_LMAC_40G_R] = {
		.name = "40GBASE-R",
		.count = 1,		/* single LMAC per interface */
		.lane_to_sds = 0xe4,
	},
};
1398
1399 /**
1400 * Detect BGX Ethernet interface LMAC type
1401 *
1402 * @v bgx BGX Ethernet interface
1403 * @ret type LMAC type, or negative error
1404 */
txnic_bgx_detect(struct txnic_bgx * bgx)1405 static int txnic_bgx_detect ( struct txnic_bgx *bgx ) {
1406 uint64_t config;
1407 uint64_t br_pmd_control;
1408 uint64_t rx_lmacs;
1409 unsigned int type;
1410
1411 /* We assume that the early (pre-UEFI) firmware will have
1412 * configured at least the LMAC 0 type and use of link
1413 * training, and may have overridden the number of LMACs.
1414 */
1415
1416 /* Determine type from LMAC 0 */
1417 config = readq ( bgx->regs + BGX_CMR_CONFIG );
1418 type = BGX_CMR_CONFIG_LMAC_TYPE_GET ( config );
1419 if ( ( type >= ( sizeof ( txnic_lmac_types ) /
1420 sizeof ( txnic_lmac_types[0] ) ) ) ||
1421 ( txnic_lmac_types[type].count == 0 ) ) {
1422 DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/* BGX unknown type %d\n",
1423 bgx->node, bgx->idx, type );
1424 return -ENOTTY;
1425 }
1426 bgx->type = &txnic_lmac_types[type];
1427
1428 /* Check whether link training is required */
1429 br_pmd_control = readq ( bgx->regs + BGX_SPU_BR_PMD_CONTROL );
1430 bgx->training =
1431 ( !! ( br_pmd_control & BGX_SPU_BR_PMD_CONTROL_TRAIN_EN ) );
1432
1433 /* Determine number of LMACs */
1434 rx_lmacs = readq ( bgx->regs + BGX_CMR_RX_LMACS );
1435 bgx->count = BGX_CMR_RX_LMACS_LMACS_GET ( rx_lmacs );
1436 if ( ( bgx->count == TXNIC_NUM_LMAC ) &&
1437 ( bgx->type->count != TXNIC_NUM_LMAC ) ) {
1438 DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/* assuming %d LMACs\n",
1439 bgx->node, bgx->idx, bgx->type->count );
1440 bgx->count = bgx->type->count;
1441 }
1442
1443 return type;
1444 }
1445
1446 /**
1447 * Initialise BGX Ethernet interface
1448 *
1449 * @v bgx BGX Ethernet interface
1450 * @v type LMAC type
1451 */
txnic_bgx_init(struct txnic_bgx * bgx,unsigned int type)1452 static void txnic_bgx_init ( struct txnic_bgx *bgx, unsigned int type ) {
1453 uint64_t global_config;
1454 uint32_t lane_to_sds;
1455 unsigned int i;
1456
1457 /* Set number of LMACs */
1458 writeq ( BGX_CMR_RX_LMACS_LMACS_SET ( bgx->count ),
1459 ( bgx->regs + BGX_CMR_RX_LMACS ) );
1460 writeq ( BGX_CMR_TX_LMACS_LMACS_SET ( bgx->count ),
1461 ( bgx->regs + BGX_CMR_TX_LMACS ) );
1462
1463 /* Set LMAC types and lane mappings, and disable all LMACs */
1464 lane_to_sds = bgx->type->lane_to_sds;
1465 for ( i = 0 ; i < bgx->count ; i++ ) {
1466 writeq ( ( BGX_CMR_CONFIG_LMAC_TYPE_SET ( type ) |
1467 BGX_CMR_CONFIG_LANE_TO_SDS ( lane_to_sds ) ),
1468 ( bgx->regs + BGX_LMAC ( i ) + BGX_CMR_CONFIG ) );
1469 lane_to_sds >>= 8;
1470 }
1471
1472 /* Reset all MAC address filtering */
1473 for ( i = 0 ; i < TXNIC_NUM_DMAC ; i++ )
1474 writeq ( 0, ( bgx->regs + BGX_CMR_RX_DMAC_CAM ( i ) ) );
1475
1476 /* Reset NCSI steering */
1477 for ( i = 0 ; i < TXNIC_NUM_STEERING ; i++ )
1478 writeq ( 0, ( bgx->regs + BGX_CMR_RX_STEERING ( i ) ) );
1479
1480 /* Enable backpressure to all channels */
1481 writeq ( BGX_CMR_CHAN_MSK_AND_ALL ( bgx->count ),
1482 ( bgx->regs + BGX_CMR_CHAN_MSK_AND ) );
1483
1484 /* Strip FCS */
1485 global_config = readq ( bgx->regs + BGX_CMR_GLOBAL_CONFIG );
1486 global_config |= BGX_CMR_GLOBAL_CONFIG_FCS_STRIP;
1487 writeq ( global_config, ( bgx->regs + BGX_CMR_GLOBAL_CONFIG ) );
1488 }
1489
1490 /**
1491 * Get MAC address
1492 *
1493 * @v lmac Logical MAC
1494 */
txnic_bgx_mac(struct txnic_lmac * lmac)1495 static void txnic_bgx_mac ( struct txnic_lmac *lmac ) {
1496 struct txnic_bgx *bgx = lmac->bgx;
1497 unsigned int lmac_idx = TXNIC_LMAC_IDX ( lmac->idx );
1498 uint64_t mac;
1499 EFI_STATUS efirc;
1500 int rc;
1501
1502 /* Extract MAC from Board Configuration protocol, if available */
1503 if ( txcfg ) {
1504 if ( ( efirc = txcfg->GetLmacProp ( txcfg, bgx->node, bgx->idx,
1505 lmac_idx, MAC_ADDRESS,
1506 sizeof ( mac ),
1507 &mac ) ) == 0 ) {
1508 lmac->mac.be64 = cpu_to_be64 ( mac );
1509 } else {
1510 rc = -EEFI ( efirc );
1511 DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/%d could not get "
1512 "MAC address: %s\n", bgx->node, bgx->idx,
1513 lmac->idx, strerror ( rc ) );
1514 }
1515 } else {
1516 DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/%d has no board "
1517 "configuration protocol\n", bgx->node, bgx->idx,
1518 lmac->idx );
1519 }
1520
1521 /* Use random MAC address if none available */
1522 if ( ! lmac->mac.be64 ) {
1523 DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/%d has no MAC address\n",
1524 bgx->node, bgx->idx, lmac->idx );
1525 eth_random_addr ( lmac->mac.raw );
1526 }
1527 }
1528
1529 /**
1530 * Initialise Super PHY Unit (SPU)
1531 *
1532 * @v lmac Logical MAC
1533 */
static void txnic_bgx_spu_init ( struct txnic_lmac *lmac ) {
	struct txnic_bgx *bgx = lmac->bgx;

	/* Reset PHY and wait for the reset to take effect */
	writeq ( BGX_SPU_CONTROL1_RESET, ( lmac->regs + BGX_SPU_CONTROL1 ) );
	mdelay ( BGX_SPU_RESET_DELAY_MS );

	/* Power down PHY while reconfiguring */
	writeq ( BGX_SPU_CONTROL1_LO_PWR, ( lmac->regs + BGX_SPU_CONTROL1 ) );

	/* Configure link training, if firmware enabled it: clear what
	 * are presumably the coefficient update/report registers
	 * before enabling training — NOTE(review): confirm register
	 * semantics against BGX SPU documentation
	 */
	if ( bgx->training ) {
		writeq ( 0, ( lmac->regs + BGX_SPU_BR_PMD_LP_CUP ) );
		writeq ( 0, ( lmac->regs + BGX_SPU_BR_PMD_LD_CUP ) );
		writeq ( 0, ( lmac->regs + BGX_SPU_BR_PMD_LD_REP ) );
		writeq ( BGX_SPU_BR_PMD_CONTROL_TRAIN_EN,
			 ( lmac->regs + BGX_SPU_BR_PMD_CONTROL ) );
	}

	/* Disable forward error correction */
	writeq ( 0, ( lmac->regs + BGX_SPU_FEC_CONTROL ) );

	/* Disable autonegotiation */
	writeq ( 0, ( lmac->regs + BGX_SPU_AN_CONTROL ) );

	/* Power up PHY (clears LO_PWR) */
	writeq ( 0, ( lmac->regs + BGX_SPU_CONTROL1 ) );
}
1562
1563 /**
1564 * Initialise LMAC
1565 *
1566 * @v bgx BGX Ethernet interface
1567 * @v lmac_idx LMAC index
1568 */
txnic_bgx_lmac_init(struct txnic_bgx * bgx,unsigned int lmac_idx)1569 static void txnic_bgx_lmac_init ( struct txnic_bgx *bgx,
1570 unsigned int lmac_idx ) {
1571 struct txnic_lmac *lmac = &bgx->lmac[lmac_idx];
1572 uint64_t config;
1573
1574 /* Record associated BGX */
1575 lmac->bgx = bgx;
1576
1577 /* Set register base address (already mapped) */
1578 lmac->regs = ( bgx->regs + BGX_LMAC ( lmac_idx ) );
1579
1580 /* Calculate virtual NIC index */
1581 lmac->idx = TXNIC_VNIC_IDX ( bgx->idx, lmac_idx );
1582
1583 /* Set MAC address */
1584 txnic_bgx_mac ( lmac );
1585
1586 /* Initialise PHY */
1587 txnic_bgx_spu_init ( lmac );
1588
1589 /* Accept all multicasts and broadcasts */
1590 writeq ( ( BGX_CMR_RX_DMAC_CTL_MCST_MODE_ACCEPT |
1591 BGX_CMR_RX_DMAC_CTL_BCST_ACCEPT ),
1592 ( lmac->regs + BGX_CMR_RX_DMAC_CTL ) );
1593
1594 /* Enable LMAC */
1595 config = readq ( lmac->regs + BGX_CMR_CONFIG );
1596 config |= ( BGX_CMR_CONFIG_ENABLE |
1597 BGX_CMR_CONFIG_DATA_PKT_RX_EN |
1598 BGX_CMR_CONFIG_DATA_PKT_TX_EN );
1599 writeq ( config, ( lmac->regs + BGX_CMR_CONFIG ) );
1600 }
1601
1602 /**
1603 * Probe PCI device
1604 *
1605 * @v pci PCI device
1606 * @ret rc Return status code
1607 */
txnic_bgx_probe(struct pci_device * pci)1608 static int txnic_bgx_probe ( struct pci_device *pci ) {
1609 struct txnic_bgx *bgx;
1610 struct txnic_pf *pf;
1611 unsigned long membase;
1612 unsigned int i;
1613 int type;
1614 int rc;
1615
1616 /* Allocate and initialise structure */
1617 bgx = zalloc ( sizeof ( *bgx ) );
1618 if ( ! bgx ) {
1619 rc = -ENOMEM;
1620 goto err_alloc;
1621 }
1622 bgx->pci = pci;
1623 pci_set_drvdata ( pci, bgx );
1624
1625 /* Get base address */
1626 membase = pciea_bar_start ( pci, PCIEA_BEI_BAR_0 );
1627
1628 /* Calculate node ID and index */
1629 bgx->node = txnic_address_node ( membase );
1630 bgx->idx = txnic_address_bgx ( membase );
1631
1632 /* Fix up PCI device */
1633 adjust_pci_device ( pci );
1634
1635 /* Map registers */
1636 bgx->regs = ioremap ( membase, TXNIC_BGX_BAR_SIZE );
1637 if ( ! bgx->regs ) {
1638 rc = -ENODEV;
1639 goto err_ioremap;
1640 }
1641
1642 /* Detect LMAC type */
1643 if ( ( type = txnic_bgx_detect ( bgx ) ) < 0 ) {
1644 rc = type;
1645 goto err_detect;
1646 }
1647 DBGC ( TXNICCOL ( bgx ), "TXNIC %d/%d/* BGX %s at %#lx %dx %s%s\n",
1648 bgx->node, bgx->idx, pci->dev.name, membase, bgx->count,
1649 bgx->type->name, ( bgx->training ? "(training)" : "" ) );
1650
1651 /* Initialise interface */
1652 txnic_bgx_init ( bgx, type );
1653
1654 /* Initialise all LMACs */
1655 for ( i = 0 ; i < bgx->count ; i++ )
1656 txnic_bgx_lmac_init ( bgx, i );
1657
1658 /* Add to list of BGX devices */
1659 list_add_tail ( &bgx->list, &txnic_bgxs );
1660
1661 /* Probe all LMACs, if applicable */
1662 list_for_each_entry ( pf, &txnic_pfs, list ) {
1663 if ( pf->node != bgx->node )
1664 continue;
1665 if ( ( rc = txnic_lmac_probe_all ( pf, bgx ) ) != 0 )
1666 goto err_probe;
1667 }
1668
1669 return 0;
1670
1671 if ( bgx->pf )
1672 txnic_lmac_remove_all ( bgx->pf, bgx );
1673 list_del ( &bgx->list );
1674 err_probe:
1675 err_detect:
1676 iounmap ( bgx->regs );
1677 err_ioremap:
1678 free ( bgx );
1679 err_alloc:
1680 return rc;
1681 }
1682
1683 /**
1684 * Remove PCI device
1685 *
1686 * @v pci PCI device
1687 */
txnic_bgx_remove(struct pci_device * pci)1688 static void txnic_bgx_remove ( struct pci_device *pci ) {
1689 struct txnic_bgx *bgx = pci_get_drvdata ( pci );
1690
1691 /* Remove all LMACs, if applicable */
1692 if ( bgx->pf )
1693 txnic_lmac_remove_all ( bgx->pf, bgx );
1694
1695 /* Remove from list of BGX devices */
1696 list_del ( &bgx->list );
1697
1698 /* Unmap registers */
1699 iounmap ( bgx->regs );
1700
1701 /* Free BGX device */
1702 free ( bgx );
1703 }
1704
1705 /** BGX PCI device IDs */
static struct pci_device_id txnic_bgx_ids[] = {
	/* Cavium (vendor 0x177d) ThunderX BGX Ethernet interface */
	PCI_ROM ( 0x177d, 0xa026, "thunder-bgx", "ThunderX BGX", 0 ),
};
1709
1710 /** BGX PCI driver */
struct pci_driver txnic_bgx_driver __pci_driver = {
	.ids = txnic_bgx_ids,
	/* Number of entries in the ID table */
	.id_count = ( sizeof ( txnic_bgx_ids ) / sizeof ( txnic_bgx_ids[0] ) ),
	.probe = txnic_bgx_probe,
	.remove = txnic_bgx_remove,
};
1717