1 /*
2 * forcedeth.c -- Driver for NVIDIA nForce media access controllers for iPXE
3 * Copyright (c) 2010 Andrei Faur <da3drus@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation; either version 2 of the
8 * License, or any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 *
20 * Portions of this code are taken from the Linux forcedeth driver that was
21 * based on a cleanroom reimplementation which was based on reverse engineered
22 * documentation written by Carl-Daniel Hailfinger and Andrew de Quincey:
23 * Copyright (C) 2003,4,5 Manfred Spraul
24 * Copyright (C) 2004 Andrew de Quincey (wol support)
25 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
26 * IRQ rate fixes, bigendian fixes, cleanups, verification)
27 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
28 *
29 * The probe, remove, open and close functions, along with the functions they
30 * call, are direct copies of the above mentioned driver, modified where
31 * necessary to make them work for iPXE.
32 *
33 * The poll and transmit functions were completely rewritten to make use of
34 * the iPXE API. This process was aided by constant referencing of the above
35 * mentioned Linux driver. This driver would not have been possible without this
36 * prior work.
37 *
38 */
39
40 FILE_LICENCE ( GPL2_OR_LATER );
41
42 #include <stdint.h>
43 #include <stdio.h>
44 #include <stdlib.h>
45 #include <string.h>
46 #include <unistd.h>
47 #include <assert.h>
48 #include <byteswap.h>
49 #include <errno.h>
50 #include <ipxe/ethernet.h>
51 #include <ipxe/if_ether.h>
52 #include <ipxe/io.h>
53 #include <ipxe/iobuf.h>
54 #include <ipxe/malloc.h>
55 #include <ipxe/netdevice.h>
56 #include <ipxe/crypto.h>
57 #include <ipxe/pci.h>
58 #include <ipxe/timer.h>
59 #include <mii.h>
60 #include "forcedeth.h"
61
/**
 * pci_push - Flush posted PCI writes out to the device
 *
 * @v ioaddr	Device MMIO base address
 *
 * Issues a write memory barrier and then performs a read from the
 * device, which forces any pending posted writes to reach the
 * hardware before execution continues.
 */
static inline void pci_push ( void *ioaddr )
{
	/* force out pending posted writes */
	wmb();
	readl ( ioaddr );
}
68
69 static int
reg_delay(struct forcedeth_private * priv,int offset,u32 mask,u32 target,int delay,int delaymax,const char * msg)70 reg_delay ( struct forcedeth_private *priv, int offset, u32 mask,
71 u32 target, int delay, int delaymax, const char *msg )
72 {
73 void *ioaddr = priv->mmio_addr;
74
75 pci_push ( ioaddr );
76 do {
77 udelay ( delay );
78 delaymax -= delay;
79 if ( delaymax < 0 ) {
80 if ( msg )
81 DBG ( "%s\n", msg );
82 return 1;
83 }
84 } while ( ( readl ( ioaddr + offset ) & mask ) != target );
85
86 return 0;
87 }
88
/**
 * mii_rw - Read or write a PHY register over the MII management bus
 *
 * @v priv	Driver private structure
 * @v addr	PHY address
 * @v miireg	MII register number
 * @v value	Value to write, or MII_READ to perform a read
 *
 * @ret retval	Value read (for reads), 0 (for successful writes),
 *		or -1 on timeout/error
 */
static int
mii_rw ( struct forcedeth_private *priv, int addr, int miireg, int value )
{
	void *ioaddr = priv->mmio_addr;
	u32 reg;
	int retval;

	/* Clear any latched MII status before starting the transaction */
	writel ( NVREG_MIISTAT_MASK_RW, ioaddr + NvRegMIIStatus );

	reg = readl ( ioaddr + NvRegMIIControl );
	if ( reg & NVREG_MIICTL_INUSE ) {
		/* Abort an in-flight transaction and give the bus time
		 * to become idle */
		writel ( NVREG_MIICTL_INUSE, ioaddr + NvRegMIIControl );
		udelay ( NV_MIIBUSY_DELAY );
	}

	/* Build the control word: PHY address plus register number */
	reg = ( addr << NVREG_MIICTL_ADDRSHIFT ) | miireg;
	if ( value != MII_READ ) {
		/* Data must be written before the control word kicks
		 * off the write transaction */
		writel ( value, ioaddr + NvRegMIIData );
		reg |= NVREG_MIICTL_WRITE;
	}
	writel ( reg, ioaddr + NvRegMIIControl );

	/* Wait for the INUSE bit to clear, indicating completion */
	if ( reg_delay ( priv, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL ) ) {
		DBG ( "mii_rw of reg %d at PHY %d timed out.\n",
			miireg, addr );
		retval = -1;
	} else if ( value != MII_READ ) {
		/* it was a write operation - fewer failures are detectable */
		DBG ( "mii_rw wrote 0x%x to reg %d at PHY %d\n",
			value, miireg, addr );
		retval = 0;
	} else if ( readl ( ioaddr + NvRegMIIStatus ) & NVREG_MIISTAT_ERROR ) {
		DBG ( "mii_rw of reg %d at PHY %d failed.\n",
			miireg, addr );
		retval = -1;
	} else {
		retval = readl ( ioaddr + NvRegMIIData );
		DBG ( "mii_rw read from reg %d at PHY %d: 0x%x.\n",
			miireg, addr, retval );
	}

	return retval;
}
134
135 static void
nv_txrx_gate(struct forcedeth_private * priv,int gate)136 nv_txrx_gate ( struct forcedeth_private *priv, int gate )
137 {
138 void *ioaddr = priv->mmio_addr;
139 u32 powerstate;
140
141 if ( ! priv->mac_in_use &&
142 ( priv->driver_data & DEV_HAS_POWER_CNTRL ) ) {
143 powerstate = readl ( ioaddr + NvRegPowerState2 );
144 if ( gate )
145 powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
146 else
147 powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
148 writel ( powerstate, ioaddr + NvRegPowerState2 );
149 }
150 }
151
/**
 * nv_mac_reset - Reset the MAC while preserving its configuration
 *
 * @v priv	Driver private structure
 *
 * The MAC address and transmit-poll registers are cleared by the
 * reset, so they are saved beforehand and restored afterwards.
 */
static void
nv_mac_reset ( struct forcedeth_private * priv )
{
	void *ioaddr = priv->mmio_addr;
	u32 temp1, temp2, temp3;

	/* Put the TX/RX engines into reset before touching the MAC */
	writel ( NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | NVREG_TXRXCTL_DESC_1,
		 ioaddr + NvRegTxRxControl );
	pci_push ( ioaddr );

	/* save registers since they will be cleared on reset */
	temp1 = readl ( ioaddr + NvRegMacAddrA );
	temp2 = readl ( ioaddr + NvRegMacAddrB );
	temp3 = readl ( ioaddr + NvRegTransmitPoll );

	/* Pulse the MAC reset line; delays are required on both edges */
	writel ( NVREG_MAC_RESET_ASSERT, ioaddr + NvRegMacReset );
	pci_push ( ioaddr );
	udelay ( NV_MAC_RESET_DELAY );
	writel ( 0, ioaddr + NvRegMacReset );
	pci_push ( ioaddr );
	udelay ( NV_MAC_RESET_DELAY );

	/* restore saved registers */
	writel ( temp1, ioaddr + NvRegMacAddrA );
	writel ( temp2, ioaddr + NvRegMacAddrB );
	writel ( temp3, ioaddr + NvRegTransmitPoll );

	/* Take the TX/RX engines back out of reset */
	writel ( NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_DESC_1,
		 ioaddr + NvRegTxRxControl );
	pci_push ( ioaddr );
}
183
184 static void
nv_init_tx_ring(struct forcedeth_private * priv)185 nv_init_tx_ring ( struct forcedeth_private *priv )
186 {
187 int i;
188
189 for ( i = 0; i < TX_RING_SIZE; i++ ) {
190 priv->tx_ring[i].flaglen = 0;
191 priv->tx_ring[i].buf = 0;
192 priv->tx_iobuf[i] = NULL;
193 }
194
195 priv->tx_fill_ctr = 0;
196 priv->tx_curr = 0;
197 priv->tx_tail = 0;
198 }
199
/**
 * nv_alloc_rx - Allocates iobufs for every Rx descriptor
 * that doesn't have one and isn't in use by the hardware
 *
 * @v priv	Driver private structure
 *
 * Called at ring initialization and again after every poll to refill
 * descriptors whose packets have been consumed.  Allocation failures
 * are not fatal; the descriptor is simply retried on the next poll.
 */
static void
nv_alloc_rx ( struct forcedeth_private *priv )
{
	struct ring_desc *rx_curr_desc;
	int i;
	u32 status;

	DBGP ( "nv_alloc_rx\n" );

	for ( i = 0; i < RX_RING_SIZE; i++ ) {
		rx_curr_desc = priv->rx_ring + i;
		status = le32_to_cpu ( rx_curr_desc->flaglen );

		/* Don't touch the descriptors owned by the hardware */
		if ( status & NV_RX_AVAIL )
			continue;

		/* Descriptors with iobufs still need to be processed */
		if ( priv->rx_iobuf[i] != NULL )
			continue;

		/* If alloc_iob fails, try again later (next poll) */
		if ( ! ( priv->rx_iobuf[i] = alloc_iob ( RX_BUF_SZ ) ) ) {
			DBG ( "Refill rx_ring failed, size %d\n", RX_BUF_SZ );
			break;
		}

		/* The buffer address must be visible to the NIC before
		 * the AVAIL bit hands the descriptor to the hardware,
		 * hence the barrier between the two stores */
		rx_curr_desc->buf =
			cpu_to_le32 ( virt_to_bus ( priv->rx_iobuf[i]->data ) );
		wmb();
		rx_curr_desc->flaglen =
			cpu_to_le32 ( RX_BUF_SZ | NV_RX_AVAIL );
	}
}
240
241 static void
nv_init_rx_ring(struct forcedeth_private * priv)242 nv_init_rx_ring ( struct forcedeth_private *priv )
243 {
244 int i;
245
246 for ( i = 0; i < RX_RING_SIZE; i++ ) {
247 priv->rx_ring[i].flaglen = 0;
248 priv->rx_ring[i].buf = 0;
249 priv->rx_iobuf[i] = NULL;
250 }
251
252 priv->rx_curr = 0;
253 }
254
/**
 * nv_init_rings - Allocate and intialize descriptor rings
 *
 * @v priv	Driver private structure
 *
 * @ret rc	Return status code, 0 on success, -ENOMEM on failure
 *
 * Allocates one contiguous DMA area holding the RX ring followed by
 * the TX ring, initializes both, pre-fills the RX ring with iobufs
 * and programs the ring addresses and sizes into the hardware.
 **/
static int
nv_init_rings ( struct forcedeth_private *priv )
{
	void *ioaddr = priv->mmio_addr;
	int rc = -ENOMEM;

	/* Allocate ring for both TX and RX */
	priv->rx_ring =
		malloc_dma ( sizeof(struct ring_desc) * RXTX_RING_SIZE, 32 );
	if ( ! priv->rx_ring )
		goto err_malloc;
	/* TX ring lives immediately after the RX ring in the same
	 * DMA allocation */
	priv->tx_ring = &priv->rx_ring[RX_RING_SIZE];

	/* Initialize rings */
	nv_init_tx_ring ( priv );
	nv_init_rx_ring ( priv );

	/* Allocate iobufs for RX */
	nv_alloc_rx ( priv );

	/* Give hw rings */
	writel ( cpu_to_le32 ( virt_to_bus ( priv->rx_ring ) ),
		 ioaddr + NvRegRxRingPhysAddr );
	writel ( cpu_to_le32 ( virt_to_bus ( priv->tx_ring ) ),
		 ioaddr + NvRegTxRingPhysAddr );

	DBG ( "RX ring at phys addr: %#08lx\n",
		virt_to_bus ( priv->rx_ring ) );
	DBG ( "TX ring at phys addr: %#08lx\n",
		virt_to_bus ( priv->tx_ring ) );

	/* Ring sizes are programmed as (size - 1) in a shared register */
	writel ( ( ( RX_RING_SIZE - 1 ) << NVREG_RINGSZ_RXSHIFT ) +
		 ( ( TX_RING_SIZE - 1 ) << NVREG_RINGSZ_TXSHIFT ),
		 ioaddr + NvRegRingSizes );

	return 0;

err_malloc:
	DBG ( "Could not allocate descriptor rings\n");
	return rc;
}
303
304 static void
nv_free_rxtx_resources(struct forcedeth_private * priv)305 nv_free_rxtx_resources ( struct forcedeth_private *priv )
306 {
307 int i;
308
309 DBGP ( "nv_free_rxtx_resources\n" );
310
311 free_dma ( priv->rx_ring, sizeof(struct ring_desc) * RXTX_RING_SIZE );
312
313 for ( i = 0; i < RX_RING_SIZE; i++ ) {
314 free_iob ( priv->rx_iobuf[i] );
315 priv->rx_iobuf[i] = NULL;
316 }
317 }
318
/**
 * nv_txrx_reset - Pulse the TX/RX engine reset bit
 *
 * @v priv	Driver private structure
 *
 * Asserts the reset bit, waits the required settle time, then
 * deasserts it, flushing posted writes around both edges.
 */
static void
nv_txrx_reset ( struct forcedeth_private *priv )
{
	void *ioaddr = priv->mmio_addr;

	writel ( NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | NVREG_TXRXCTL_DESC_1,
		 ioaddr + NvRegTxRxControl );
	pci_push ( ioaddr );
	udelay ( NV_TXRX_RESET_DELAY );
	writel ( NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_DESC_1,
		 ioaddr + NvRegTxRxControl );
	pci_push ( ioaddr );
}
332
333 static void
nv_disable_hw_interrupts(struct forcedeth_private * priv)334 nv_disable_hw_interrupts ( struct forcedeth_private *priv )
335 {
336 void *ioaddr = priv->mmio_addr;
337
338 writel ( 0, ioaddr + NvRegIrqMask );
339 pci_push ( ioaddr );
340 }
341
342 static void
nv_enable_hw_interrupts(struct forcedeth_private * priv)343 nv_enable_hw_interrupts ( struct forcedeth_private *priv )
344 {
345 void *ioaddr = priv->mmio_addr;
346
347 writel ( NVREG_IRQMASK_THROUGHPUT, ioaddr + NvRegIrqMask );
348 }
349
/**
 * nv_start_rx - Start the receiver
 *
 * @v priv	Driver private structure
 */
static void
nv_start_rx ( struct forcedeth_private *priv )
{
	void *ioaddr = priv->mmio_addr;
	u32 rx_ctrl = readl ( ioaddr + NvRegReceiverControl );

	DBGP ( "nv_start_rx\n" );
	/* Already running? Stop it. */
	if ( ( readl ( ioaddr + NvRegReceiverControl ) & NVREG_RCVCTL_START ) && !priv->mac_in_use ) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel ( rx_ctrl, ioaddr + NvRegReceiverControl );
		pci_push ( ioaddr );
	}
	/* Link speed must be programmed before the receiver is started */
	writel ( priv->linkspeed, ioaddr + NvRegLinkSpeed );
	pci_push ( ioaddr );
	rx_ctrl |= NVREG_RCVCTL_START;
	if ( priv->mac_in_use )
		/* Management unit owns the RX path; don't enable it */
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel ( rx_ctrl, ioaddr + NvRegReceiverControl );
	DBG ( "nv_start_rx to duplex %d, speed 0x%08x.\n",
		priv->duplex, priv->linkspeed);
	pci_push ( ioaddr );
}
373
374 static void
nv_stop_rx(struct forcedeth_private * priv)375 nv_stop_rx ( struct forcedeth_private *priv )
376 {
377 void *ioaddr = priv->mmio_addr;
378 u32 rx_ctrl = readl ( ioaddr + NvRegReceiverControl );
379
380 DBGP ( "nv_stop_rx\n" );
381 if ( ! priv->mac_in_use )
382 rx_ctrl &= ~NVREG_RCVCTL_START;
383 else
384 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
385 writel ( rx_ctrl, ioaddr + NvRegReceiverControl );
386 reg_delay ( priv, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
387 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
388 "nv_stop_rx: ReceiverStatus remained busy");
389
390 udelay ( NV_RXSTOP_DELAY2 );
391 if ( ! priv->mac_in_use )
392 writel ( 0, priv + NvRegLinkSpeed );
393 }
394
395 static void
nv_start_tx(struct forcedeth_private * priv)396 nv_start_tx ( struct forcedeth_private *priv )
397 {
398 void *ioaddr = priv->mmio_addr;
399 u32 tx_ctrl = readl ( ioaddr + NvRegTransmitterControl );
400
401 DBGP ( "nv_start_tx\n" );
402 tx_ctrl |= NVREG_XMITCTL_START;
403 if ( priv->mac_in_use )
404 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
405 writel ( tx_ctrl, ioaddr + NvRegTransmitterControl );
406 pci_push ( ioaddr );
407 }
408
409 static void
nv_stop_tx(struct forcedeth_private * priv)410 nv_stop_tx ( struct forcedeth_private *priv )
411 {
412 void *ioaddr = priv->mmio_addr;
413 u32 tx_ctrl = readl ( ioaddr + NvRegTransmitterControl );
414
415 DBGP ( "nv_stop_tx");
416
417 if ( ! priv->mac_in_use )
418 tx_ctrl &= ~NVREG_XMITCTL_START;
419 else
420 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
421 writel ( tx_ctrl, ioaddr + NvRegTransmitterControl );
422 reg_delay ( priv, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
423 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
424 "nv_stop_tx: TransmitterStatus remained busy");
425
426 udelay ( NV_TXSTOP_DELAY2 );
427 if ( ! priv->mac_in_use )
428 writel( readl ( ioaddr + NvRegTransmitPoll) &
429 NVREG_TRANSMITPOLL_MAC_ADDR_REV,
430 ioaddr + NvRegTransmitPoll);
431 }
432
433
/**
 * nv_update_pause - Program pause-frame (flow control) configuration
 *
 * @v priv		Driver private structure
 * @v pause_flags	Requested NV_PAUSEFRAME_{TX,RX}_ENABLE flags
 *
 * Updates the packet filter and MISC1 registers to match the
 * requested pause configuration, constrained by what the hardware is
 * capable of (NV_PAUSEFRAME_{TX,RX}_CAPABLE).  priv->pause_flags is
 * updated to reflect what was actually enabled.
 */
static void
nv_update_pause ( struct forcedeth_private *priv, u32 pause_flags )
{
	void *ioaddr = priv->mmio_addr;

	/* Start from "both directions disabled" and re-enable below */
	priv->pause_flags &= ~ ( NV_PAUSEFRAME_TX_ENABLE |
				 NV_PAUSEFRAME_RX_ENABLE );

	if ( priv->pause_flags & NV_PAUSEFRAME_RX_CAPABLE ) {
		u32 pff = readl ( ioaddr + NvRegPacketFilterFlags ) & ~NVREG_PFF_PAUSE_RX;
		if ( pause_flags & NV_PAUSEFRAME_RX_ENABLE ) {
			writel ( pff | NVREG_PFF_PAUSE_RX, ioaddr + NvRegPacketFilterFlags );
			priv->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel ( pff, ioaddr + NvRegPacketFilterFlags );
		}
	}
	if ( priv->pause_flags & NV_PAUSEFRAME_TX_CAPABLE ) {
		u32 regmisc = readl ( ioaddr + NvRegMisc1 ) & ~NVREG_MISC1_PAUSE_TX;
		if ( pause_flags & NV_PAUSEFRAME_TX_ENABLE ) {
			/* Pick the pause-frame enable value matching the
			 * hardware generation */
			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
			if ( priv->driver_data & DEV_HAS_PAUSEFRAME_TX_V2 )
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
			if ( priv->driver_data & DEV_HAS_PAUSEFRAME_TX_V3 ) {
				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
				/* limit the number of tx pause frames to a default of 8 */
				writel ( readl ( ioaddr + NvRegTxPauseFrameLimit ) |
					 NVREG_TX_PAUSEFRAMELIMIT_ENABLE,
					 ioaddr + NvRegTxPauseFrameLimit );
			}
			writel ( pause_enable, ioaddr + NvRegTxPauseFrame );
			writel ( regmisc | NVREG_MISC1_PAUSE_TX, ioaddr + NvRegMisc1 );
			priv->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel ( NVREG_TX_PAUSEFRAME_DISABLE, ioaddr + NvRegTxPauseFrame );
			writel ( regmisc, ioaddr + NvRegMisc1 );
		}
	}
}
473
474 static int
nv_update_linkspeed(struct forcedeth_private * priv)475 nv_update_linkspeed ( struct forcedeth_private *priv )
476 {
477 void *ioaddr = priv->mmio_addr;
478 int adv = 0;
479 int lpa = 0;
480 int adv_lpa, adv_pause, lpa_pause;
481 u32 newls = priv->linkspeed;
482 int newdup = priv->duplex;
483 int mii_status;
484 int retval = 0;
485 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
486 u32 txrxFlags = 0;
487 u32 phy_exp;
488
489 /* BMSR_LSTATUS is latched, read it twice:
490 * we want the current value.
491 */
492 mii_rw ( priv, priv->phyaddr, MII_BMSR, MII_READ );
493 mii_status = mii_rw ( priv, priv->phyaddr, MII_BMSR, MII_READ );
494
495 if ( ! ( mii_status & BMSR_LSTATUS ) ) {
496 DBG ( "No link detected by phy - falling back to 10HD.\n" );
497 newls = NVREG_LINKSPEED_FORCE | NVREG_LINKSPEED_10;
498 newdup = 0;
499 retval = 0;
500 goto set_speed;
501 }
502
503 /* check auto negotiation is complete */
504 if ( ! ( mii_status & BMSR_ANEGCOMPLETE ) ) {
505 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
506 newls = NVREG_LINKSPEED_FORCE | NVREG_LINKSPEED_10;
507 newdup = 0;
508 retval = 0;
509 DBG ( "autoneg not completed - falling back to 10HD.\n" );
510 goto set_speed;
511 }
512
513 adv = mii_rw ( priv, priv->phyaddr, MII_ADVERTISE, MII_READ );
514 lpa = mii_rw ( priv, priv->phyaddr, MII_LPA, MII_READ );
515 DBG ( "nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", adv, lpa );
516
517 retval = 1;
518 if ( priv->gigabit == PHY_GIGABIT ) {
519 control_1000 = mii_rw ( priv, priv->phyaddr, MII_CTRL1000, MII_READ);
520 status_1000 = mii_rw ( priv, priv->phyaddr, MII_STAT1000, MII_READ);
521
522 if ( ( control_1000 & ADVERTISE_1000FULL ) &&
523 ( status_1000 & LPA_1000FULL ) ) {
524 DBG ( "nv_update_linkspeed: GBit ethernet detected.\n" );
525 newls = NVREG_LINKSPEED_FORCE | NVREG_LINKSPEED_1000;
526 newdup = 1;
527 goto set_speed;
528 }
529 }
530
531 /* FIXME: handle parallel detection properly */
532 adv_lpa = lpa & adv;
533 if ( adv_lpa & LPA_100FULL ) {
534 newls = NVREG_LINKSPEED_FORCE | NVREG_LINKSPEED_100;
535 newdup = 1;
536 } else if ( adv_lpa & LPA_100HALF ) {
537 newls = NVREG_LINKSPEED_FORCE | NVREG_LINKSPEED_100;
538 newdup = 0;
539 } else if ( adv_lpa & LPA_10FULL ) {
540 newls = NVREG_LINKSPEED_FORCE | NVREG_LINKSPEED_10;
541 newdup = 1;
542 } else if ( adv_lpa & LPA_10HALF ) {
543 newls = NVREG_LINKSPEED_FORCE | NVREG_LINKSPEED_10;
544 newdup = 0;
545 } else {
546 DBG ( "bad ability %04x - falling back to 10HD.\n", adv_lpa);
547 newls = NVREG_LINKSPEED_FORCE | NVREG_LINKSPEED_10;
548 newdup = 0;
549 }
550
551 set_speed:
552 if ( priv->duplex == newdup && priv->linkspeed == newls )
553 return retval;
554
555 DBG ( "changing link setting from %d/%d to %d/%d.\n",
556 priv->linkspeed, priv->duplex, newls, newdup);
557
558 priv->duplex = newdup;
559 priv->linkspeed = newls;
560
561 /* The transmitter and receiver must be restarted for safe update */
562 if ( readl ( ioaddr + NvRegTransmitterControl ) & NVREG_XMITCTL_START ) {
563 txrxFlags |= NV_RESTART_TX;
564 nv_stop_tx ( priv );
565 }
566 if ( readl ( ioaddr + NvRegReceiverControl ) & NVREG_RCVCTL_START) {
567 txrxFlags |= NV_RESTART_RX;
568 nv_stop_rx ( priv );
569 }
570
571 if ( priv->gigabit == PHY_GIGABIT ) {
572 phyreg = readl ( ioaddr + NvRegSlotTime );
573 phyreg &= ~(0x3FF00);
574 if ( ( ( priv->linkspeed & 0xFFF ) == NVREG_LINKSPEED_10 ) ||
575 ( ( priv->linkspeed & 0xFFF ) == NVREG_LINKSPEED_100) )
576 phyreg |= NVREG_SLOTTIME_10_100_FULL;
577 else if ( ( priv->linkspeed & 0xFFF ) == NVREG_LINKSPEED_1000 )
578 phyreg |= NVREG_SLOTTIME_1000_FULL;
579 writel( phyreg, priv + NvRegSlotTime );
580 }
581
582 phyreg = readl ( ioaddr + NvRegPhyInterface );
583 phyreg &= ~( PHY_HALF | PHY_100 | PHY_1000 );
584 if ( priv->duplex == 0 )
585 phyreg |= PHY_HALF;
586 if ( ( priv->linkspeed & NVREG_LINKSPEED_MASK ) == NVREG_LINKSPEED_100 )
587 phyreg |= PHY_100;
588 else if ( ( priv->linkspeed & NVREG_LINKSPEED_MASK ) == NVREG_LINKSPEED_1000 )
589 phyreg |= PHY_1000;
590 writel ( phyreg, ioaddr + NvRegPhyInterface );
591
592 phy_exp = mii_rw ( priv, priv->phyaddr, MII_EXPANSION, MII_READ ) & EXPANSION_NWAY; /* autoneg capable */
593 if ( phyreg & PHY_RGMII ) {
594 if ( ( priv->linkspeed & NVREG_LINKSPEED_MASK ) == NVREG_LINKSPEED_1000 ) {
595 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
596 } else {
597 if ( !phy_exp && !priv->duplex && ( priv->driver_data & DEV_HAS_COLLISION_FIX ) ) {
598 if ( ( priv->linkspeed & NVREG_LINKSPEED_MASK ) == NVREG_LINKSPEED_10 )
599 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
600 else
601 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
602 } else {
603 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
604 }
605 }
606 } else {
607 if ( !phy_exp && !priv->duplex && ( priv->driver_data & DEV_HAS_COLLISION_FIX ) )
608 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
609 else
610 txreg = NVREG_TX_DEFERRAL_DEFAULT;
611 }
612 writel ( txreg, ioaddr + NvRegTxDeferral );
613
614 txreg = NVREG_TX_WM_DESC1_DEFAULT;
615 writel ( txreg, ioaddr + NvRegTxWatermark );
616
617 writel ( NVREG_MISC1_FORCE | ( priv->duplex ? 0 : NVREG_MISC1_HD ), ioaddr + NvRegMisc1 );
618 pci_push ( ioaddr );
619 writel ( priv->linkspeed, priv + NvRegLinkSpeed);
620 pci_push ( ioaddr );
621
622 pause_flags = 0;
623 /* setup pause frame */
624 if ( priv->duplex != 0 ) {
625 if ( priv->pause_flags & NV_PAUSEFRAME_AUTONEG ) {
626 adv_pause = adv & ( ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM );
627 lpa_pause = lpa & ( LPA_PAUSE_CAP | LPA_PAUSE_ASYM );
628
629 switch ( adv_pause ) {
630 case ADVERTISE_PAUSE_CAP:
631 if ( lpa_pause & LPA_PAUSE_CAP ) {
632 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
633 if ( priv->pause_flags & NV_PAUSEFRAME_TX_REQ )
634 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
635 }
636 break;
637 case ADVERTISE_PAUSE_ASYM:
638 if ( lpa_pause == ( LPA_PAUSE_CAP | LPA_PAUSE_ASYM ) )
639 {
640 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
641 }
642 break;
643 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
644 if ( lpa_pause & LPA_PAUSE_CAP )
645 {
646 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
647 if ( priv->pause_flags & NV_PAUSEFRAME_TX_REQ )
648 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
649 }
650 if ( lpa_pause == LPA_PAUSE_ASYM )
651 {
652 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
653 }
654 break;
655 }
656 } else {
657 pause_flags = priv->pause_flags;
658 }
659 }
660 nv_update_pause ( priv, pause_flags );
661
662 if ( txrxFlags & NV_RESTART_TX )
663 nv_start_tx ( priv );
664 if ( txrxFlags & NV_RESTART_RX )
665 nv_start_rx ( priv );
666
667 return retval;
668 }
669
670
/**
 * open - Called when a network interface is made active
 *
 * @v netdev	Network device
 * @ret rc	Return status code, 0 on success, negative value on failure
 *
 * Brings the hardware up: powers up the PHY, resets the MAC, enters
 * promiscuous mode, allocates and programs the descriptor rings,
 * configures the MII interface and finally starts the receiver and
 * transmitter.  The register programming order follows the Linux
 * forcedeth driver this code was derived from.
 **/
static int
forcedeth_open ( struct net_device *netdev )
{
	struct forcedeth_private *priv = netdev_priv ( netdev );
	void *ioaddr = priv->mmio_addr;
	int i;
	int rc;
	u32 low;

	DBGP ( "forcedeth_open\n" );

	/* Power up phy */
	mii_rw ( priv, priv->phyaddr, MII_BMCR,
		 mii_rw ( priv, priv->phyaddr, MII_BMCR, MII_READ ) & ~BMCR_PDOWN );

	/* Ungate the TX/RX clocks */
	nv_txrx_gate ( priv, 0 );

	/* Erase previous misconfiguration */
	if ( priv->driver_data & DEV_HAS_POWER_CNTRL )
		nv_mac_reset ( priv );

	/* Clear multicast masks and addresses, enter promiscuous mode */
	writel ( 0, ioaddr + NvRegMulticastAddrA );
	writel ( 0, ioaddr + NvRegMulticastAddrB );
	writel ( NVREG_MCASTMASKA_NONE, ioaddr + NvRegMulticastMaskA );
	writel ( NVREG_MCASTMASKB_NONE, ioaddr + NvRegMulticastMaskB );
	writel ( NVREG_PFF_PROMISC, ioaddr + NvRegPacketFilterFlags );

	/* Stop the transmitter and receiver before reconfiguring */
	writel ( 0, ioaddr + NvRegTransmitterControl );
	writel ( 0, ioaddr + NvRegReceiverControl );

	writel ( 0, ioaddr + NvRegAdapterControl );

	/* Clear link speed; preserve only the MAC address reversal flag
	 * in the transmit-poll register */
	writel ( 0, ioaddr + NvRegLinkSpeed );
	writel ( readl ( ioaddr + NvRegTransmitPoll ) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		 ioaddr + NvRegTransmitPoll );
	nv_txrx_reset ( priv );
	writel ( 0, ioaddr + NvRegUnknownSetupReg6 );

	/* Initialize descriptor rings */
	if ( ( rc = nv_init_rings ( priv ) ) != 0 )
		goto err_init_rings;

	writel ( priv->linkspeed, ioaddr + NvRegLinkSpeed );
	writel ( NVREG_TX_WM_DESC1_DEFAULT, ioaddr + NvRegTxWatermark );
	writel ( NVREG_TXRXCTL_DESC_1, ioaddr + NvRegTxRxControl );
	writel ( 0 , ioaddr + NvRegVlanControl );
	pci_push ( ioaddr );
	writel ( NVREG_TXRXCTL_BIT1 | NVREG_TXRXCTL_DESC_1,
		 ioaddr + NvRegTxRxControl );
	/* Wait for the setup handshake bit to come up */
	reg_delay ( priv, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31,
		    NVREG_UNKSETUP5_BIT31, NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
		    "open: SetupReg5, Bit 31 remained off\n" );

	/* Mask MII events and acknowledge any stale IRQ/MII status */
	writel ( 0, ioaddr + NvRegMIIMask );
	writel ( NVREG_IRQSTAT_MASK, ioaddr + NvRegIrqStatus );
	writel ( NVREG_MIISTAT_MASK_ALL, ioaddr + NvRegMIIStatus );

	writel ( NVREG_MISC1_FORCE | NVREG_MISC1_HD, ioaddr + NvRegMisc1 );
	/* Clear transmitter status by writing back what was read */
	writel ( readl ( ioaddr + NvRegTransmitterStatus ),
		 ioaddr + NvRegTransmitterStatus );
	writel ( RX_BUF_SZ, ioaddr + NvRegOffloadConfig );

	/* Clear receiver status by writing back what was read */
	writel ( readl ( ioaddr + NvRegReceiverStatus),
		 ioaddr + NvRegReceiverStatus );

	/* Set up slot time */
	low = ( random() & NVREG_SLOTTIME_MASK );
	writel ( low | NVREG_SLOTTIME_DEFAULT, ioaddr + NvRegSlotTime );

	writel ( NVREG_TX_DEFERRAL_DEFAULT , ioaddr + NvRegTxDeferral );
	writel ( NVREG_RX_DEFERRAL_DEFAULT , ioaddr + NvRegRxDeferral );

	writel ( NVREG_POLL_DEFAULT_THROUGHPUT, ioaddr + NvRegPollingInterval );

	writel ( NVREG_UNKSETUP6_VAL, ioaddr + NvRegUnknownSetupReg6 );
	/* Program the PHY address and bring the adapter up */
	writel ( ( priv->phyaddr << NVREG_ADAPTCTL_PHYSHIFT ) |
		 NVREG_ADAPTCTL_PHYVALID | NVREG_ADAPTCTL_RUNNING,
		 ioaddr + NvRegAdapterControl );
	writel ( NVREG_MIISPEED_BIT8 | NVREG_MIIDELAY, ioaddr + NvRegMIISpeed );
	writel ( NVREG_MII_LINKCHANGE, ioaddr + NvRegMIIMask );

	i = readl ( ioaddr + NvRegPowerState );
	if ( ( i & NVREG_POWERSTATE_POWEREDUP ) == 0 )
		writel ( NVREG_POWERSTATE_POWEREDUP | i, ioaddr + NvRegPowerState );

	pci_push ( ioaddr );
	udelay ( 10 );
	writel ( readl ( ioaddr + NvRegPowerState ) | NVREG_POWERSTATE_VALID,
		 ioaddr + NvRegPowerState );

	/* Keep interrupts masked; iPXE drivers run polled */
	nv_disable_hw_interrupts ( priv );
	writel ( NVREG_MIISTAT_MASK_ALL, ioaddr + NvRegMIIStatus );
	writel ( NVREG_IRQSTAT_MASK, ioaddr + NvRegIrqStatus );
	pci_push ( ioaddr );

	readl ( ioaddr + NvRegMIIStatus );
	writel ( NVREG_MIISTAT_MASK_ALL, ioaddr + NvRegMIIStatus );
	/* Force a full link renegotiation, then start TX/RX */
	priv->linkspeed = 0;
	nv_update_linkspeed ( priv );
	nv_start_rx ( priv );
	nv_start_tx ( priv );

	return 0;

err_init_rings:
	return rc;
}
785
786 /**
787 * transmit - Transmit a packet
788 *
789 * @v netdev Network device
790 * @v iobuf I/O buffer
791 *
792 * @ret rc Returns 0 on success, negative on failure
793 */
794 static int
forcedeth_transmit(struct net_device * netdev,struct io_buffer * iobuf)795 forcedeth_transmit ( struct net_device *netdev, struct io_buffer *iobuf )
796 {
797 struct forcedeth_private *priv = netdev_priv ( netdev );
798 void *ioaddr = priv->mmio_addr;
799 struct ring_desc *tx_curr_desc;
800 u32 size = iob_len ( iobuf );
801
802 DBGP ( "forcedeth_transmit\n" );
803
804 /* NOTE: Some NICs have a hw bug that causes them to malfunction
805 * when there are more than 16 outstanding TXs. Increasing the TX
806 * ring size might trigger this bug */
807 if ( priv->tx_fill_ctr == TX_RING_SIZE ) {
808 DBG ( "Tx overflow\n" );
809 return -ENOBUFS;
810 }
811
812 /* Pad small packets to minimum length */
813 iob_pad ( iobuf, ETH_ZLEN );
814
815 priv->tx_iobuf[priv->tx_curr] = iobuf;
816
817 tx_curr_desc = priv->tx_ring + priv->tx_curr;
818
819 /* Configure current descriptor to transmit packet
820 * ( NV_TX_VALID sets the ownership bit ) */
821 tx_curr_desc->buf =
822 cpu_to_le32 ( virt_to_bus ( iobuf->data ) );
823 wmb();
824 /* Since we don't do fragmentation offloading, we always have
825 * the last packet bit set */
826 tx_curr_desc->flaglen =
827 cpu_to_le32 ( ( size - 1 ) | NV_TX_VALID | NV_TX_LASTPACKET );
828
829 DBG ( "forcedeth_transmit: flaglen = %#04x\n",
830 ( size - 1 ) | NV_TX_VALID | NV_TX_LASTPACKET );
831 DBG ( "forcedeth_transmit: tx_fill_ctr = %d\n",
832 priv->tx_fill_ctr );
833
834 writel ( NVREG_TXRXCTL_KICK | NVREG_TXRXCTL_DESC_1,
835 ioaddr + NvRegTxRxControl );
836 pci_push ( ioaddr );
837
838 /* Point to the next free descriptor */
839 priv->tx_curr = ( priv->tx_curr + 1 ) % TX_RING_SIZE;
840
841 /* Increment number of descriptors in use */
842 priv->tx_fill_ctr++;
843
844 return 0;
845 }
846
/**
 * nv_process_tx_packets - Checks for successfully sent packets,
 * reports them to iPXE with netdev_tx_complete()
 *
 * @v netdev	Network device
 *
 * Walks the TX ring from tx_tail towards tx_curr, completing every
 * descriptor the hardware has released (NV_TX_VALID clear).
 */
static void
nv_process_tx_packets ( struct net_device *netdev )
{
	struct forcedeth_private *priv = netdev_priv ( netdev );
	struct ring_desc *tx_curr_desc;
	u32 flaglen;

	DBGP ( "nv_process_tx_packets\n" );

	while ( priv->tx_tail != priv->tx_curr ) {

		tx_curr_desc = priv->tx_ring + priv->tx_tail;
		flaglen = le32_to_cpu ( tx_curr_desc->flaglen );
		rmb();

		/* Skip this descriptor if hardware still owns it */
		if ( flaglen & NV_TX_VALID )
			break;

		DBG ( "Transmitted packet.\n" );
		DBG ( "priv->tx_fill_ctr= %d\n", priv->tx_fill_ctr );
		DBG ( "priv->tx_tail = %d\n", priv->tx_tail );
		DBG ( "priv->tx_curr = %d\n", priv->tx_curr );
		DBG ( "flaglen = %#04x\n", flaglen );

		/* This packet is ready for completion */
		netdev_tx_complete ( netdev, priv->tx_iobuf[priv->tx_tail] );

		/* Clear the descriptor */
		memset ( tx_curr_desc, 0, sizeof(*tx_curr_desc) );

		/* Reduce the number of tx descriptors in use */
		priv->tx_fill_ctr--;

		/* Go to next available descriptor */
		priv->tx_tail = ( priv->tx_tail + 1 ) % TX_RING_SIZE;
	}
}
891
/**
 * nv_process_rx_packets - Checks for received packets, reports them
 * to iPXE with netdev_rx() or netdev_rx_err() if there was an error receiving
 * the packet
 *
 * @v netdev	Network device
 *
 * Walks at most RX_RING_SIZE descriptors starting at rx_curr,
 * handing each completed packet (NV_RX_AVAIL clear) up the stack and
 * recycling the descriptor.  Finishes by refilling empty descriptors
 * via nv_alloc_rx().
 */
static void
nv_process_rx_packets ( struct net_device *netdev )
{
	struct forcedeth_private *priv = netdev_priv ( netdev );
	struct io_buffer *curr_iob;
	struct ring_desc *rx_curr_desc;
	u32 flags, len;
	int i;

	DBGP ( "nv_process_rx_packets\n" );

	for ( i = 0; i < RX_RING_SIZE; i++ ) {

		rx_curr_desc = priv->rx_ring + priv->rx_curr;
		flags = le32_to_cpu ( rx_curr_desc->flaglen );
		rmb();

		/* Skip this descriptor if hardware still owns it */
		if ( flags & NV_RX_AVAIL )
			break;

		/* We own the descriptor, but it has not been refilled yet */
		curr_iob = priv->rx_iobuf[priv->rx_curr];
		DBG ( "%p %p\n", curr_iob, priv->rx_iobuf[priv->rx_curr] );
		if ( curr_iob == NULL )
			break;

		DBG ( "Received packet.\n" );
		DBG ( "priv->rx_curr = %d\n", priv->rx_curr );
		DBG ( "flags = %#04x\n", flags );

		/* Check for errors */
		if ( ( flags & NV_RX_DESCRIPTORVALID ) &&
		     ( flags & NV_RX_ERROR ) ) {
			netdev_rx_err ( netdev, curr_iob, -EINVAL );
			DBG ( " Corrupted packet received!\n" );
		} else {
			/* Packet length lives in the low bits of flaglen */
			len = flags & LEN_MASK_V1;

			iob_put ( curr_iob, len );
			netdev_rx ( netdev, curr_iob );
		}

		/* Invalidate iobuf */
		priv->rx_iobuf[priv->rx_curr] = NULL;

		/* Invalidate descriptor */
		memset ( rx_curr_desc, 0, sizeof(*rx_curr_desc) );

		/* Point to the next free descriptor */
		priv->rx_curr = ( priv->rx_curr + 1 ) % RX_RING_SIZE;
	}

	/* Refill any descriptors we just consumed */
	nv_alloc_rx ( priv );
}
954
955 /**
956 * check_link - Check for link status change
957 *
958 * @v netdev Network device
959 */
960 static void
forcedeth_link_status(struct net_device * netdev)961 forcedeth_link_status ( struct net_device *netdev )
962 {
963 struct forcedeth_private *priv = netdev_priv ( netdev );
964 void *ioaddr = priv->mmio_addr;
965
966 /* Clear the MII link change status by reading the MIIStatus register */
967 readl ( ioaddr + NvRegMIIStatus );
968 writel ( NVREG_MIISTAT_LINKCHANGE, ioaddr + NvRegMIIStatus );
969
970 if ( nv_update_linkspeed ( priv ) == 1 )
971 netdev_link_up ( netdev );
972 else
973 netdev_link_down ( netdev );
974 }
975
/**
 * poll - Poll for received packets
 *
 * @v netdev	Network device
 *
 * Reads and acknowledges the interrupt status, handles link-change
 * events, then completes transmitted packets and delivers received
 * ones.
 */
static void
forcedeth_poll ( struct net_device *netdev )
{
	struct forcedeth_private *priv = netdev_priv ( netdev );
	void *ioaddr = priv->mmio_addr;
	u32 status;

	DBGP ( "forcedeth_poll\n" );

	status = readl ( ioaddr + NvRegIrqStatus ) & NVREG_IRQSTAT_MASK;

	/* Return when no interrupts have been triggered */
	if ( ! status )
		return;

	/* Clear interrupts */
	writel ( NVREG_IRQSTAT_MASK, ioaddr + NvRegIrqStatus );

	DBG ( "forcedeth_poll: status = %#04x\n", status );

	/* Link change interrupt occurred. Call always if link is down,
	 * to give auto-neg a chance to finish */
	if ( ( status & NVREG_IRQ_LINK ) || ! ( netdev_link_ok ( netdev ) ) )
		forcedeth_link_status ( netdev );

	/* Process transmitted packets */
	nv_process_tx_packets ( netdev );

	/* Process received packets */
	nv_process_rx_packets ( netdev );
}
1012
/**
 * close - Disable network interface
 *
 * Halts the receiver and transmitter, resets the DMA engines, masks
 * all interrupts and releases the descriptor ring resources.
 *
 * @v netdev	network interface device structure
 **/
static void
forcedeth_close ( struct net_device *netdev )
{
	struct forcedeth_private *priv = netdev_priv ( netdev );

	DBGP ( "forcedeth_close\n" );

	/* Stop both DMA engines before tearing anything down */
	nv_stop_rx ( priv );
	nv_stop_tx ( priv );
	nv_txrx_reset ( priv );

	/* Disable interrupts on the nic or we will lock up */
	nv_disable_hw_interrupts ( priv );

	/* Free descriptor rings and any outstanding I/O buffers */
	nv_free_rxtx_resources ( priv );

	nv_txrx_gate ( priv, 0 );

	/* FIXME: power down nic */
}
1038
/**
 * irq - enable or disable interrupts
 *
 * @v netdev	network adapter
 * @v action	requested interrupt action (zero disables, anything
 *		else enables)
 **/
static void
forcedeth_irq ( struct net_device *netdev, int action )
{
	struct forcedeth_private *priv = netdev_priv ( netdev );

	DBGP ( "forcedeth_irq\n" );

	/* Any non-zero action enables interrupts */
	if ( action )
		nv_enable_hw_interrupts ( priv );
	else
		nv_disable_hw_interrupts ( priv );
}
1061
/** forcedeth network device operations */
static struct net_device_operations forcedeth_operations = {
	.open		= forcedeth_open,
	.transmit	= forcedeth_transmit,
	.poll		= forcedeth_poll,
	.close		= forcedeth_close,
	.irq		= forcedeth_irq,
};
1069
1070 static int
nv_setup_mac_addr(struct forcedeth_private * priv)1071 nv_setup_mac_addr ( struct forcedeth_private *priv )
1072 {
1073 struct net_device *dev = priv->netdev;
1074 void *ioaddr = priv->mmio_addr;
1075 u32 orig_mac[2];
1076 u32 txreg;
1077
1078 orig_mac[0] = readl ( ioaddr + NvRegMacAddrA );
1079 orig_mac[1] = readl ( ioaddr + NvRegMacAddrB );
1080
1081 txreg = readl ( ioaddr + NvRegTransmitPoll );
1082
1083 if ( ( priv->driver_data & DEV_HAS_CORRECT_MACADDR ) ||
1084 ( txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV ) ) {
1085 /* mac address is already in correct order */
1086 dev->hw_addr[0] = ( orig_mac[0] >> 0 ) & 0xff;
1087 dev->hw_addr[1] = ( orig_mac[0] >> 8 ) & 0xff;
1088 dev->hw_addr[2] = ( orig_mac[0] >> 16 ) & 0xff;
1089 dev->hw_addr[3] = ( orig_mac[0] >> 24 ) & 0xff;
1090 dev->hw_addr[4] = ( orig_mac[1] >> 0 ) & 0xff;
1091 dev->hw_addr[5] = ( orig_mac[1] >> 8 ) & 0xff;
1092 } else {
1093 /* need to reverse mac address to correct order */
1094 dev->hw_addr[0] = ( orig_mac[1] >> 8 ) & 0xff;
1095 dev->hw_addr[1] = ( orig_mac[1] >> 0 ) & 0xff;
1096 dev->hw_addr[2] = ( orig_mac[0] >> 24 ) & 0xff;
1097 dev->hw_addr[3] = ( orig_mac[0] >> 16 ) & 0xff;
1098 dev->hw_addr[4] = ( orig_mac[0] >> 8 ) & 0xff;
1099 dev->hw_addr[5] = ( orig_mac[0] >> 0 ) & 0xff;
1100 }
1101
1102 if ( ! is_valid_ether_addr ( dev->hw_addr ) )
1103 return -EADDRNOTAVAIL;
1104
1105 DBG ( "MAC address is: %s\n", eth_ntoa ( dev->hw_addr ) );
1106
1107 return 0;
1108 }
1109
/**
 * nv_mgmt_acquire_sema - Try to acquire the host/management semaphore
 *
 * Waits for the management unit to release its semaphore, then tries
 * to set the host semaphore bit and verifies the acquisition.
 *
 * @v priv	Driver private structure
 *
 * @ret rc	1 if the semaphore was acquired, 0 otherwise
 **/
static int
nv_mgmt_acquire_sema ( struct forcedeth_private *priv )
{
	void *ioaddr = priv->mmio_addr;
	int i;
	u32 tx_ctrl, mgmt_sema;

	/* Poll (up to ~5 seconds) for the mgmt unit to free its semaphore */
	for ( i = 0; i < 10; i++ ) {
		mgmt_sema = readl ( ioaddr + NvRegTransmitterControl ) &
			NVREG_XMITCTL_MGMT_SEMA_MASK;
		if ( mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE )
			break;
		mdelay ( 500 );
	}

	if ( mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE )
		return 0;

	/* Try twice to set the host semaphore bit */
	for ( i = 0; i < 2; i++ ) {
		tx_ctrl = readl ( ioaddr + NvRegTransmitterControl );
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel ( tx_ctrl, ioaddr + NvRegTransmitterControl );

		/* verify that the semaphore was acquired */
		tx_ctrl = readl ( ioaddr + NvRegTransmitterControl );
		if ( ( ( tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK ) ==
		       NVREG_XMITCTL_HOST_SEMA_ACQ ) &&
		     ( ( tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK ) ==
		       NVREG_XMITCTL_MGMT_SEMA_FREE ) ) {
			/* Record ownership for nv_mgmt_release_sema() */
			priv->mgmt_sema = 1;
			return 1;
		} else {
			udelay ( 50 );
		}
	}

	return 0;
}
1148
1149 static void
nv_mgmt_release_sema(struct forcedeth_private * priv)1150 nv_mgmt_release_sema ( struct forcedeth_private *priv )
1151 {
1152 void *ioaddr = priv->mmio_addr;
1153 u32 tx_ctrl;
1154
1155 if ( priv->driver_data & DEV_HAS_MGMT_UNIT ) {
1156 if ( priv->mgmt_sema ) {
1157 tx_ctrl = readl (ioaddr + NvRegTransmitterControl );
1158 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
1159 writel ( tx_ctrl, ioaddr + NvRegTransmitterControl );
1160 }
1161 }
1162 }
1163
1164 static int
nv_mgmt_get_version(struct forcedeth_private * priv)1165 nv_mgmt_get_version ( struct forcedeth_private *priv )
1166 {
1167 void *ioaddr = priv->mmio_addr;
1168 u32 data_ready = readl ( ioaddr + NvRegTransmitterControl );
1169 u32 data_ready2 = 0;
1170 unsigned long start;
1171 int ready = 0;
1172
1173 writel ( NVREG_MGMTUNITGETVERSION,
1174 ioaddr + NvRegMgmtUnitGetVersion );
1175 writel ( data_ready ^ NVREG_XMITCTL_DATA_START,
1176 ioaddr + NvRegTransmitterControl );
1177 start = currticks();
1178
1179 while ( currticks() > start + 5 * TICKS_PER_SEC ) {
1180 data_ready2 = readl ( ioaddr + NvRegTransmitterControl );
1181 if ( ( data_ready & NVREG_XMITCTL_DATA_READY ) !=
1182 ( data_ready2 & NVREG_XMITCTL_DATA_READY ) ) {
1183 ready = 1;
1184 break;
1185 }
1186 mdelay ( 1000 );
1187 }
1188
1189 if ( ! ready || ( data_ready2 & NVREG_XMITCTL_DATA_ERROR ) )
1190 return 0;
1191
1192 priv->mgmt_version =
1193 readl ( ioaddr + NvRegMgmtUnitVersion ) & NVREG_MGMTUNITVERSION;
1194
1195 return 1;
1196 }
1197
1198
1199
1200 static int
phy_reset(struct forcedeth_private * priv,u32 bmcr_setup)1201 phy_reset ( struct forcedeth_private *priv, u32 bmcr_setup )
1202 {
1203 u32 miicontrol;
1204 unsigned int tries = 0;
1205
1206 miicontrol = BMCR_RESET | bmcr_setup;
1207 if ( mii_rw ( priv, priv->phyaddr, MII_BMCR, miicontrol ) ) {
1208 return -1;
1209 }
1210
1211 mdelay ( 500 );
1212
1213 /* must wait till reset is deasserted */
1214 while ( miicontrol & BMCR_RESET ) {
1215 mdelay ( 10 );
1216 miicontrol = mii_rw ( priv, priv->phyaddr, MII_BMCR, MII_READ );
1217 if ( tries++ > 100 )
1218 return -1;
1219 }
1220 return 0;
1221 }
1222
/**
 * phy_init - Bring the PHY into a usable, autonegotiating state
 *
 * Applies vendor/model-specific errata workarounds, programs the
 * autonegotiation advertisement, resets the PHY and restarts
 * autonegotiation.  The register/value sequences were taken from the
 * Linux forcedeth driver and must be kept in this exact order.
 *
 * @v priv	Driver private structure
 *
 * @ret rc	0 on success, PHY_ERROR on any MII access failure
 **/
static int
phy_init ( struct forcedeth_private *priv )
{
	void *ioaddr = priv->mmio_addr;
	u32 phyinterface, phy_reserved, mii_status;
	u32 mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if ( priv->phy_model == PHY_MODEL_MARVELL_E3016 ) {
		reg = mii_rw ( priv, priv->phyaddr, MII_NCONFIG, MII_READ );
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if ( mii_rw ( priv, priv->phyaddr, MII_NCONFIG, reg ) ) {
			DBG ( "PHY write to errata reg failed.\n" );
			return PHY_ERROR;
		}
	}

	/* Realtek PHYs need model/revision-specific setup before the
	 * reset performed further below */
	if ( priv->phy_oui == PHY_OUI_REALTEK ) {
		if ( priv->phy_model == PHY_MODEL_REALTEK_8211 &&
		     priv->phy_rev == PHY_REV_REALTEK_8211B ) {
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
		}

		if ( priv->phy_model == PHY_MODEL_REALTEK_8211 &&
		     priv->phy_rev == PHY_REV_REALTEK_8211C ) {
			u32 powerstate = readl ( ioaddr + NvRegPowerState2 );

			/* need to perform hw phy reset */
			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
			writel ( powerstate , ioaddr + NvRegPowerState2 );
			mdelay ( 25 );

			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
			writel ( powerstate , ioaddr + NvRegPowerState2 );
			mdelay ( 25 );

			reg = mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG6, MII_READ );
			reg |= PHY_REALTEK_INIT9;
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG6, reg ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}

			/* Set INIT11 only if it is not already set */
			reg = mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG7, MII_READ );
			if ( ! ( reg & PHY_REALTEK_INIT11 ) ) {
				reg |= PHY_REALTEK_INIT11;
				if ( mii_rw ( priv, priv->phyaddr,
					PHY_REALTEK_INIT_REG7, reg ) ) {
					DBG ( "PHY init failed.\n" );
					return PHY_ERROR;
				}
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
		}
		if ( priv->phy_model == PHY_MODEL_REALTEK_8201 ) {
			if ( priv->driver_data & DEV_NEED_PHY_INIT_FIX ) {
				phy_reserved = mii_rw ( priv, priv->phyaddr,
							PHY_REALTEK_INIT_REG6,
							MII_READ );
				phy_reserved |= PHY_REALTEK_INIT7;
				if ( mii_rw ( priv, priv->phyaddr,
					      PHY_REALTEK_INIT_REG6,
					      phy_reserved ) ) {
					DBG ( "PHY init failed.\n" );
					return PHY_ERROR;
				}
			}
		}
	}

	/* set advertise register */
	reg = mii_rw ( priv, priv->phyaddr, MII_ADVERTISE, MII_READ );
	reg |= ( ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF |
		ADVERTISE_100FULL | ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP );
	if ( mii_rw ( priv, priv->phyaddr, MII_ADVERTISE, reg ) ) {
		DBG ( "PHY init failed.\n" );
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl ( ioaddr + NvRegPhyInterface );

	/* see if gigabit phy */
	mii_status = mii_rw ( priv, priv->phyaddr, MII_BMSR, MII_READ );
	if ( mii_status & PHY_GIGABIT ) {
		priv->gigabit = PHY_GIGABIT;
		mii_control_1000 =
			mii_rw ( priv, priv->phyaddr, MII_CTRL1000, MII_READ );
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		/* Advertise 1000FULL only on RGMII-attached PHYs */
		if ( phyinterface & PHY_RGMII )
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if ( mii_rw ( priv, priv->phyaddr, MII_CTRL1000, mii_control_1000)) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
	} else {
		priv->gigabit = 0;
	}

	mii_control = mii_rw ( priv, priv->phyaddr, MII_BMCR, MII_READ );
	mii_control |= BMCR_ANENABLE;

	if ( priv->phy_oui == PHY_OUI_REALTEK &&
	     priv->phy_model == PHY_MODEL_REALTEK_8211 &&
	     priv->phy_rev == PHY_REV_REALTEK_8211C ) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if ( mii_rw ( priv, priv->phyaddr, MII_BMCR, mii_control ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset )
		 */
		if ( phy_reset ( priv, mii_control ) ) {
			DBG ( "PHY reset failed\n" );
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if ( ( priv->phy_oui == PHY_OUI_CICADA ) && ( phyinterface & PHY_RGMII ) ) {
		phy_reserved = mii_rw ( priv, priv->phyaddr, MII_RESV1, MII_READ );
		phy_reserved &= ~( PHY_CICADA_INIT1 | PHY_CICADA_INIT2 );
		phy_reserved |= ( PHY_CICADA_INIT3 | PHY_CICADA_INIT4 );
		if ( mii_rw ( priv, priv->phyaddr, MII_RESV1, phy_reserved ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		phy_reserved = mii_rw ( priv, priv->phyaddr, MII_NCONFIG, MII_READ );
		phy_reserved |= PHY_CICADA_INIT5;
		if ( mii_rw ( priv, priv->phyaddr, MII_NCONFIG, phy_reserved ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
	}
	if ( priv->phy_oui == PHY_OUI_CICADA ) {
		phy_reserved = mii_rw ( priv, priv->phyaddr, MII_SREVISION, MII_READ );
		phy_reserved |= PHY_CICADA_INIT6;
		if ( mii_rw ( priv, priv->phyaddr, MII_SREVISION, phy_reserved ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
	}
	if ( priv->phy_oui == PHY_OUI_VITESSE ) {
		/* NOTE(review): some read/write pairs below write the
		 * value back unmodified; this mirrors the Linux
		 * driver's init sequence and appears intentional */
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG1,
			PHY_VITESSE_INIT1)) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG2,
			PHY_VITESSE_INIT2)) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		phy_reserved = mii_rw ( priv, priv->phyaddr,
					PHY_VITESSE_INIT_REG4, MII_READ);
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG4,
			phy_reserved ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		phy_reserved = mii_rw ( priv, priv->phyaddr,
					PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG3,
			phy_reserved ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG2,
			PHY_VITESSE_INIT4 ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG2,
			PHY_VITESSE_INIT5 ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		phy_reserved = mii_rw ( priv, priv->phyaddr,
					PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG4,
			phy_reserved ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		phy_reserved = mii_rw ( priv, priv->phyaddr,
					PHY_VITESSE_INIT_REG3, MII_READ);
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG3,
			phy_reserved ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG2,
			PHY_VITESSE_INIT6 ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG2,
			PHY_VITESSE_INIT7 ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		phy_reserved = mii_rw ( priv, priv->phyaddr,
					PHY_VITESSE_INIT_REG4, MII_READ);
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG4,
			phy_reserved ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		phy_reserved = mii_rw ( priv, priv->phyaddr,
					PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG3,
			phy_reserved ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG2,
			PHY_VITESSE_INIT9 ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
		if ( mii_rw ( priv, priv->phyaddr, PHY_VITESSE_INIT_REG1,
			PHY_VITESSE_INIT10 ) ) {
			DBG ( "PHY init failed.\n" );
			return PHY_ERROR;
		}
	}

	if ( priv->phy_oui == PHY_OUI_REALTEK ) {
		if ( priv->phy_model == PHY_MODEL_REALTEK_8211 &&
		     priv->phy_rev == PHY_REV_REALTEK_8211B ) {
			/* reset could have cleared these out, set them back */
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
		}
		if ( priv->phy_model == PHY_MODEL_REALTEK_8201 ) {
			if ( priv->driver_data & DEV_NEED_PHY_INIT_FIX ) {
				phy_reserved = mii_rw ( priv, priv->phyaddr,
							PHY_REALTEK_INIT_REG6,
							MII_READ );
				phy_reserved |= PHY_REALTEK_INIT7;
				if ( mii_rw ( priv, priv->phyaddr,
					      PHY_REALTEK_INIT_REG6,
					      phy_reserved ) ) {
					DBG ( "PHY init failed.\n" );
					return PHY_ERROR;
				}
			}

			if ( mii_rw ( priv, priv->phyaddr,
				      PHY_REALTEK_INIT_REG1,
				      PHY_REALTEK_INIT3 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			phy_reserved = mii_rw ( priv, priv->phyaddr,
						PHY_REALTEK_INIT_REG2,
						MII_READ );
			phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
			phy_reserved |= PHY_REALTEK_INIT3;
			if ( mii_rw ( priv, priv->phyaddr,
				      PHY_REALTEK_INIT_REG2,
				      phy_reserved ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
			if ( mii_rw ( priv, priv->phyaddr,
				      PHY_REALTEK_INIT_REG1,
				      PHY_REALTEK_INIT1 ) ) {
				DBG ( "PHY init failed.\n" );
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw ( priv, priv->phyaddr, MII_ADVERTISE, reg );

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw ( priv, priv->phyaddr, MII_BMCR, MII_READ );
	mii_control |= ( BMCR_ANRESTART | BMCR_ANENABLE );
	if ( mii_rw ( priv, priv->phyaddr, MII_BMCR, mii_control ) ) {
		return PHY_ERROR;
	}

	return 0;
}
1600
/**
 * nv_setup_phy - Find PHY and initialize it
 *
 * Powers up the PHY/NIC, quiesces PHY interrupts, negotiates with the
 * management unit (if present), scans all 32 MII addresses for a
 * responding PHY and initializes it unless the management unit has
 * already done so.
 *
 * @v priv	Driver private structure
 *
 * @ret rc	Return status code
 **/
static int
nv_setup_phy ( struct forcedeth_private *priv )
{
	void *ioaddr = priv->mmio_addr;
	u32 phystate_orig = 0, phystate;
	int phyinitialised = 0;
	u32 powerstate;
	int rc = 0;
	int i;

	if ( priv->driver_data & DEV_HAS_POWER_CNTRL ) {
		/* take phy and nic out of low power mode */
		powerstate = readl ( ioaddr + NvRegPowerState2 );
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		/* Revision A3 and later of affected chips need an
		 * extra powerup bit */
		if ( ( priv->driver_data & DEV_NEED_LOW_POWER_FIX ) &&
		     ( ( priv->pci_dev->class & 0xff ) >= 0xA3 ) )
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel ( powerstate, ioaddr + NvRegPowerState2 );
	}


	/* clear phy state and temporarily halt phy interrupts */
	writel ( 0, ioaddr + NvRegMIIMask );
	phystate = readl ( ioaddr + NvRegAdapterControl );
	if ( phystate & NVREG_ADAPTCTL_RUNNING ) {
		/* Remember that we stopped it, so the error path can
		 * restore the original state */
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel ( phystate, ioaddr + NvRegAdapterControl );
	}
	writel ( NVREG_MIISTAT_MASK_ALL, ioaddr + NvRegMIIStatus );

	if ( priv->driver_data & DEV_HAS_MGMT_UNIT ) {
		/* management unit running on the mac? */
		if ( ( readl ( ioaddr + NvRegTransmitterControl ) & NVREG_XMITCTL_MGMT_ST ) &&
		     ( readl ( ioaddr + NvRegTransmitterControl ) & NVREG_XMITCTL_SYNC_PHY_INIT ) &&
		     nv_mgmt_acquire_sema ( priv ) &&
		     nv_mgmt_get_version ( priv ) ) {
			priv->mac_in_use = 1;
			if ( priv->mgmt_version > 0 ) {
				priv->mac_in_use = readl ( ioaddr + NvRegMgmtUnitControl ) & NVREG_MGMTUNITCONTROL_INUSE;
			}

			DBG ( "mgmt unit is running. mac in use\n" );

			/* management unit setup the phy already? */
			if ( priv->mac_in_use &&
			     ( ( readl ( ioaddr + NvRegTransmitterControl ) & NVREG_XMITCTL_SYNC_MASK ) ==
			       NVREG_XMITCTL_SYNC_PHY_INIT ) ) {
				/* phy is inited by mgmt unit */
				phyinitialised = 1;
				DBG ( "Phy already initialized by mgmt unit" );
			}
		}
	}

	/* find a suitable phy */
	for ( i = 1; i <= 32; i++ ) {
		int id1, id2;
		/* Address 32 wraps to 0 (i & 0x1f), so address 0 is
		 * probed last */
		int phyaddr = i & 0x1f;

		id1 = mii_rw ( priv, phyaddr, MII_PHYSID1, MII_READ );
		if ( id1 < 0 || id1 == 0xffff )
			continue;
		id2 = mii_rw ( priv, phyaddr, MII_PHYSID2, MII_READ );
		if ( id2 < 0 || id2 == 0xffff )
			continue;

		priv->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = ( id1 & PHYID1_OUI_MASK ) << PHYID1_OUI_SHFT;
		id2 = ( id2 & PHYID2_OUI_MASK ) >> PHYID2_OUI_SHFT;
		DBG ( "Found PHY: %04x:%04x at address %d\n", id1, id2, phyaddr );
		priv->phyaddr = phyaddr;
		priv->phy_oui = id1 | id2;

		/* Realtek hardcoded phy id1 to all zeros on certain phys */
		if ( priv->phy_oui == PHY_OUI_REALTEK2 )
			priv->phy_oui = PHY_OUI_REALTEK;
		/* Setup phy revision for Realtek */
		if ( priv->phy_oui == PHY_OUI_REALTEK &&
		     priv->phy_model == PHY_MODEL_REALTEK_8211 )
			priv->phy_rev = mii_rw ( priv, phyaddr, MII_RESV1,
						 MII_READ ) & PHY_REV_MASK;
		break;
	}
	if ( i == 33 ) {
		DBG ( "Could not find a valid PHY.\n" );
		rc = -ENODEV;
		goto err_phy;
	}

	if ( ! phyinitialised ) {
		/* reset it */
		/* NOTE(review): phy_init()'s return value (PHY_ERROR)
		 * is ignored here — confirm whether failure should be
		 * propagated to the caller */
		phy_init ( priv );
	} else {
		u32 mii_status = mii_rw ( priv, priv->phyaddr, MII_BMSR, MII_READ );
		if ( mii_status & PHY_GIGABIT ) {
			priv->gigabit = PHY_GIGABIT;
		}
	}

	return 0;

err_phy:
	/* Restore the adapter-control RUNNING bit if we cleared it */
	if ( phystate_orig )
		writel ( phystate | NVREG_ADAPTCTL_RUNNING,
			 ioaddr + NvRegAdapterControl );
	return rc;
}
1716
1717 /**
1718 * forcedeth_map_regs - Find a suitable BAR for the NIC and
1719 * map the registers in memory
1720 *
1721 * @v priv Driver private structure
1722 *
1723 * @ret rc Return status code
1724 **/
1725 static int
forcedeth_map_regs(struct forcedeth_private * priv)1726 forcedeth_map_regs ( struct forcedeth_private *priv )
1727 {
1728 void *ioaddr;
1729 uint32_t bar;
1730 unsigned long addr;
1731 u32 register_size;
1732 int reg;
1733 int rc;
1734
1735 /* Set register size based on NIC */
1736 if ( priv->driver_data & ( DEV_HAS_VLAN | DEV_HAS_MSI_X |
1737 DEV_HAS_POWER_CNTRL | DEV_HAS_STATISTICS_V2 |
1738 DEV_HAS_STATISTICS_V3 ) ) {
1739 register_size = NV_PCI_REGSZ_VER3;
1740 } else if ( priv->driver_data & DEV_HAS_STATISTICS_V1 ) {
1741 register_size = NV_PCI_REGSZ_VER2;
1742 } else {
1743 register_size = NV_PCI_REGSZ_VER1;
1744 }
1745
1746 /* Find an appropriate region for all the registers */
1747 rc = -EINVAL;
1748 addr = 0;
1749 for ( reg = PCI_BASE_ADDRESS_0; reg <= PCI_BASE_ADDRESS_5; reg += 4 ) {
1750 pci_read_config_dword ( priv->pci_dev, reg, &bar );
1751
1752 if ( ( ! ( bar & PCI_BASE_ADDRESS_SPACE_IO ) ) &&
1753 ( pci_bar_size ( priv->pci_dev, reg ) >= register_size ) ){
1754 addr = pci_bar_start ( priv->pci_dev, reg );
1755 break;
1756 }
1757 }
1758
1759 if ( reg > PCI_BASE_ADDRESS_5 ) {
1760 DBG ( "Couldn't find register window\n" );
1761 goto err_bar_sz;
1762 }
1763
1764 rc = -ENOMEM;
1765 ioaddr = ioremap ( addr, register_size );
1766 if ( ! ioaddr ) {
1767 DBG ( "Cannot remap MMIO\n" );
1768 goto err_ioremap;
1769 }
1770
1771 priv->mmio_addr = ioaddr;
1772
1773 return 0;
1774
1775 err_bar_sz:
1776 err_ioremap:
1777 return rc;
1778 }
1779
/**
 * probe - Initial configuration of NIC
 *
 * Allocates the net device, maps the register window, reads the MAC
 * address, initializes the PHY, configures pause frames and registers
 * the device with the iPXE network core.
 *
 * @v pdev	PCI device
 * @v ent	PCI IDs
 *
 * @ret rc	Return status code
 **/
static int
forcedeth_probe ( struct pci_device *pdev )
{
	struct net_device *netdev;
	struct forcedeth_private *priv;
	void *ioaddr;
	int rc;

	DBGP ( "forcedeth_probe\n" );

	DBG ( "Found %s, vendor = %#04x, device = %#04x\n",
	      pdev->id->name, pdev->id->vendor, pdev->id->device );

	/* Allocate our private data */
	netdev = alloc_etherdev ( sizeof ( *priv ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		DBG ( "Failed to allocate net device\n" );
		goto err_alloc_etherdev;
	}

	/* Link our operations to the netdev struct */
	netdev_init ( netdev, &forcedeth_operations );

	/* Link the PCI device to the netdev struct */
	pci_set_drvdata ( pdev, netdev );
	netdev->dev = &pdev->dev;

	/* Get a reference to our private data */
	priv = netdev_priv ( netdev );

	/* We'll need these set up for the rest of the routines */
	priv->pci_dev = pdev;
	priv->netdev = netdev;
	priv->driver_data = pdev->id->driver_data;

	/* Enable bus mastering / fix up PCI command register */
	adjust_pci_device ( pdev );

	/* Use memory mapped I/O */
	if ( ( rc = forcedeth_map_regs ( priv ) ) != 0 )
		goto err_map_regs;
	ioaddr = priv->mmio_addr;

	/* Verify and get MAC address */
	if ( ( rc = nv_setup_mac_addr ( priv ) ) != 0 ) {
		DBG ( "Invalid MAC address detected\n" );
		goto err_mac_addr;
	}

	/* Disable WOL */
	writel ( 0, ioaddr + NvRegWakeUpFlags );

	/* Locate and initialize the PHY */
	if ( ( rc = nv_setup_phy ( priv ) ) != 0 )
		goto err_setup_phy;

	/* Set Pause Frame parameters */
	priv->pause_flags = NV_PAUSEFRAME_RX_CAPABLE |
			    NV_PAUSEFRAME_RX_REQ |
			    NV_PAUSEFRAME_AUTONEG;
	if ( ( priv->driver_data & DEV_HAS_PAUSEFRAME_TX_V1 ) ||
	     ( priv->driver_data & DEV_HAS_PAUSEFRAME_TX_V2 ) ||
	     ( priv->driver_data & DEV_HAS_PAUSEFRAME_TX_V3 ) ) {
		priv->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	/* Start with TX pause frames disabled in hardware */
	if ( priv->pause_flags & NV_PAUSEFRAME_TX_CAPABLE )
		writel ( NVREG_TX_PAUSEFRAME_DISABLE, ioaddr + NvRegTxPauseFrame );

	/* Set default link speed settings */
	priv->linkspeed = NVREG_LINKSPEED_FORCE | NVREG_LINKSPEED_10;
	priv->duplex = 0;

	if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
		DBG ( "Error registering netdev\n" );
		goto err_register_netdev;
	}

	/* Report the initial link state */
	forcedeth_link_status ( netdev );

	return 0;

err_register_netdev:
err_setup_phy:
err_mac_addr:
	iounmap ( priv->mmio_addr );
err_map_regs:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
err_alloc_etherdev:
	return rc;
}
1879
/**
 * nv_restore_phy - Undo the driver's Realtek 8201 PHY configuration
 *
 * Restores the PHY registers changed during phy_init() and restarts
 * autonegotiation — presumably so firmware/management code finds the
 * PHY in its expected state after driver removal (TODO confirm).
 *
 * @v priv	Driver private structure
 **/
static void
nv_restore_phy ( struct forcedeth_private *priv )
{
	u16 phy_reserved, mii_control;

	/* Only the Realtek 8201 needs its init sequence undone */
	if ( priv->phy_oui == PHY_OUI_REALTEK &&
	     priv->phy_model == PHY_MODEL_REALTEK_8201 ) {
		mii_rw ( priv, priv->phyaddr, PHY_REALTEK_INIT_REG1,
			 PHY_REALTEK_INIT3 );
		phy_reserved = mii_rw ( priv, priv->phyaddr,
					PHY_REALTEK_INIT_REG2, MII_READ );
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT8;
		mii_rw ( priv, priv->phyaddr, PHY_REALTEK_INIT_REG2,
			 phy_reserved );
		mii_rw ( priv, priv->phyaddr, PHY_REALTEK_INIT_REG1,
			 PHY_REALTEK_INIT1 );

		/* restart auto negotiation */
		mii_control = mii_rw ( priv, priv->phyaddr, MII_BMCR, MII_READ );
		mii_control |= ( BMCR_ANRESTART | BMCR_ANENABLE );
		mii_rw ( priv, priv->phyaddr, MII_BMCR, mii_control );
	}
}
1904
/**
 * remove - Device Removal Routine
 *
 * Reverses forcedeth_probe(): unregisters the device, restores the
 * PHY, releases the management-unit semaphore and unmaps the register
 * window.
 *
 * @v pdev	PCI device information struct
 **/
static void
forcedeth_remove ( struct pci_device *pdev )
{
	struct net_device *netdev = pci_get_drvdata ( pdev );
	struct forcedeth_private *priv = netdev->priv;

	DBGP ( "forcedeth_remove\n" );

	unregister_netdev ( netdev );

	/* Undo our Realtek 8201 PHY configuration, if any */
	nv_restore_phy ( priv );

	/* Hand the semaphore back to the management unit */
	nv_mgmt_release_sema ( priv );

	iounmap ( priv->mmio_addr );

	netdev_nullify ( netdev );
	netdev_put ( netdev );
}
1929
1930 static struct pci_device_id forcedeth_nics[] = {
1931 PCI_ROM(0x10DE, 0x01C3, "nForce", "nForce Ethernet Controller", DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER),
1932 PCI_ROM(0x10DE, 0x0066, "nForce2", "nForce2 Ethernet Controller", DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER),
1933 PCI_ROM(0x10DE, 0x00D6, "nForce3", "nForce3 Ethernet Controller", DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER),
1934 PCI_ROM(0x10DE, 0x0086, "nForce3", "nForce3 Ethernet Controller", DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC| DEV_HAS_CHECKSUM),
1935 PCI_ROM(0x10DE, 0x008C, "nForce3", "nForce3 Ethernet Controller", DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC| DEV_HAS_CHECKSUM),
1936 PCI_ROM(0x10DE, 0x00E6, "nForce3", "nForce3 Ethernet Controller", DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC| DEV_HAS_CHECKSUM),
1937 PCI_ROM(0x10DE, 0x00DF, "nForce3", "nForce3 Ethernet Controller", DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC| DEV_HAS_CHECKSUM),
1938 PCI_ROM(0x10DE, 0x0056, "CK804", "CK804 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT),
1939 PCI_ROM(0x10DE, 0x0057, "CK804", "CK804 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT),
1940 PCI_ROM(0x10DE, 0x0037, "MCP04", "MCP04 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT),
1941 PCI_ROM(0x10DE, 0x0038, "MCP04", "MCP04 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT),
1942 PCI_ROM(0x10DE, 0x0268, "MCP51", "MCP51 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX),
1943 PCI_ROM(0x10DE, 0x0269, "MCP51", "MCP51 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX),
1944 PCI_ROM(0x10DE, 0x0372, "MCP55", "MCP55 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X| DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1| DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED| DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX),
1945 PCI_ROM(0x10DE, 0x0373, "MCP55", "MCP55 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X| DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1| DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT| DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX),
1946 PCI_ROM(0x10DE, 0x03E5, "MCP61", "MCP61 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_NEED_MSI_FIX),
1947 PCI_ROM(0x10DE, 0x03E6, "MCP61", "MCP61 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_NEED_MSI_FIX),
1948 PCI_ROM(0x10DE, 0x03EE, "MCP61", "MCP61 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_NEED_MSI_FIX),
1949 PCI_ROM(0x10DE, 0x03EF, "MCP61", "MCP61 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_NEED_MSI_FIX),
1950 PCI_ROM(0x10DE, 0x0450, "MCP65", "MCP65 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA| DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1| DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT| DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE| DEV_NEED_MSI_FIX),
1951 PCI_ROM(0x10DE, 0x0451, "MCP65", "MCP65 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA| DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1| DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT| DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE| DEV_NEED_MSI_FIX),
1952 PCI_ROM(0x10DE, 0x0452, "MCP65", "MCP65 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA| DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1| DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT| DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE| DEV_NEED_MSI_FIX),
1953 PCI_ROM(0x10DE, 0x0453, "MCP65", "MCP65 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA| DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1| DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT| DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE| DEV_NEED_MSI_FIX),
1954 PCI_ROM(0x10DE, 0x054C, "MCP67", "MCP67 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX),
1955 PCI_ROM(0x10DE, 0x054D, "MCP67", "MCP67 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX),
1956 PCI_ROM(0x10DE, 0x054E, "MCP67", "MCP67 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX),
1957 PCI_ROM(0x10DE, 0x054F, "MCP67", "MCP67 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX),
1958 PCI_ROM(0x10DE, 0x07DC, "MCP73", "MCP73 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX),
1959 PCI_ROM(0x10DE, 0x07DD, "MCP73", "MCP73 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX),
1960 PCI_ROM(0x10DE, 0x07DE, "MCP73", "MCP73 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX),
1961 PCI_ROM(0x10DE, 0x07DF, "MCP73", "MCP73 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL| DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2| DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR| DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX),
1962 PCI_ROM(0x10DE, 0x0760, "MCP77", "MCP77 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA| DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2| DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT| DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX| DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX| DEV_NEED_MSI_FIX),
1963 PCI_ROM(0x10DE, 0x0761, "MCP77", "MCP77 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA| DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2| DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT| DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX| DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX| DEV_NEED_MSI_FIX),
1964 PCI_ROM(0x10DE, 0x0762, "MCP77", "MCP77 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA| DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2| DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT| DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX| DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX| DEV_NEED_MSI_FIX),
1965 PCI_ROM(0x10DE, 0x0763, "MCP77", "MCP77 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA| DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2| DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT| DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX| DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX| DEV_NEED_MSI_FIX),
1966 PCI_ROM(0x10DE, 0x0AB0, "MCP79", "MCP79 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL| DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3| DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR| DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE| DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX),
1967 PCI_ROM(0x10DE, 0x0AB1, "MCP79", "MCP79 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL| DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3| DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR| DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE| DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX),
1968 PCI_ROM(0x10DE, 0x0AB2, "MCP79", "MCP79 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL| DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3| DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR| DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE| DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX),
1969 PCI_ROM(0x10DE, 0x0AB3, "MCP79", "MCP79 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL| DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3| DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR| DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE| DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX),
1970 PCI_ROM(0x10DE, 0x0D7D, "MCP89", "MCP89 Ethernet Controller", DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM| DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL| DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3| DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR| DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX),
1971 };
1972
1973 struct pci_driver forcedeth_driver __pci_driver = {
1974 .ids = forcedeth_nics,
1975 .id_count = ARRAY_SIZE(forcedeth_nics),
1976 .probe = forcedeth_probe,
1977 .remove = forcedeth_remove,
1978 };
1979