xref: /linux/drivers/net/ethernet/dec/tulip/de2104x.c (revision f86fd32d)
1 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2 /*
3 	Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4 
5 	Copyright 1994, 1995 Digital Equipment Corporation.	    [de4x5.c]
6 	Written/copyright 1994-2001 by Donald Becker.		    [tulip.c]
7 
8 	This software may be used and distributed according to the terms of
9 	the GNU General Public License (GPL), incorporated herein by reference.
10 	Drivers based on or derived from this code fall under the GPL and must
11 	retain the authorship, copyright and license notice.  This file is not
12 	a complete program and may only be used when the entire operating
13 	system is licensed under the GPL.
14 
15 	See the file COPYING in this distribution for more information.
16 
17 	TODO, in rough priority order:
18 	* Support forcing media type with a module parameter,
19 	  like dl2k.c/sundance.c
20 	* Constants (module parms?) for Rx work limit
21 	* Complete reset on PciErr
22 	* Jumbo frames / dev->change_mtu
23 	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 	* Implement Tx software interrupt mitigation via
26 	  Tx descriptor bit
27 
28  */
29 
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31 
32 #define DRV_NAME		"de2104x"
33 #define DRV_VERSION		"0.7"
34 #define DRV_RELDATE		"Mar 17, 2004"
35 
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/pci.h>
43 #include <linux/delay.h>
44 #include <linux/ethtool.h>
45 #include <linux/compiler.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/crc32.h>
48 #include <linux/slab.h>
49 
50 #include <asm/io.h>
51 #include <asm/irq.h>
52 #include <linux/uaccess.h>
53 #include <asm/unaligned.h>
54 
55 /* These identify the driver base version and may not be removed. */
56 static char version[] =
57 "PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
58 
59 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
60 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
61 MODULE_LICENSE("GPL");
62 MODULE_VERSION(DRV_VERSION);
63 
64 static int debug = -1;
65 module_param (debug, int, 0);
66 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
67 
68 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
69 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
70         defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
71         defined(__sh__) || defined(__mips__)
72 static int rx_copybreak = 1518;
73 #else
74 static int rx_copybreak = 100;
75 #endif
76 module_param (rx_copybreak, int, 0);
77 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
78 
79 #define DE_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
80 				 NETIF_MSG_PROBE 	| \
81 				 NETIF_MSG_LINK		| \
82 				 NETIF_MSG_IFDOWN	| \
83 				 NETIF_MSG_IFUP		| \
84 				 NETIF_MSG_RX_ERR	| \
85 				 NETIF_MSG_TX_ERR)
86 
87 /* Descriptor skip length in 32 bit longwords. */
88 #ifndef CONFIG_DE2104X_DSL
89 #define DSL			0
90 #else
91 #define DSL			CONFIG_DE2104X_DSL
92 #endif
93 
94 #define DE_RX_RING_SIZE		64
95 #define DE_TX_RING_SIZE		64
96 #define DE_RING_BYTES		\
97 		((sizeof(struct de_desc) * DE_RX_RING_SIZE) +	\
98 		(sizeof(struct de_desc) * DE_TX_RING_SIZE))
99 #define NEXT_TX(N)		(((N) + 1) & (DE_TX_RING_SIZE - 1))
100 #define NEXT_RX(N)		(((N) + 1) & (DE_RX_RING_SIZE - 1))
101 #define TX_BUFFS_AVAIL(CP)					\
102 	(((CP)->tx_tail <= (CP)->tx_head) ?			\
103 	  (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head :	\
104 	  (CP)->tx_tail - (CP)->tx_head - 1)
105 
106 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
107 #define RX_OFFSET		2
108 
109 #define DE_SETUP_SKB		((struct sk_buff *) 1)
110 #define DE_DUMMY_SKB		((struct sk_buff *) 2)
111 #define DE_SETUP_FRAME_WORDS	96
112 #define DE_EEPROM_WORDS		256
113 #define DE_EEPROM_SIZE		(DE_EEPROM_WORDS * sizeof(u16))
114 #define DE_MAX_MEDIA		5
115 
116 #define DE_MEDIA_TP_AUTO	0
117 #define DE_MEDIA_BNC		1
118 #define DE_MEDIA_AUI		2
119 #define DE_MEDIA_TP		3
120 #define DE_MEDIA_TP_FD		4
121 #define DE_MEDIA_INVALID	DE_MAX_MEDIA
122 #define DE_MEDIA_FIRST		0
123 #define DE_MEDIA_LAST		(DE_MAX_MEDIA - 1)
124 #define DE_AUI_BNC		(SUPPORTED_AUI | SUPPORTED_BNC)
125 
126 #define DE_TIMER_LINK		(60 * HZ)
127 #define DE_TIMER_NO_LINK	(5 * HZ)
128 
129 #define DE_NUM_REGS		16
130 #define DE_REGS_SIZE		(DE_NUM_REGS * sizeof(u32))
131 #define DE_REGS_VER		1
132 
133 /* Time in jiffies before concluding the transmitter is hung. */
134 #define TX_TIMEOUT		(6*HZ)
135 
136 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
137    to support a pre-NWay full-duplex signaling mechanism using short frames.
138    No one knows what it should be, but if left at its default value some
139    10base2(!) packets trigger a full-duplex-request interrupt. */
140 #define FULL_DUPLEX_MAGIC	0x6969
141 
/* Register offsets (byte offsets from the mapped CSR base, accessed
 * via dr32()/dw32()), register bit definitions, descriptor status
 * bits and SROM constants for the 21040/21041. */
enum {
	/* NIC registers */
	BusMode			= 0x00,
	TxPoll			= 0x08,
	RxPoll			= 0x10,
	RxRingAddr		= 0x18,
	TxRingAddr		= 0x20,
	MacStatus		= 0x28,
	MacMode			= 0x30,
	IntrMask		= 0x38,
	RxMissed		= 0x40,
	ROMCmd			= 0x48,
	CSR11			= 0x58,
	SIAStatus		= 0x60,
	CSR13			= 0x68,
	CSR14			= 0x70,
	CSR15			= 0x78,
	PCIPM			= 0x40,

	/* BusMode bits */
	CmdReset		= (1 << 0),
	CacheAlign16		= 0x00008000,
	BurstLen4		= 0x00000400,
	DescSkipLen		= (DSL << 2),

	/* Rx/TxPoll bits */
	NormalTxPoll		= (1 << 0),
	NormalRxPoll		= (1 << 0),

	/* Tx/Rx descriptor status bits */
	DescOwn			= (1 << 31),
	RxError			= (1 << 15),
	RxErrLong		= (1 << 7),
	RxErrCRC		= (1 << 1),
	RxErrFIFO		= (1 << 0),
	RxErrRunt		= (1 << 11),
	RxErrFrame		= (1 << 14),
	RingEnd			= (1 << 25),
	FirstFrag		= (1 << 29),
	LastFrag		= (1 << 30),
	TxError			= (1 << 15),
	TxFIFOUnder		= (1 << 1),
	TxLinkFail		= (1 << 2) | (1 << 10) | (1 << 11),
	TxMaxCol		= (1 << 8),
	TxOWC			= (1 << 9),
	TxJabber		= (1 << 14),
	SetupFrame		= (1 << 27),
	TxSwInt			= (1 << 31),

	/* MacStatus bits */
	IntrOK			= (1 << 16),
	IntrErr			= (1 << 15),
	RxIntr			= (1 << 6),
	RxEmpty			= (1 << 7),
	TxIntr			= (1 << 0),
	TxEmpty			= (1 << 2),
	PciErr			= (1 << 13),
	TxState			= (1 << 22) | (1 << 21) | (1 << 20),
	RxState			= (1 << 19) | (1 << 18) | (1 << 17),
	LinkFail		= (1 << 12),
	LinkPass		= (1 << 4),
	RxStopped		= (1 << 8),
	TxStopped		= (1 << 1),

	/* MacMode bits */
	TxEnable		= (1 << 13),
	RxEnable		= (1 << 1),
	RxTx			= TxEnable | RxEnable,
	FullDuplex		= (1 << 9),
	AcceptAllMulticast	= (1 << 7),
	AcceptAllPhys		= (1 << 6),
	BOCnt			= (1 << 5),
	MacModeClear		= (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
				  RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,

	/* ROMCmd bits */
	EE_SHIFT_CLK		= 0x02,	/* EEPROM shift clock. */
	EE_CS			= 0x01,	/* EEPROM chip select. */
	EE_DATA_WRITE		= 0x04,	/* Data from the Tulip to EEPROM. */
	EE_WRITE_0		= 0x01,
	EE_WRITE_1		= 0x05,
	EE_DATA_READ		= 0x08,	/* Data from the EEPROM chip. */
	EE_ENB			= (0x4800 | EE_CS),

	/* The EEPROM commands include the always-set leading bit. */
	EE_READ_CMD		= 6,

	/* RxMissed bits */
	RxMissedOver		= (1 << 16),
	RxMissedMask		= 0xffff,

	/* SROM-related bits */
	SROMC0InfoLeaf		= 27,
	MediaBlockMask		= 0x3f,
	MediaCustomCSRs		= (1 << 6),

	/* PCIPM bits */
	PM_Sleep		= (1 << 31),
	PM_Snooze		= (1 << 30),
	PM_Mask			= PM_Sleep | PM_Snooze,

	/* SIAStatus bits */
	NWayState		= (1 << 14) | (1 << 13) | (1 << 12),
	NWayRestart		= (1 << 12),
	NonselPortActive	= (1 << 9),
	SelPortActive		= (1 << 8),
	LinkFailStatus		= (1 << 2),
	NetCxnErr		= (1 << 1),
};
251 
252 static const u32 de_intr_mask =
253 	IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
254 	LinkPass | LinkFail | PciErr;
255 
256 /*
257  * Set the programmable burst length to 4 longwords for all:
258  * DMA errors result without these values. Cache align 16 long.
259  */
260 static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
261 
/* One media block from the board's SROM: an option byte (see
 * MediaBlockMask / MediaCustomCSRs) plus the SIA register values
 * (CSR13-15) used to select that medium. */
struct de_srom_media_block {
	u8			opts;
	u16			csr13;
	u16			csr14;
	u16			csr15;
} __packed;
268 
/* Header of an SROM info leaf; n_blocks media blocks follow it. */
struct de_srom_info_leaf {
	u16			default_media;
	u8			n_blocks;	/* number of de_srom_media_block entries */
	u8			unused;
} __packed;
274 
/* Hardware Rx/Tx descriptor, little-endian as seen by the chip.
 * The optional skip words correspond to the DescSkipLen value
 * programmed into BusMode. */
struct de_desc {
	__le32			opts1;	/* status + DescOwn ownership bit */
	__le32			opts2;	/* control flags and buffer length */
	__le32			addr1;	/* DMA address of buffer 1 */
	__le32			addr2;	/* DMA address of buffer 2 */
#if DSL
	__le32			skip[DSL];
#endif
};
284 
/* Cached SIA register settings for one media type. */
struct media_info {
	u16			type;	/* DE_MEDIA_xxx */
	u16			csr13;
	u16			csr14;
	u16			csr15;
};
291 
/* Per-slot ring bookkeeping: the skb (or the DE_DUMMY_SKB /
 * DE_SETUP_SKB sentinel) and its DMA mapping. */
struct ring_info {
	struct sk_buff		*skb;
	dma_addr_t		mapping;
};
296 
/* Per-device driver state, stored in the net_device private area. */
struct de_private {
	unsigned		tx_head;	/* next Tx slot to fill (de_start_xmit) */
	unsigned		tx_tail;	/* next Tx slot to reclaim (de_tx) */
	unsigned		rx_tail;	/* next Rx slot to examine (de_rx) */

	void			__iomem *regs;	/* mapped CSR base, see dr32()/dw32() */
	struct net_device	*dev;
	spinlock_t		lock;		/* protects Tx ring and media state */

	struct de_desc		*rx_ring;
	struct de_desc		*tx_ring;
	struct ring_info	tx_skb[DE_TX_RING_SIZE];
	struct ring_info	rx_skb[DE_RX_RING_SIZE];
	unsigned		rx_buf_sz;	/* size of each Rx ring buffer */
	dma_addr_t		ring_dma;

	u32			msg_enable;	/* netif_msg_* bitmap */

	struct pci_dev		*pdev;

	u16			setup_frame[DE_SETUP_FRAME_WORDS];

	u32			media_type;		/* current DE_MEDIA_xxx */
	u32			media_supported;	/* SUPPORTED_* bitmap */
	u32			media_advertise;	/* ADVERTISED_* bitmap */
	struct media_info	media[DE_MAX_MEDIA];
	struct timer_list	media_timer;

	u8			*ee_data;
	unsigned		board_idx;
	unsigned		de21040 : 1;	/* chip is a 21040 (else 21041) */
	unsigned		media_lock : 1;	/* media forced; timers don't switch it */
};
330 
331 
332 static void de_set_rx_mode (struct net_device *dev);
333 static void de_tx (struct de_private *de);
334 static void de_clean_rings (struct de_private *de);
335 static void de_media_interrupt (struct de_private *de, u32 status);
336 static void de21040_media_timer (struct timer_list *t);
337 static void de21041_media_timer (struct timer_list *t);
338 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
339 
340 
341 static const struct pci_device_id de_pci_tbl[] = {
342 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
343 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
344 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
345 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
346 	{ },
347 };
348 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
349 
350 static const char * const media_name[DE_MAX_MEDIA] = {
351 	"10baseT auto",
352 	"BNC",
353 	"AUI",
354 	"10baseT-HD",
355 	"10baseT-FD"
356 };
357 
358 /* 21040 transceiver register settings:
359  * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
360 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
361 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
362 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
363 
364 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
365 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
366 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
367 /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
368 static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
369 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
370 
371 
/* 32-bit CSR accessors.  Both rely on a local variable named "de"
 * (a struct de_private *) being in scope at the call site. */
#define dr32(reg)	ioread32(de->regs + (reg))
#define dw32(reg, val)	iowrite32((val), de->regs + (reg))
374 
375 
/* Classify and count one abnormal Rx descriptor.  Called from
 * de_rx() for descriptors that failed its "single buffer, no error
 * bits" status check. */
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
			    u32 status, u32 len)
{
	netif_dbg(de, rx_err, de->dev,
		  "rx err, slot %d status 0x%x len %d\n",
		  rx_tail, status, len);

	if ((status & 0x38000300) != 0x0300) {
		/* Frame spanned multiple buffers; ignore earlier buffers. */
		if ((status & 0xffff) != 0x7fff) {
			netif_warn(de, rx_err, de->dev,
				   "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
				   status);
			de->dev->stats.rx_length_errors++;
		}
	} else if (status & RxError) {
		/* There was a fatal error. */
		de->dev->stats.rx_errors++; /* end of a packet.*/
		if (status & 0x0890) de->dev->stats.rx_length_errors++; /* runt/long-related bits */
		if (status & RxErrCRC) de->dev->stats.rx_crc_errors++;
		if (status & RxErrFIFO) de->dev->stats.rx_fifo_errors++;
	}
}
399 
/* Harvest completed frames from the Rx ring; called from de_interrupt().
 *
 * Packets no longer than rx_copybreak are copied into a right-sized
 * skb and the original ring buffer is reused; larger packets are
 * passed up as-is and the ring slot gets a freshly allocated
 * replacement buffer.  On allocation failure the remaining work is
 * drained in "drop" mode so the ring keeps turning.
 */
static void de_rx (struct de_private *de)
{
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;
	unsigned drop = 0;
	int rc;

	while (--rx_work) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;
		BUG_ON(!skb);
		rmb();	/* order the status read against earlier descriptor reads */
		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)
			break;	/* chip still owns this slot: nothing more to do */

		/* the length is actually a 15 bit value here according
		 * to Table 4-1 in the DE2104x spec so mask is 0x7fff
		 */
		len = ((status >> 16) & 0x7fff) - 4;	/* strip trailing CRC */
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			de->dev->stats.rx_dropped++;
			goto rx_next;
		}

		/* require a whole frame in one buffer with no error bits set */
		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);
			goto rx_next;
		}

		copying_skb = (len <= rx_copybreak);

		netif_dbg(de, rx_status, de->dev,
			  "rx slot %d status 0x%x len %d copying? %d\n",
			  rx_tail, status, len, copying_skb);

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = netdev_alloc_skb(de->dev, buflen);
		if (unlikely(!copy_skb)) {
			/* out of memory: drop this packet and drain at most
			 * 100 further descriptors in drop mode */
			de->dev->stats.rx_dropped++;
			drop = 1;
			rx_work = 100;
			goto rx_next;
		}

		if (!copying_skb) {
			/* hand the full-size ring buffer up the stack and
			 * install the newly allocated skb in its place */
			pci_unmap_single(de->pdev, mapping,
					 buflen, PCI_DMA_FROMDEVICE);
			skb_put(skb, len);

			/* NOTE(review): pci_map_single() result is not checked
			 * for a mapping error before reuse — confirm */
			mapping =
			de->rx_skb[rx_tail].mapping =
				pci_map_single(de->pdev, copy_skb->data,
					       buflen, PCI_DMA_FROMDEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;
		} else {
			/* small packet: copy into the fresh skb and keep the
			 * original ring buffer mapped for reuse */
			pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
						  len);
			pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->protocol = eth_type_trans (skb, de->dev);

		de->dev->stats.rx_packets++;
		de->dev->stats.rx_bytes += skb->len;
		rc = netif_rx (skb);
		if (rc == NET_RX_DROP)
			drop = 1;

rx_next:
		/* give the descriptor back to the chip, preserving the
		 * RingEnd marker on the last slot */
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();	/* descriptor body must be visible before DescOwn flips */
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		netdev_warn(de->dev, "rx work limit reached\n");

	de->rx_tail = rx_tail;
}
497 
/* Top-level interrupt handler: acknowledge status, then dispatch Rx,
 * Tx-completion, link-change and PCI-error work.  Rx runs without
 * de->lock; Tx reclaim and media handling run under it. */
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = netdev_priv(dev);
	u32 status;

	status = dr32(MacStatus);
	/* NOTE(review): status is a 32-bit read; a surprise-removed
	 * device reads back as 0xFFFFFFFF, so the 0xFFFF comparison
	 * below looks like it can never match — confirm intent. */
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return IRQ_NONE;

	netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
		  status, dr32(MacMode),
		  de->rx_tail, de->tx_head, de->tx_tail);

	/* writing the set bits back acknowledges them */
	dw32(MacStatus, status);

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		if (status & RxEmpty)
			dw32(RxPoll, NormalRxPoll);	/* restart Rx after buffer exhaustion */
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* read-then-write-back clears the latched PCI error bits */
		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		netdev_err(de->dev,
			   "PCI bus error, status=%08x, PCI status=%04x\n",
			   status, pci_status);
	}

	return IRQ_HANDLED;
}
542 
/* Reclaim finished Tx descriptors and account the results.
 * Caller holds de->lock (see de_interrupt()).  Slots holding the
 * DE_DUMMY_SKB / DE_SETUP_SKB sentinels carry no packet data. */
static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();	/* order the status read against prior descriptor reads */
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;	/* chip has not finished this descriptor yet */

		skb = de->tx_skb[tx_tail].skb;
		BUG_ON(!skb);
		if (unlikely(skb == DE_DUMMY_SKB))
			goto next;	/* errata dummy entry: nothing mapped */

		if (unlikely(skb == DE_SETUP_SKB)) {
			/* setup frame: unmap, but no skb to free or count */
			pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
			goto next;
		}

		pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				netif_dbg(de, tx_err, de->dev,
					  "tx err, status 0x%x\n",
					  status);
				de->dev->stats.tx_errors++;
				if (status & TxOWC)
					de->dev->stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->dev->stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->dev->stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->dev->stats.tx_fifo_errors++;
			} else {
				de->dev->stats.tx_packets++;
				de->dev->stats.tx_bytes += skb->len;
				netif_dbg(de, tx_done, de->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			dev_consume_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	/* restart the queue once a quarter of the ring is free again */
	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}
605 
/* ndo_start_xmit hook: queue one skb as a single-fragment descriptor
 * and kick the transmitter.  Returns NETDEV_TX_BUSY (with the queue
 * stopped) only when the ring is completely full. */
static netdev_tx_t de_start_xmit (struct sk_buff *skb,
					struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned int entry, tx_free;
	u32 mapping, len, flags = FirstFrag | LastFrag;
	struct de_desc *txd;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
	if (tx_free == 0) {
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);
		return NETDEV_TX_BUSY;
	}
	tx_free--;

	entry = de->tx_head;

	txd = &de->tx_ring[entry];

	len = skb->len;
	/* NOTE(review): pci_map_single() result is not checked for a
	 * mapping error before being handed to the chip — confirm */
	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
		flags |= RingEnd;
	/* ask for a Tx interrupt when the ring goes full or half-full */
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
		flags |= TxSwInt;
	flags |= len;
	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;
	wmb();	/* descriptor body must be visible before ownership flips */

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	de->tx_head = NEXT_TX(entry);
	netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);

	if (tx_free == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

	return NETDEV_TX_OK;
}
659 
660 /* Set or clear the multicast filter for this adaptor.
661    Note that we only use exclusion around actually queueing the
662    new frame, not around filling de->setup_frame.  This is non-deterministic
663    when re-entered but still correct. */
664 
/* Build a setup frame using the 512-bin hash filter: 32 hash-table
 * words followed by our unicast address.  Each 16-bit word is written
 * twice because only the low shortword of each 32-bit setup-frame
 * entry is valid (see the comment in __de_set_rx_mode). */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	__set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		/* 9-bit CRC hash selects one of the 512 filter bins */
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		__set_bit_le(index, hash_table);
	}

	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	/* advance to the entry reserved for our own station address */
	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
694 
/* Build a setup frame using the 16-entry perfect filter: one entry
 * per multicast address (the caller guarantees <= 14), broadcast
 * padding for unused entries, and our unicast address last.  Each
 * 16-bit word is stored twice; only the low shortword of each 32-bit
 * entry is valid. */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
719 
720 
/* Program the Rx filter.  Promiscuous / all-multicast modes are pure
 * MacMode bit changes; otherwise a setup frame (perfect filter for
 * <= 14 multicast addresses, hash filter beyond that) is queued on
 * the Tx ring.  Caller holds de->lock. */
static void __de_set_rx_mode (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 macmode;
	unsigned int entry;
	u32 mapping;
	struct de_desc *txd;
	struct de_desc *dummy_txd = NULL;

	macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		macmode |= AcceptAllMulticast | AcceptAllPhys;
		goto out;
	}

	if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		macmode |= AcceptAllMulticast;
		goto out;
	}

	/* Note that only the low-address shortword of setup_frame is valid!
	   The values are doubled for big-endian architectures. */
	if (netdev_mc_count(dev) > 14)	/* Must use a multicast hash table. */
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/*
	 * Now add this frame to the Tx list.
	 */

	entry = de->tx_head;

	/* Avoid a chip errata by prefixing a dummy entry. */
	if (entry != 0) {
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* Must set DescOwned later to avoid race with chip */

		entry = NEXT_TX(entry);
	}

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
	    pci_map_single (de->pdev, de->setup_frame,
			    sizeof (de->setup_frame), PCI_DMA_TODEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);
	wmb();	/* descriptor body must be visible before ownership flips */

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	/* hand the dummy entry to the chip only after the setup-frame
	 * descriptor is fully owned by hardware */
	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
804 
805 static void de_set_rx_mode (struct net_device *dev)
806 {
807 	unsigned long flags;
808 	struct de_private *de = netdev_priv(dev);
809 
810 	spin_lock_irqsave (&de->lock, flags);
811 	__de_set_rx_mode(dev);
812 	spin_unlock_irqrestore (&de->lock, flags);
813 }
814 
815 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
816 {
817 	if (unlikely(rx_missed & RxMissedOver))
818 		de->dev->stats.rx_missed_errors += RxMissedMask;
819 	else
820 		de->dev->stats.rx_missed_errors += (rx_missed & RxMissedMask);
821 }
822 
823 static void __de_get_stats(struct de_private *de)
824 {
825 	u32 tmp = dr32(RxMissed); /* self-clearing */
826 
827 	de_rx_missed(de, tmp);
828 }
829 
830 static struct net_device_stats *de_get_stats(struct net_device *dev)
831 {
832 	struct de_private *de = netdev_priv(dev);
833 
834 	/* The chip only need report frame silently dropped. */
835 	spin_lock_irq(&de->lock);
836  	if (netif_running(dev) && netif_device_present(dev))
837  		__de_get_stats(de);
838 	spin_unlock_irq(&de->lock);
839 
840 	return &dev->stats;
841 }
842 
843 static inline int de_is_running (struct de_private *de)
844 {
845 	return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
846 }
847 
848 static void de_stop_rxtx (struct de_private *de)
849 {
850 	u32 macmode;
851 	unsigned int i = 1300/100;
852 
853 	macmode = dr32(MacMode);
854 	if (macmode & RxTx) {
855 		dw32(MacMode, macmode & ~RxTx);
856 		dr32(MacMode);
857 	}
858 
859 	/* wait until in-flight frame completes.
860 	 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
861 	 * Typically expect this loop to end in < 50 us on 100BT.
862 	 */
863 	while (--i) {
864 		if (!de_is_running(de))
865 			return;
866 		udelay(100);
867 	}
868 
869 	netdev_warn(de->dev, "timeout expired, stopping DMA\n");
870 }
871 
872 static inline void de_start_rxtx (struct de_private *de)
873 {
874 	u32 macmode;
875 
876 	macmode = dr32(MacMode);
877 	if ((macmode & RxTx) != RxTx) {
878 		dw32(MacMode, macmode | RxTx);
879 		dr32(MacMode);
880 	}
881 }
882 
883 static void de_stop_hw (struct de_private *de)
884 {
885 
886 	udelay(5);
887 	dw32(IntrMask, 0);
888 
889 	de_stop_rxtx(de);
890 
891 	dw32(MacStatus, dr32(MacStatus));
892 
893 	udelay(10);
894 
895 	de->rx_tail = 0;
896 	de->tx_head = de->tx_tail = 0;
897 }
898 
899 static void de_link_up(struct de_private *de)
900 {
901 	if (!netif_carrier_ok(de->dev)) {
902 		netif_carrier_on(de->dev);
903 		netif_info(de, link, de->dev, "link up, media %s\n",
904 			   media_name[de->media_type]);
905 	}
906 }
907 
908 static void de_link_down(struct de_private *de)
909 {
910 	if (netif_carrier_ok(de->dev)) {
911 		netif_carrier_off(de->dev);
912 		netif_info(de, link, de->dev, "link down\n");
913 	}
914 }
915 
/* Load the SIA registers (CSR13-15) for de->media_type and set the
 * MacMode duplex bit to match.  Expected to run with DMA stopped;
 * warns if the chip is still running. */
static void de_set_media (struct de_private *de)
{
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

	if (de_is_running(de))
		netdev_warn(de->dev, "chip is running while changing media!\n");

	if (de->de21040)
		dw32(CSR11, FULL_DUPLEX_MAGIC);	/* 21040-only, see FULL_DUPLEX_MAGIC */
	dw32(CSR13, 0); /* Reset phy */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);

	/* must delay 10ms before writing to other registers,
	 * especially CSR6
	 */
	mdelay(10);

	if (media == DE_MEDIA_TP_FD)
		macmode |= FullDuplex;
	else
		macmode &= ~FullDuplex;

	netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
	netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
		   dr32(MacMode), dr32(SIAStatus),
		   dr32(CSR13), dr32(CSR14), dr32(CSR15));
	netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
		   macmode, de->media[media].csr13,
		   de->media[media].csr14, de->media[media].csr15);
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}
951 
952 static void de_next_media (struct de_private *de, const u32 *media,
953 			   unsigned int n_media)
954 {
955 	unsigned int i;
956 
957 	for (i = 0; i < n_media; i++) {
958 		if (de_ok_to_advertise(de, media[i])) {
959 			de->media_type = media[i];
960 			return;
961 		}
962 	}
963 }
964 
/* Periodic link monitor for the 21040: rearm the timer, report link
 * transitions, and alternate between TP and AUI while no link is
 * present (unless the user has locked the media type). */
static void de21040_media_timer (struct timer_list *t)
{
	struct de_private *de = from_timer(de, t, media_timer);
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* TP media additionally requires link-fail to be clear */
		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
			goto no_link_yet;

		/* link is good: re-check at the slow rate */
		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev, "%s link ok, status %x\n",
				   media_name[de->media_type], status);
		return;
	}

	de_link_down(de);

	/* media is forced: keep it, and do not rearm from here */
	if (de->media_lock)
		return;

	/* toggle between the two 21040 media choices */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_state = DE_MEDIA_TP;
		de_next_media(de, &next_state, 1);
	} else {
		static const u32 next_state = DE_MEDIA_AUI;
		de_next_media(de, &next_state, 1);
	}

	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

	/* deliberate fall-through: rearm at the fast no-link rate */
no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}
1015 
1016 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1017 {
1018 	switch (new_media) {
1019 	case DE_MEDIA_TP_AUTO:
1020 		if (!(de->media_advertise & ADVERTISED_Autoneg))
1021 			return 0;
1022 		if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1023 			return 0;
1024 		break;
1025 	case DE_MEDIA_BNC:
1026 		if (!(de->media_advertise & ADVERTISED_BNC))
1027 			return 0;
1028 		break;
1029 	case DE_MEDIA_AUI:
1030 		if (!(de->media_advertise & ADVERTISED_AUI))
1031 			return 0;
1032 		break;
1033 	case DE_MEDIA_TP:
1034 		if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1035 			return 0;
1036 		break;
1037 	case DE_MEDIA_TP_FD:
1038 		if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1039 			return 0;
1040 		break;
1041 	}
1042 
1043 	return 1;
1044 }
1045 
/*
 * 21041 link-maintenance timer.  Polls SIA status, raises/drops the
 * carrier accordingly and, while no link is present, walks through the
 * advertised media types (TP-autoneg / AUI / BNC) looking for one that
 * works.  Rearms itself with DE_TIMER_LINK or DE_TIMER_NO_LINK.
 */
static void de21041_media_timer (struct timer_list *t)
{
	struct de_private *de = from_timer(de, t, media_timer);
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	/* clear port active bits */
	dw32(SIAStatus, NonselPortActive | SelPortActive);

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		/* for TP media a raised LinkFailStatus overrides the
		 * apparent carrier — treat it as no link yet
		 */
		if ((de->media_type == DE_MEDIA_TP_AUTO ||
		     de->media_type == DE_MEDIA_TP ||
		     de->media_type == DE_MEDIA_TP_FD) &&
		    (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev,
				   "%s link ok, mode %x status %x\n",
				   media_name[de->media_type],
				   dr32(MacMode), status);
		return;
	}

	de_link_down(de);

	/* if media type locked, don't switch media */
	if (de->media_lock)
		goto set_media;

	/* if activity detected, use that as hint for new media type */
	if (status & NonselPortActive) {
		unsigned int have_media = 1;

		/* if AUI/BNC selected, then activity is on TP port */
		if (de->media_type == DE_MEDIA_AUI ||
		    de->media_type == DE_MEDIA_BNC) {
			if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
				de->media_type = DE_MEDIA_TP_AUTO;
			else
				have_media = 0;
		}

		/* TP selected.  If there is only TP and BNC, then it's BNC */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
			 de_ok_to_advertise(de, DE_MEDIA_BNC))
			de->media_type = DE_MEDIA_BNC;

		/* TP selected.  If there is only TP and AUI, then it's AUI */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
			 de_ok_to_advertise(de, DE_MEDIA_AUI))
			de->media_type = DE_MEDIA_AUI;

		/* otherwise, ignore the hint */
		else
			have_media = 0;

		if (have_media)
			goto set_media;
	}

	/*
	 * Absent or ambiguous activity hint, move to next advertised
	 * media state.  If de->media_type is left unchanged, this
	 * simply resets the PHY and reloads the current media settings.
	 */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_states[] = {
			DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else if (de->media_type == DE_MEDIA_BNC) {
		static const u32 next_states[] = {
			DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else {
		static const u32 next_states[] = {
			DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	}

set_media:
	/* reprogram the SIA for the (possibly new) media type; rx/tx must
	 * be stopped (under the lock) while media registers are rewritten
	 */
	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

no_link_yet:
	/* set_media deliberately falls through here to rearm the timer */
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}
1151 
/*
 * Handle a media interrupt.  @status must carry either LinkPass or
 * LinkFail (anything else trips the BUG_ON).  On LinkPass: if the
 * current media is AUI/BNC and TP autoneg is allowed and not locked,
 * switch over to TP, then mark the link up.  On LinkFail: drop the
 * link, but only when the current media is a TP variant.
 */
static void de_media_interrupt (struct de_private *de, u32 status)
{
	if (status & LinkPass) {
		/* Ignore if current media is AUI or BNC and we can't use TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC) &&
		    (de->media_lock ||
		     !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
			return;
		/* If current media is not TP, change it to TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC)) {
			de->media_type = DE_MEDIA_TP_AUTO;
			de_stop_rxtx(de);
			de_set_media(de);
			de_start_rxtx(de);
		}
		de_link_up(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
		return;
	}

	BUG_ON(!(status & LinkFail));
	/* Mark the link as down only if current media is TP */
	if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
	    de->media_type != DE_MEDIA_BNC) {
		de_link_down(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	}
}
1182 
/*
 * Soft-reset the MAC and restore the cached bus mode (de_bus_mode).
 *
 * Returns 0 on success; -EBUSY if the chip reads all-ones up front or
 * its rx/tx state machines are still active after the reset; -ENODEV
 * if the status register reads all-ones afterwards (chip vanished).
 */
static int de_reset_mac (struct de_private *de)
{
	u32 status, tmp;

	/*
	 * Reset MAC.  de4x5.c and tulip.c examined for "advice"
	 * in this area.
	 */

	/* all-ones usually means the device is gone from the bus */
	if (dr32(BusMode) == 0xffffffff)
		return -EBUSY;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	dw32 (BusMode, CmdReset);
	mdelay (1);

	dw32 (BusMode, de_bus_mode);
	mdelay (1);

	/* dummy reads to let the reset settle (mimics de4x5/tulip) */
	for (tmp = 0; tmp < 5; tmp++) {
		dr32 (BusMode);
		mdelay (1);
	}

	mdelay (1);

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
		return -EBUSY;
	if (status == 0xffffffff)
		return -ENODEV;
	return 0;
}
1216 
1217 static void de_adapter_wake (struct de_private *de)
1218 {
1219 	u32 pmctl;
1220 
1221 	if (de->de21040)
1222 		return;
1223 
1224 	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1225 	if (pmctl & PM_Mask) {
1226 		pmctl &= ~PM_Mask;
1227 		pci_write_config_dword(de->pdev, PCIPM, pmctl);
1228 
1229 		/* de4x5.c delays, so we do too */
1230 		msleep(10);
1231 	}
1232 }
1233 
/*
 * Put a 21041 into its PCI power-management sleep state; no-op on the
 * 21040 which lacks the PM register.  The PHY is reset (CSR13 = 0)
 * before the sleep bit is set.
 */
static void de_adapter_sleep (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	dw32(CSR13, 0); /* Reset phy */
	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	pmctl |= PM_Sleep;
	pci_write_config_dword(de->pdev, PCIPM, pmctl);
}
1246 
/*
 * Bring the chip to a running state: wake it from PM sleep, reset the
 * MAC, program the media/PHY, point the chip at the descriptor rings,
 * enable rx/tx, unmask interrupts and load the rx filter.
 *
 * Returns 0 on success or the de_reset_mac() error code.
 */
static int de_init_hw (struct de_private *de)
{
	struct net_device *dev = de->dev;
	u32 macmode;
	int rc;

	de_adapter_wake(de);

	/* preserve MacMode bits that survive reset, minus MacModeClear */
	macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);
	if (rc)
		return rc;

	de_set_media(de); /* reset phy */

	/* rings live in one DMA block: rx first, tx right behind it */
	dw32(RxRingAddr, de->ring_dma);
	dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | macmode);

	dr32(RxMissed); /* self-clearing */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);

	return 0;
}
1276 
/*
 * Populate every Rx ring slot with a freshly allocated, DMA-mapped skb
 * and hand the descriptors to the chip (DescOwn).  The last descriptor
 * gets RingEnd so the chip wraps.  On allocation failure all rings are
 * cleaned and -ENOMEM is returned.
 *
 * NOTE(review): the pci_map_single() result is not checked with
 * pci_dma_mapping_error() — mapping failures would go unnoticed here.
 */
static int de_refill_rx (struct de_private *de)
{
	unsigned i;

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
		if (!skb)
			goto err_out;

		de->rx_skb[i].mapping = pci_map_single(de->pdev,
			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
		de->rx_skb[i].skb = skb;

		/* give the descriptor to the hardware */
		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
		if (i == (DE_RX_RING_SIZE - 1))
			de->rx_ring[i].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
		de->rx_ring[i].addr2 = 0;
	}

	return 0;

err_out:
	de_clean_rings(de);
	return -ENOMEM;
}
1308 
1309 static int de_init_rings (struct de_private *de)
1310 {
1311 	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1312 	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1313 
1314 	de->rx_tail = 0;
1315 	de->tx_head = de->tx_tail = 0;
1316 
1317 	return de_refill_rx (de);
1318 }
1319 
1320 static int de_alloc_rings (struct de_private *de)
1321 {
1322 	de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1323 	if (!de->rx_ring)
1324 		return -ENOMEM;
1325 	de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1326 	return de_init_rings(de);
1327 }
1328 
/*
 * Release every buffer attached to the rings: wipe the descriptors
 * first (so the chip, if still looking, sees nothing it owns), then
 * unmap and free the Rx skbs and any pending Tx skbs.  The sentinel
 * values DE_DUMMY_SKB / DE_SETUP_SKB mark slots that hold no real skb
 * or hold the setup frame, respectively.
 */
static void de_clean_rings (struct de_private *de)
{
	unsigned i;

	memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
	de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		if (de->rx_skb[i].skb) {
			pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
					 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(de->rx_skb[i].skb);
		}
	}

	for (i = 0; i < DE_TX_RING_SIZE; i++) {
		struct sk_buff *skb = de->tx_skb[i].skb;
		if ((skb) && (skb != DE_DUMMY_SKB)) {
			if (skb != DE_SETUP_SKB) {
				/* real, never-completed packet: count it */
				de->dev->stats.tx_dropped++;
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb(skb);
			} else {
				/* setup frame: unmap only, nothing to free */
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					sizeof(de->setup_frame),
					PCI_DMA_TODEVICE);
			}
		}
	}

	memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
	memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}
1369 
/*
 * Free all ring buffers, then release the DMA-coherent descriptor
 * block allocated by de_alloc_rings().  Ring pointers are NULLed so a
 * stale reference is an obvious crash rather than a silent corruption.
 */
static void de_free_rings (struct de_private *de)
{
	de_clean_rings(de);
	pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
	de->rx_ring = NULL;
	de->tx_ring = NULL;
}
1377 
/*
 * ndo_open: size the rx buffers for the current MTU, allocate the
 * descriptor rings, grab the (shared) IRQ, start the hardware and the
 * media poll timer.  Interrupts are masked before request_irq() so a
 * shared-line event cannot hit the handler before the chip is ready.
 * Unwinds via gotos on failure.
 */
static int de_open (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	const int irq = de->pdev->irq;
	int rc;

	netif_dbg(de, ifup, dev, "enabling interface\n");

	/* standard buffer for MTU <= 1500, otherwise MTU plus slack */
	de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	rc = de_alloc_rings(de);
	if (rc) {
		netdev_err(dev, "ring allocation failure, err=%d\n", rc);
		return rc;
	}

	dw32(IntrMask, 0);

	rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
		goto err_out_free;
	}

	rc = de_init_hw(de);
	if (rc) {
		netdev_err(dev, "h/w init failure, err=%d\n", rc);
		goto err_out_free_irq;
	}

	netif_start_queue(dev);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

	return 0;

err_out_free_irq:
	free_irq(irq, dev);
err_out_free:
	de_free_rings(de);
	return rc;
}
1419 
/*
 * ndo_stop: kill the media timer, stop the hardware and queue under
 * the lock, release the IRQ, free the rings and put the chip to sleep.
 * Always returns 0.
 */
static int de_close (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned long flags;

	netif_dbg(de, ifdown, dev, "disabling interface\n");

	del_timer_sync(&de->media_timer);

	spin_lock_irqsave(&de->lock, flags);
	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&de->lock, flags);

	free_irq(de->pdev->irq, dev);

	de_free_rings(de);
	de_adapter_sleep(de);
	return 0;
}
1441 
/*
 * ndo_tx_timeout: recover from a wedged transmitter by stopping the
 * hardware, harvesting the error counters, rebuilding both rings from
 * scratch and restarting the chip.  The IRQ is disabled around the
 * stop so the handler cannot race the teardown.
 */
static void de_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct de_private *de = netdev_priv(dev);
	const int irq = de->pdev->irq;

	netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
		   dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
		   de->rx_tail, de->tx_head, de->tx_tail);

	del_timer_sync(&de->media_timer);

	disable_irq(irq);
	spin_lock_irq(&de->lock);

	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&de->lock);
	enable_irq(irq);

	/* Update the error counts. */
	__de_get_stats(de);

	/* make sure any in-flight handler has finished before we rebuild */
	synchronize_irq(irq);
	de_clean_rings(de);

	de_init_rings(de);

	de_init_hw(de);

	netif_wake_queue(dev);
}
1475 
1476 static void __de_get_regs(struct de_private *de, u8 *buf)
1477 {
1478 	int i;
1479 	u32 *rbuf = (u32 *)buf;
1480 
1481 	/* read all CSRs */
1482 	for (i = 0; i < DE_NUM_REGS; i++)
1483 		rbuf[i] = dr32(i * 8);
1484 
1485 	/* handle self-clearing RxMissed counter, CSR8 */
1486 	de_rx_missed(de, rbuf[8]);
1487 }
1488 
1489 static void __de_get_link_ksettings(struct de_private *de,
1490 				    struct ethtool_link_ksettings *cmd)
1491 {
1492 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1493 						de->media_supported);
1494 	cmd->base.phy_address = 0;
1495 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1496 						de->media_advertise);
1497 
1498 	switch (de->media_type) {
1499 	case DE_MEDIA_AUI:
1500 		cmd->base.port = PORT_AUI;
1501 		break;
1502 	case DE_MEDIA_BNC:
1503 		cmd->base.port = PORT_BNC;
1504 		break;
1505 	default:
1506 		cmd->base.port = PORT_TP;
1507 		break;
1508 	}
1509 
1510 	cmd->base.speed = 10;
1511 
1512 	if (dr32(MacMode) & FullDuplex)
1513 		cmd->base.duplex = DUPLEX_FULL;
1514 	else
1515 		cmd->base.duplex = DUPLEX_HALF;
1516 
1517 	if (de->media_lock)
1518 		cmd->base.autoneg = AUTONEG_DISABLE;
1519 	else
1520 		cmd->base.autoneg = AUTONEG_ENABLE;
1521 
1522 	/* ignore maxtxpkt, maxrxpkt for now */
1523 }
1524 
/*
 * Apply new ethtool link settings.  Caller must hold de->lock.
 *
 * Validates the requested speed/duplex/port/autoneg combination against
 * de->media_supported, maps it to a DE_MEDIA_* type, and — only if
 * something actually changed — drops the link and reprograms the SIA.
 * Returns 0 on success or -EINVAL for an unsupported combination.
 */
static int __de_set_link_ksettings(struct de_private *de,
				   const struct ethtool_link_ksettings *cmd)
{
	u32 new_media;
	unsigned int media_lock;
	u8 duplex = cmd->base.duplex;
	u8 port = cmd->base.port;
	u8 autoneg = cmd->base.autoneg;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* hardware is 10 Mb/s only; BNC exists only on the 21041 */
	if (cmd->base.speed != 10)
		return -EINVAL;
	if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL)
		return -EINVAL;
	if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC)
		return -EINVAL;
	if (de->de21040 && port == PORT_BNC)
		return -EINVAL;
	if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if (advertising & ~de->media_supported)
		return -EINVAL;
	if (autoneg == AUTONEG_ENABLE &&
	    (!(advertising & ADVERTISED_Autoneg)))
		return -EINVAL;

	/* map the requested port/duplex/autoneg to a DE_MEDIA_* type */
	switch (port) {
	case PORT_AUI:
		new_media = DE_MEDIA_AUI;
		if (!(advertising & ADVERTISED_AUI))
			return -EINVAL;
		break;
	case PORT_BNC:
		new_media = DE_MEDIA_BNC;
		if (!(advertising & ADVERTISED_BNC))
			return -EINVAL;
		break;
	default:
		if (autoneg == AUTONEG_ENABLE)
			new_media = DE_MEDIA_TP_AUTO;
		else if (duplex == DUPLEX_FULL)
			new_media = DE_MEDIA_TP_FD;
		else
			new_media = DE_MEDIA_TP;
		if (!(advertising & ADVERTISED_TP))
			return -EINVAL;
		if (!(advertising & (ADVERTISED_10baseT_Full |
				     ADVERTISED_10baseT_Half)))
			return -EINVAL;
		break;
	}

	media_lock = (autoneg == AUTONEG_ENABLE) ? 0 : 1;

	if ((new_media == de->media_type) &&
	    (media_lock == de->media_lock) &&
	    (advertising == de->media_advertise))
		return 0; /* nothing to change */

	de_link_down(de);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	de_stop_rxtx(de);

	de->media_type = new_media;
	de->media_lock = media_lock;
	de->media_advertise = advertising;
	de_set_media(de);
	if (netif_running(de->dev))
		de_start_rxtx(de);

	return 0;
}
1600 
1601 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1602 {
1603 	struct de_private *de = netdev_priv(dev);
1604 
1605 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1606 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1607 	strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
1608 }
1609 
/* ethtool: size in bytes of the register dump produced by de_get_regs() */
static int de_get_regs_len(struct net_device *dev)
{
	return DE_REGS_SIZE;
}
1614 
1615 static int de_get_link_ksettings(struct net_device *dev,
1616 				 struct ethtool_link_ksettings *cmd)
1617 {
1618 	struct de_private *de = netdev_priv(dev);
1619 
1620 	spin_lock_irq(&de->lock);
1621 	__de_get_link_ksettings(de, cmd);
1622 	spin_unlock_irq(&de->lock);
1623 
1624 	return 0;
1625 }
1626 
1627 static int de_set_link_ksettings(struct net_device *dev,
1628 				 const struct ethtool_link_ksettings *cmd)
1629 {
1630 	struct de_private *de = netdev_priv(dev);
1631 	int rc;
1632 
1633 	spin_lock_irq(&de->lock);
1634 	rc = __de_set_link_ksettings(de, cmd);
1635 	spin_unlock_irq(&de->lock);
1636 
1637 	return rc;
1638 }
1639 
/* ethtool: report the current netif debug message level bitmap */
static u32 de_get_msglevel(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);

	return de->msg_enable;
}
1646 
/* ethtool: set the netif debug message level bitmap */
static void de_set_msglevel(struct net_device *dev, u32 msglvl)
{
	struct de_private *de = netdev_priv(dev);

	de->msg_enable = msglvl;
}
1653 
1654 static int de_get_eeprom(struct net_device *dev,
1655 			 struct ethtool_eeprom *eeprom, u8 *data)
1656 {
1657 	struct de_private *de = netdev_priv(dev);
1658 
1659 	if (!de->ee_data)
1660 		return -EOPNOTSUPP;
1661 	if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1662 	    (eeprom->len != DE_EEPROM_SIZE))
1663 		return -EINVAL;
1664 	memcpy(data, de->ee_data, eeprom->len);
1665 
1666 	return 0;
1667 }
1668 
/*
 * ethtool nway_reset: restart TP autonegotiation by writing NWayRestart
 * (with the NWay state bits cleared) to the SIA status register.
 * Only valid when the current media is DE_MEDIA_TP_AUTO.
 */
static int de_nway_reset(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 status;

	if (de->media_type != DE_MEDIA_TP_AUTO)
		return -EINVAL;
	if (netif_carrier_ok(de->dev))
		de_link_down(de);

	status = dr32(SIAStatus);
	dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
	netif_info(de, link, dev, "link nway restart, status %x,%x\n",
		   status, dr32(SIAStatus));
	return 0;
}
1685 
/*
 * ethtool: dump all CSRs into @data under the driver lock.
 * regs->version encodes the layout revision in the upper bits and the
 * chip flavor in bit 0 (1 = 21040).
 */
static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *data)
{
	struct de_private *de = netdev_priv(dev);

	regs->version = (DE_REGS_VER << 2) | de->de21040;

	spin_lock_irq(&de->lock);
	__de_get_regs(de, data);
	spin_unlock_irq(&de->lock);
}
1697 
/* ethtool entry points exposed by this driver */
static const struct ethtool_ops de_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= de_get_drvinfo,
	.get_regs_len		= de_get_regs_len,
	.get_msglevel		= de_get_msglevel,
	.set_msglevel		= de_set_msglevel,
	.get_eeprom		= de_get_eeprom,
	.nway_reset		= de_nway_reset,
	.get_regs		= de_get_regs,
	.get_link_ksettings	= de_get_link_ksettings,
	.set_link_ksettings	= de_set_link_ksettings,
};
1710 
/*
 * Read the six MAC address bytes from the 21040's address ROM through
 * the serial ROM command register.  Each byte is polled until the
 * register's top bit clears — bit 31 appears to be the chip's
 * "data not ready" flag, hence the signed "value < 0" test — with a
 * bounded spin so a dead ROM cannot hang the probe.
 */
static void de21040_get_mac_address(struct de_private *de)
{
	unsigned i;

	dw32 (ROMCmd, 0);	/* Reset the pointer with a dummy write. */
	udelay(5);

	for (i = 0; i < 6; i++) {
		int value, boguscnt = 100000;
		do {
			value = dr32(ROMCmd);
			rmb();
		} while (value < 0 && --boguscnt > 0);
		de->dev->dev_addr[i] = value;
		udelay(1);
		if (boguscnt <= 0)
			pr_warn("timeout reading 21040 MAC address byte %u\n",
				i);
	}
}
1731 
1732 static void de21040_get_media_info(struct de_private *de)
1733 {
1734 	unsigned int i;
1735 
1736 	de->media_type = DE_MEDIA_TP;
1737 	de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1738 			       SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1739 	de->media_advertise = de->media_supported;
1740 
1741 	for (i = 0; i < DE_MAX_MEDIA; i++) {
1742 		switch (i) {
1743 		case DE_MEDIA_AUI:
1744 		case DE_MEDIA_TP:
1745 		case DE_MEDIA_TP_FD:
1746 			de->media[i].type = i;
1747 			de->media[i].csr13 = t21040_csr13[i];
1748 			de->media[i].csr14 = t21040_csr14[i];
1749 			de->media[i].csr15 = t21040_csr15[i];
1750 			break;
1751 		default:
1752 			de->media[i].type = DE_MEDIA_INVALID;
1753 			break;
1754 		}
1755 	}
1756 }
1757 
/* Note: this routine returns extra data bits for size detection. */
/*
 * Bit-bang one read transaction to the serial EEPROM through the ROMCmd
 * CSR: clock out the read opcode plus an @addr_len-bit address, then
 * clock in 16 data bits MSB-first.  Each writel() is followed by a
 * readl() to flush the posted write (which also paces the clock).
 */
static unsigned tulip_read_eeprom(void __iomem *regs, int location,
				  int addr_len)
{
	int i;
	unsigned retval = 0;
	void __iomem *ee_addr = regs + ROMCmd;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	/* select the chip */
	writel(EE_ENB & ~EE_CS, ee_addr);
	writel(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writel(EE_ENB | dataval, ee_addr);
		readl(ee_addr);
		writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
	}
	writel(EE_ENB, ee_addr);
	readl(ee_addr);

	/* shift in the 16 data bits */
	for (i = 16; i > 0; i--) {
		writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writel(EE_ENB, ee_addr);
		readl(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
1794 
/*
 * Read and parse the 21041's SROM: extract the MAC address, the default
 * media type, and per-media SIA register overrides from the controller-0
 * info leaf.  Any structural problem in the SROM falls back (bad_srom)
 * to "support everything" with default CSR tables.  A copy of the image
 * is cached in de->ee_data for ethtool; if that kmemdup() fails,
 * ee_data stays NULL and de_get_eeprom() reports -EOPNOTSUPP.
 */
static void de21041_get_srom_info(struct de_private *de)
{
	unsigned i, sa_offset = 0, ofs;
	u8 ee_data[DE_EEPROM_SIZE + 6] = {};
	/* probe the EEPROM address width: 8-bit parts echo a flag bit */
	unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
	struct de_srom_info_leaf *il;
	void *bufp;

	/* download entire eeprom */
	for (i = 0; i < DE_EEPROM_WORDS; i++)
		((__le16 *)ee_data)[i] =
			cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));

	/* DEC now has a specification but early board makers
	   just put the address in the first EEPROM locations. */
	/* This does  memcmp(eedata, eedata+16, 8) */

#ifndef CONFIG_MIPS_COBALT

	for (i = 0; i < 8; i ++)
		if (ee_data[i] != ee_data[16+i])
			sa_offset = 20;

#endif

	/* store MAC address */
	for (i = 0; i < 6; i ++)
		de->dev->dev_addr[i] = ee_data[i + sa_offset];

	/* get offset of controller 0 info leaf.  ignore 2nd byte. */
	ofs = ee_data[SROMC0InfoLeaf];
	if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
		goto bad_srom;

	/* get pointer to info leaf */
	il = (struct de_srom_info_leaf *) &ee_data[ofs];

	/* paranoia checks */
	if (il->n_blocks == 0)
		goto bad_srom;
	if ((sizeof(ee_data) - ofs) <
	    (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
		goto bad_srom;

	/* get default media type */
	switch (get_unaligned(&il->default_media)) {
	case 0x0001:  de->media_type = DE_MEDIA_BNC; break;
	case 0x0002:  de->media_type = DE_MEDIA_AUI; break;
	case 0x0204:  de->media_type = DE_MEDIA_TP_FD; break;
	default: de->media_type = DE_MEDIA_TP_AUTO; break;
	}

	if (netif_msg_probe(de))
		pr_info("de%d: SROM leaf offset %u, default media %s\n",
		       de->board_idx, ofs, media_name[de->media_type]);

	/* init SIA register values to defaults */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		de->media[i].type = DE_MEDIA_INVALID;
		de->media[i].csr13 = 0xffff;
		de->media[i].csr14 = 0xffff;
		de->media[i].csr15 = 0xffff;
	}

	/* parse media blocks to see what medias are supported,
	 * and if any custom CSR values are provided
	 */
	bufp = ((void *)il) + sizeof(*il);
	for (i = 0; i < il->n_blocks; i++) {
		struct de_srom_media_block *ib = bufp;
		unsigned idx;

		/* index based on media type in media block */
		switch(ib->opts & MediaBlockMask) {
		case 0: /* 10baseT */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		case 1: /* BNC */
			de->media_supported |= SUPPORTED_BNC;
			idx = DE_MEDIA_BNC;
			break;
		case 2: /* AUI */
			de->media_supported |= SUPPORTED_AUI;
			idx = DE_MEDIA_AUI;
			break;
		case 4: /* 10baseT-FD */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP_FD;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		default:
			goto bad_srom;
		}

		de->media[idx].type = idx;

		if (netif_msg_probe(de))
			pr_info("de%d:   media block #%u: %s",
				de->board_idx, i,
				media_name[de->media[idx].type]);

		bufp += sizeof (ib->opts);

		/* media blocks are variable length: CSR overrides follow
		 * the opts byte only when MediaCustomCSRs is set
		 */
		if (ib->opts & MediaCustomCSRs) {
			de->media[idx].csr13 = get_unaligned(&ib->csr13);
			de->media[idx].csr14 = get_unaligned(&ib->csr14);
			de->media[idx].csr15 = get_unaligned(&ib->csr15);
			bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
				sizeof(ib->csr15);

			if (netif_msg_probe(de))
				pr_cont(" (%x,%x,%x)\n",
					de->media[idx].csr13,
					de->media[idx].csr14,
					de->media[idx].csr15);

		} else {
			if (netif_msg_probe(de))
				pr_cont("\n");
		}

		/* stop before a partial block could run off the image */
		if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
			break;
	}

	de->media_advertise = de->media_supported;

fill_defaults:
	/* fill in defaults, for cases where custom CSRs not used */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		if (de->media[i].csr13 == 0xffff)
			de->media[i].csr13 = t21041_csr13[i];
		if (de->media[i].csr14 == 0xffff) {
			/* autonegotiation is broken at least on some chip
			   revisions - rev. 0x21 works, 0x11 does not */
			if (de->pdev->revision < 0x20)
				de->media[i].csr14 = t21041_csr14_brk[i];
			else
				de->media[i].csr14 = t21041_csr14[i];
		}
		if (de->media[i].csr15 == 0xffff)
			de->media[i].csr15 = t21041_csr15[i];
	}

	/* cache the image for ethtool -e; NULL on failure is tolerated */
	de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);

	return;

bad_srom:
	/* for error cases, it's ok to assume we support all these */
	for (i = 0; i < DE_MAX_MEDIA; i++)
		de->media[i].type = i;
	de->media_supported =
		SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_Autoneg |
		SUPPORTED_TP |
		SUPPORTED_AUI |
		SUPPORTED_BNC;
	goto fill_defaults;
}
1960 
/* net_device entry points exposed by this driver */
static const struct net_device_ops de_netdev_ops = {
	.ndo_open		= de_open,
	.ndo_stop		= de_close,
	.ndo_set_rx_mode	= de_set_rx_mode,
	.ndo_start_xmit		= de_start_xmit,
	.ndo_get_stats		= de_get_stats,
	.ndo_tx_timeout 	= de_tx_timeout,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1971 
/*
 * PCI probe: allocate and register a network device for one 21040/21041
 * (ent->driver_data == 0 selects 21040).  Maps the MMIO CSR window,
 * resets the MAC, reads the MAC address and media capabilities (from
 * the address ROM on 21040, the SROM on 21041), registers the netdev
 * and puts the chip to sleep until the interface is opened.  Unwinds
 * via the err_out_* labels in reverse acquisition order on failure.
 */
static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct de_private *de;
	int rc;
	void __iomem *regs;
	unsigned long pciaddr;
	static int board_idx = -1;

	board_idx++;

#ifndef MODULE
	if (board_idx == 0)
		pr_info("%s\n", version);
#endif

	/* allocate a new ethernet device structure, and fill in defaults */
	dev = alloc_etherdev(sizeof(struct de_private));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &de_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->ethtool_ops = &de_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	de = netdev_priv(dev);
	de->de21040 = ent->driver_data == 0 ? 1 : 0;
	de->pdev = pdev;
	de->dev = dev;
	de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
	de->board_idx = board_idx;
	spin_lock_init (&de->lock);
	/* each chip flavor gets its own media poll routine */
	timer_setup(&de->media_timer,
		    de->de21040 ? de21040_media_timer : de21041_media_timer,
		    0);

	netif_carrier_off(dev);

	/* wake up device, assign resources */
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_free;

	/* reserve PCI resources to ensure driver atomicity */
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	/* check for invalid IRQ value */
	if (pdev->irq < 2) {
		rc = -EIO;
		pr_err("invalid irq (%d) for pci dev %s\n",
		       pdev->irq, pci_name(pdev));
		goto err_out_res;
	}

	/* obtain and check validity of PCI I/O address */
	pciaddr = pci_resource_start(pdev, 1);
	if (!pciaddr) {
		rc = -EIO;
		pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
		goto err_out_res;
	}
	if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
		rc = -EIO;
		pr_err("MMIO resource (%llx) too small on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pci_name(pdev));
		goto err_out_res;
	}

	/* remap CSR registers */
	regs = ioremap(pciaddr, DE_REGS_SIZE);
	if (!regs) {
		rc = -EIO;
		pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
		       (unsigned long long)pci_resource_len(pdev, 1),
		       pciaddr, pci_name(pdev));
		goto err_out_res;
	}
	de->regs = regs;

	de_adapter_wake(de);

	/* make sure hardware is not running */
	rc = de_reset_mac(de);
	if (rc) {
		pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
		goto err_out_iomap;
	}

	/* get MAC address, initialize default media type and
	 * get list of supported media
	 */
	if (de->de21040) {
		de21040_get_mac_address(de);
		de21040_get_media_info(de);
	} else {
		de21041_get_srom_info(de);
	}

	/* register new network interface with kernel */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_iomap;

	/* print info about board and interface just registered */
	netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
		    de->de21040 ? "21040" : "21041",
		    regs, dev->dev_addr, pdev->irq);

	pci_set_drvdata(pdev, dev);

	/* enable busmastering */
	pci_set_master(pdev);

	/* put adapter to sleep */
	de_adapter_sleep(de);

	return 0;

err_out_iomap:
	kfree(de->ee_data);
	iounmap(regs);
err_out_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_free:
	free_netdev(dev);
	return rc;
}
2105 
/* PCI remove: tear down everything de_init_one() set up, in reverse
 * acquisition order.
 */
static void de_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct de_private *de = netdev_priv(dev);

	BUG_ON(!dev);
	unregister_netdev(dev);
	kfree(de->ee_data);
	iounmap(de->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
2119 
2120 #ifdef CONFIG_PM
2121 
/*
 * Legacy PCI suspend: under RTNL, stop the media timer and hardware,
 * detach the netdev, flush the error counters, drop the ring buffers
 * and put the chip into PM sleep before disabling the PCI device.  An
 * interface that is down is merely detached.  Always returns 0.
 */
static int de_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct de_private *de = netdev_priv(dev);

	rtnl_lock();
	if (netif_running (dev)) {
		const int irq = pdev->irq;

		del_timer_sync(&de->media_timer);

		disable_irq(irq);
		spin_lock_irq(&de->lock);

		de_stop_hw(de);
		netif_stop_queue(dev);
		netif_device_detach(dev);
		netif_carrier_off(dev);

		spin_unlock_irq(&de->lock);
		enable_irq(irq);

		/* Update the error counts. */
		__de_get_stats(de);

		/* let any in-flight handler finish before freeing buffers */
		synchronize_irq(irq);
		de_clean_rings(de);

		de_adapter_sleep(de);
		pci_disable_device(pdev);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}
2158 
/*
 * Legacy PCI resume: under RTNL, re-enable the PCI device, rebuild the
 * rings and restart the hardware for a running interface, then reattach
 * the netdev.  A device never detached is left alone; a down interface
 * is only reattached.
 *
 * Fix: the original computed the pci_enable_device() status into
 * `retval` but unconditionally returned 0, silently swallowing the
 * failure; the status is now propagated to the PM core.
 */
static int de_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct de_private *de = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;
	if (!netif_running(dev))
		goto out_attach;
	retval = pci_enable_device(pdev);
	if (retval) {
		netdev_err(dev, "pci_enable_device failed in resume\n");
		goto out;
	}
	pci_set_master(pdev);
	de_init_rings(de);
	/* NOTE(review): de_init_hw() can fail; its status is ignored here
	 * as in the original code — confirm whether it should propagate
	 */
	de_init_hw(de);
out_attach:
	netif_device_attach(dev);
out:
	rtnl_unlock();
	return retval;
}
2183 
2184 #endif /* CONFIG_PM */
2185 
/* PCI driver glue; legacy .suspend/.resume hooks are compiled in only
 * when CONFIG_PM is set.
 */
static struct pci_driver de_driver = {
	.name		= DRV_NAME,
	.id_table	= de_pci_tbl,
	.probe		= de_init_one,
	.remove		= de_remove_one,
#ifdef CONFIG_PM
	.suspend	= de_suspend,
	.resume		= de_resume,
#endif
};
2196 
/* module init: announce the version (modular builds print it here;
 * built-in kernels print it from the first probe) and register the
 * PCI driver.
 */
static int __init de_init (void)
{
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	return pci_register_driver(&de_driver);
}
2204 
/* module exit: unregister the PCI driver (detaches all boards) */
static void __exit de_exit (void)
{
	pci_unregister_driver (&de_driver);
}

module_init(de_init);
module_exit(de_exit);
2212