xref: /openbsd/sys/dev/ic/dc.c (revision 404b540a)
1 /*	$OpenBSD: dc.c,v 1.112 2009/08/10 20:29:54 deraadt Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35  */
36 
37 /*
38  * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39  * series chips and several workalikes including the following:
40  *
41  * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42  * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43  * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44  * ASIX Electronics AX88140A (www.asix.com.tw)
45  * ASIX Electronics AX88141 (www.asix.com.tw)
46  * ADMtek AL981 (www.admtek.com.tw)
47  * ADMtek AN983 (www.admtek.com.tw)
48  * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49  * Accton EN1217, EN2242 (www.accton.com)
50  * Xircom X3201 (www.xircom.com)
51  *
52  * Datasheets for the 21143 are available at developer.intel.com.
53  * Datasheets for the clone parts can be found at their respective sites.
54  * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55  * The PNIC II is essentially a Macronix 98715A chip; the only difference
56  * worth noting is that its multicast hash table is only 128 bits wide
57  * instead of 512.
58  *
59  * Written by Bill Paul <wpaul@ee.columbia.edu>
60  * Electrical Engineering Department
61  * Columbia University, New York City
62  */
63 
64 /*
65  * The Intel 21143 is the successor to the DEC 21140. It is basically
66  * the same as the 21140 but with a few new features. The 21143 supports
67  * three kinds of media attachments:
68  *
69  * o MII port, for 10Mbps and 100Mbps support and NWAY
70  *   autonegotiation provided by an external PHY.
71  * o SYM port, for symbol mode 100Mbps support.
72  * o 10baseT port.
73  * o AUI/BNC port.
74  *
75  * The 100Mbps SYM port and 10baseT port can be used together in
76  * combination with the internal NWAY support to create a 10/100
77  * autosensing configuration.
78  *
79  * Note that not all tulip workalikes are handled in this driver: we only
80  * deal with those which are relatively well behaved. The Winbond is
81  * handled separately due to its different register offsets and the
82  * special handling needed for its various bugs. The PNIC is handled
83  * here, but I'm not thrilled about it.
84  *
85  * All of the workalike chips use some form of MII transceiver support
86  * with the exception of the Macronix chips, which also have a SYM port.
87  * The ASIX AX88140A is also documented to have a SYM port, but all
88  * the cards I've seen use an MII transceiver, probably because the
89  * AX88140A doesn't support internal NWAY.
90  */
91 
92 #include "bpfilter.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/mbuf.h>
97 #include <sys/protosw.h>
98 #include <sys/socket.h>
99 #include <sys/ioctl.h>
100 #include <sys/errno.h>
101 #include <sys/malloc.h>
102 #include <sys/kernel.h>
103 #include <sys/device.h>
104 #include <sys/timeout.h>
105 
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_types.h>
109 
110 #ifdef INET
111 #include <netinet/in.h>
112 #include <netinet/in_systm.h>
113 #include <netinet/in_var.h>
114 #include <netinet/ip.h>
115 #include <netinet/if_ether.h>
116 #endif
117 
118 #include <net/if_media.h>
119 
120 #if NBPFILTER > 0
121 #include <net/bpf.h>
122 #endif
123 
124 #include <dev/mii/mii.h>
125 #include <dev/mii/miivar.h>
126 
127 #include <machine/bus.h>
128 #include <dev/pci/pcidevs.h>
129 
130 #include <dev/ic/dcreg.h>
131 
132 int dc_intr(void *);
133 void dc_power(int, void *);
134 struct dc_type *dc_devtype(void *);
135 int dc_newbuf(struct dc_softc *, int, struct mbuf *);
136 int dc_encap(struct dc_softc *, struct mbuf *, u_int32_t *);
137 int dc_coal(struct dc_softc *, struct mbuf **);
138 
139 void dc_pnic_rx_bug_war(struct dc_softc *, int);
140 int dc_rx_resync(struct dc_softc *);
141 void dc_rxeof(struct dc_softc *);
142 void dc_txeof(struct dc_softc *);
143 void dc_tick(void *);
144 void dc_tx_underrun(struct dc_softc *);
145 void dc_start(struct ifnet *);
146 int dc_ioctl(struct ifnet *, u_long, caddr_t);
147 void dc_init(void *);
148 void dc_stop(struct dc_softc *);
149 void dc_watchdog(struct ifnet *);
150 int dc_ifmedia_upd(struct ifnet *);
151 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
152 
153 void dc_delay(struct dc_softc *);
154 void dc_eeprom_width(struct dc_softc *);
155 void dc_eeprom_idle(struct dc_softc *);
156 void dc_eeprom_putbyte(struct dc_softc *, int);
157 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
158 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
159 void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
160 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
161 
162 void dc_mii_writebit(struct dc_softc *, int);
163 int dc_mii_readbit(struct dc_softc *);
164 void dc_mii_sync(struct dc_softc *);
165 void dc_mii_send(struct dc_softc *, u_int32_t, int);
166 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
167 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
168 int dc_miibus_readreg(struct device *, int, int);
169 void dc_miibus_writereg(struct device *, int, int, int);
170 void dc_miibus_statchg(struct device *);
171 
172 void dc_setcfg(struct dc_softc *, int);
173 u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
174 u_int32_t dc_crc_be(caddr_t);
175 void dc_setfilt_21143(struct dc_softc *);
176 void dc_setfilt_asix(struct dc_softc *);
177 void dc_setfilt_admtek(struct dc_softc *);
178 void dc_setfilt_xircom(struct dc_softc *);
179 
180 void dc_setfilt(struct dc_softc *);
181 
182 void dc_reset(struct dc_softc *);
183 int dc_list_rx_init(struct dc_softc *);
184 int dc_list_tx_init(struct dc_softc *);
185 
186 void dc_read_srom(struct dc_softc *, int);
187 void dc_parse_21143_srom(struct dc_softc *);
188 void dc_decode_leaf_sia(struct dc_softc *,
189 				     struct dc_eblock_sia *);
190 void dc_decode_leaf_mii(struct dc_softc *,
191 				     struct dc_eblock_mii *);
192 void dc_decode_leaf_sym(struct dc_softc *,
193 				     struct dc_eblock_sym *);
194 void dc_apply_fixup(struct dc_softc *, int);
195 
196 #define DC_SETBIT(sc, reg, x)				\
197 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
198 
199 #define DC_CLRBIT(sc, reg, x)				\
200 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
201 
202 #define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
203 #define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
204 
205 void
206 dc_delay(struct dc_softc *sc)
207 {
208 	int idx;
209 
210 	for (idx = (300 / 33) + 1; idx > 0; idx--)
211 		CSR_READ_4(sc, DC_BUSCTL);
212 }
213 
/*
 * Probe the address width of the serial (MicroWire) EEPROM.
 * We clock out a READ opcode followed by zero address bits; the EEPROM
 * pulls DATAOUT low once it has received a complete address, so the
 * number of clocks needed reveals how many address bits the part wants.
 * The result is stored in sc->dc_romwidth for dc_eeprom_putbyte().
 */
void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode (binary 110 == 6), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in zero address bits until DATAOUT goes low, which marks
	 * the end of the address field; 'i' is then the address width.
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Implausible result: fall back to the common 6-bit width. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
277 
/*
 * Return the EEPROM interface to its idle state: select the EEPROM,
 * strobe the clock 25 times with chip select asserted so any partial
 * command in the device is flushed, then deassert everything.
 */
void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Toggle the clock enough times to flush any pending command. */
	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Drop clock and chip select, then clear the whole SIO register. */
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}
305 
/*
 * Send the READ opcode and a word address to the EEPROM, MSB first.
 * Note: no ACK bit is actually sampled here; the caller simply starts
 * clocking data bits in afterwards.
 */
void
dc_eeprom_putbyte(struct dc_softc *sc, int addr)
{
	int d, i;

	/* The 3 opcode bits live in the top of DC_EECMD_READ. */
	d = DC_EECMD_READ >> 6;

	/* Clock out the opcode bits. */
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each address bit and strobe the clock; the address
	 * width was determined earlier by dc_eeprom_width().
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}
}
344 
345 /*
346  * Read a word of data stored in the EEPROM at address 'addr.'
347  * The PNIC 82c168/82c169 has its own non-standard way to read
348  * the EEPROM.
349  */
350 void
351 dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
352 {
353 	int i;
354 	u_int32_t r;
355 
356 	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);
357 
358 	for (i = 0; i < DC_TIMEOUT; i++) {
359 		DELAY(1);
360 		r = CSR_READ_4(sc, DC_SIO);
361 		if (!(r & DC_PN_SIOCTL_BUSY)) {
362 			*dest = (u_int16_t)(r & 0xFFFF);
363 			return;
364 		}
365 	}
366 }
367 
368 /*
369  * Read a word of data stored in the EEPROM at address 'addr.'
370  * The Xircom X3201 has its own non-standard way to read
371  * the EEPROM, too.
372  */
373 void
374 dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
375 {
376 	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
377 
378 	addr *= 2;
379 	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
380 	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
381 	addr += 1;
382 	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
383 	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;
384 
385 	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
386 }
387 
/*
 * Read one 16-bit word from the standard MicroWire EEPROM at word
 * offset 'addr'.
 */
void
dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO,  DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Clock in the 16 data bits, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;
}
433 
434 /*
435  * Read a sequence of words from the EEPROM.
436  */
437 void
438 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt,
439     int swap)
440 {
441 	int i;
442 	u_int16_t word = 0, *ptr;
443 
444 	for (i = 0; i < cnt; i++) {
445 		if (DC_IS_PNIC(sc))
446 			dc_eeprom_getword_pnic(sc, off + i, &word);
447 		else if (DC_IS_XIRCOM(sc))
448 			dc_eeprom_getword_xircom(sc, off + i, &word);
449 		else
450 			dc_eeprom_getword(sc, off + i, &word);
451 		ptr = (u_int16_t *)(dest + (i * 2));
452 		if (swap)
453 			*ptr = betoh16(word);
454 		else
455 			*ptr = letoh16(word);
456 	}
457 }
458 
459 /*
460  * The following two routines are taken from the Macronix 98713
461  * Application Notes pp.19-21.
462  */
463 /*
464  * Write a bit to the MII bus.
465  */
466 void
467 dc_mii_writebit(struct dc_softc *sc, int bit)
468 {
469 	if (bit)
470 		CSR_WRITE_4(sc, DC_SIO,
471 		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
472 	else
473 		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
474 
475 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
476 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
477 }
478 
479 /*
480  * Read a bit from the MII bus.
481  */
482 int
483 dc_mii_readbit(struct dc_softc *sc)
484 {
485 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
486 	CSR_READ_4(sc, DC_SIO);
487 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
488 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
489 	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
490 		return (1);
491 	return (0);
492 }
493 
494 /*
495  * Sync the PHYs by setting data bit and strobing the clock 32 times.
496  */
497 void
498 dc_mii_sync(struct dc_softc *sc)
499 {
500 	int i;
501 
502 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
503 
504 	for (i = 0; i < 32; i++)
505 		dc_mii_writebit(sc, 1);
506 }
507 
508 /*
509  * Clock a series of bits through the MII.
510  */
511 void
512 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
513 {
514 	int i;
515 
516 	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
517 		dc_mii_writebit(sc, bits & i);
518 }
519 
520 /*
521  * Read an PHY register through the MII.
522  */
523 int
524 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
525 {
526 	int i, ack, s;
527 
528 	s = splnet();
529 
530 	/*
531 	 * Set up frame for RX.
532 	 */
533 	frame->mii_stdelim = DC_MII_STARTDELIM;
534 	frame->mii_opcode = DC_MII_READOP;
535 	frame->mii_turnaround = 0;
536 	frame->mii_data = 0;
537 
538 	/*
539 	 * Sync the PHYs.
540 	 */
541 	dc_mii_sync(sc);
542 
543 	/*
544 	 * Send command/address info.
545 	 */
546 	dc_mii_send(sc, frame->mii_stdelim, 2);
547 	dc_mii_send(sc, frame->mii_opcode, 2);
548 	dc_mii_send(sc, frame->mii_phyaddr, 5);
549 	dc_mii_send(sc, frame->mii_regaddr, 5);
550 
551 #ifdef notdef
552 	/* Idle bit */
553 	dc_mii_writebit(sc, 1);
554 	dc_mii_writebit(sc, 0);
555 #endif
556 
557 	/* Check for ack */
558 	ack = dc_mii_readbit(sc);
559 
560 	/*
561 	 * Now try reading data bits. If the ack failed, we still
562 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
563 	 */
564 	if (ack) {
565 		for(i = 0; i < 16; i++) {
566 			dc_mii_readbit(sc);
567 		}
568 		goto fail;
569 	}
570 
571 	for (i = 0x8000; i; i >>= 1) {
572 		if (!ack) {
573 			if (dc_mii_readbit(sc))
574 				frame->mii_data |= i;
575 		}
576 	}
577 
578 fail:
579 
580 	dc_mii_writebit(sc, 0);
581 	dc_mii_writebit(sc, 0);
582 
583 	splx(s);
584 
585 	if (ack)
586 		return (1);
587 	return (0);
588 }
589 
590 /*
591  * Write to a PHY register through the MII.
592  */
593 int
594 dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
595 {
596 	int s;
597 
598 	s = splnet();
599 	/*
600 	 * Set up frame for TX.
601 	 */
602 
603 	frame->mii_stdelim = DC_MII_STARTDELIM;
604 	frame->mii_opcode = DC_MII_WRITEOP;
605 	frame->mii_turnaround = DC_MII_TURNAROUND;
606 
607 	/*
608 	 * Sync the PHYs.
609 	 */
610 	dc_mii_sync(sc);
611 
612 	dc_mii_send(sc, frame->mii_stdelim, 2);
613 	dc_mii_send(sc, frame->mii_opcode, 2);
614 	dc_mii_send(sc, frame->mii_phyaddr, 5);
615 	dc_mii_send(sc, frame->mii_regaddr, 5);
616 	dc_mii_send(sc, frame->mii_turnaround, 2);
617 	dc_mii_send(sc, frame->mii_data, 16);
618 
619 	/* Idle bit. */
620 	dc_mii_writebit(sc, 0);
621 	dc_mii_writebit(sc, 0);
622 
623 	splx(s);
624 	return (0);
625 }
626 
/*
 * MII bus read entry point (mii layer callback).  Dispatches to the
 * appropriate access method for the chip at hand: fakes a PHY for
 * chips not in MII mode, uses the PNIC's indirect MII register, maps
 * MII registers onto the Comet's direct-access registers, or falls
 * back to the bit-banged MII frame interface.
 */
int
dc_miibus_readreg(struct device *self, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc = (struct dc_softc *)self;
	int i, rval, phy_reg;

	/*
	 * Note: both the AL981 and AN983 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN983 uses a serial MII interface.
	 * The AN983's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
	 * so we only respond to the correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	/*
	 * Not in MII mode: fake up ID/status registers at the last
	 * PHY address so the probe code thinks a PHY is present.
	 */
	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (PCI_VENDOR_LITEON);
				return (PCI_VENDOR_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (PCI_PRODUCT_LITEON_PNIC);
				return (PCI_PRODUCT_DEC_21142);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	/* PNIC: indirect MII access; poll until the busy bit clears. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				/* All-ones reads back as "no PHY". */
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	/* Comet: PHY registers are mapped directly into CSR space. */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_read: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	/* Everything else: bit-banged MII frame. */
	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	/*
	 * On the 98713, temporarily clear the port-select bit in
	 * NETCFG around the access and restore it afterwards.
	 */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}
747 
/*
 * MII bus write entry point (mii layer callback).  Mirrors
 * dc_miibus_readreg(): PNIC chips use the indirect MII register,
 * Comet chips map PHY registers into CSR space, everything else
 * goes through the bit-banged MII frame interface.
 */
void
dc_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct dc_mii_frame frame;
	int i, phy_reg;

	bzero((char *)&frame, sizeof(frame));

	/*
	 * Only the fixed internal PHY address is valid on these chips
	 * (see the notes in dc_miibus_readreg()).
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return;
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return;

	/* PNIC: indirect MII access; poll until the busy bit clears. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return;
	}

	/* Comet: PHY registers are mapped directly into CSR space. */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_write: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return;
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return;
	}

	/* Everything else: bit-banged MII frame. */
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	/*
	 * On the 98713, temporarily clear the port-select bit in
	 * NETCFG around the access and restore it afterwards.
	 */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
}
818 
819 void
820 dc_miibus_statchg(struct device *self)
821 {
822 	struct dc_softc *sc = (struct dc_softc *)self;
823 	struct mii_data *mii;
824 	struct ifmedia *ifm;
825 
826 	if (DC_IS_ADMTEK(sc))
827 		return;
828 
829 	mii = &sc->sc_mii;
830 	ifm = &mii->mii_media;
831 	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
832 		dc_setcfg(sc, ifm->ifm_media);
833 		sc->dc_if_media = ifm->ifm_media;
834 	} else {
835 		dc_setcfg(sc, mii->mii_media_active);
836 		sc->dc_if_media = mii->mii_media_active;
837 	}
838 }
839 
840 #define DC_BITS_512	9
841 #define DC_BITS_128	7
842 #define DC_BITS_64	6
843 
844 u_int32_t
845 dc_crc_le(struct dc_softc *sc, caddr_t addr)
846 {
847 	u_int32_t crc;
848 
849 	/* Compute CRC for the address value. */
850 	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
851 
852 	/*
853 	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
854 	 * chips is only 128 bits wide.
855 	 */
856 	if (sc->dc_flags & DC_128BIT_HASH)
857 		return (crc & ((1 << DC_BITS_128) - 1));
858 
859 	/* The hash table on the MX98715BEC is only 64 bits wide. */
860 	if (sc->dc_flags & DC_64BIT_HASH)
861 		return (crc & ((1 << DC_BITS_64) - 1));
862 
863 	/* Xircom's hash filtering table is different (read: weird) */
864 	/* Xircom uses the LEAST significant bits */
865 	if (DC_IS_XIRCOM(sc)) {
866 		if ((crc & 0x180) == 0x180)
867 			return (crc & 0x0F) + (crc	& 0x70)*3 + (14 << 4);
868 		else
869 			return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
870 	}
871 
872 	return (crc & ((1 << DC_BITS_512) - 1));
873 }
874 
875 /*
876  * Calculate CRC of a multicast group address, return the lower 6 bits.
877  */
878 #define dc_crc_be(addr)	((ether_crc32_be(addr,ETHER_ADDR_LEN) >> 26) \
879 	& 0x0000003F)
880 
/*
 * 21143-style RX filter setup routine. Filter programming is done by
 * downloading a special setup frame into the TX engine. 21143, Macronix,
 * PNIC, PNIC II and Davicom chips are programmed this way.
 *
 * We always program the chip using 'hash perfect' mode, i.e. one perfect
 * address (our node address) and a 512-bit hash filter for multicast
 * frames. We also sneak the broadcast address into the hash filter since
 * we need that too.
 */
void
dc_setfilt_21143(struct dc_softc *sc)
{
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ifnet *ifp;
	int i;

	ifp = &sc->sc_arpcom.ac_if;

	/* Grab the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero((char *)sp, DC_SFRAME_LEN);

	/* Point the descriptor at the setup buffer. */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Stash a pointer to the setup buffer in this TX chain slot;
	 * note it is cast to an mbuf pointer but is not a real mbuf.
	 */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

allmulti:
	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/*
			 * An address range can't be represented in the
			 * hash filter; fall back to allmulti and retry.
			 */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}

			h = dc_crc_le(sc, enm->enm_addrlo);
			sp[h >> 4] |= htole32(1 << (h & 0xF));
			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Sneak the broadcast address into the hash filter. */
	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address */
	sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	/* Flush the setup buffer before handing it to the chip. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    sizeof(struct dc_list_data) -
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Give the descriptor to the chip and kick the transmitter. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list[i]),
	    sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	ifp->if_timer = 5;
}
979 
/*
 * ADMtek-style RX filter setup: the station address and the two 32-bit
 * multicast hash words are written directly to CSRs instead of via a
 * setup frame.  Centaur parts hash with the little-endian CRC, the
 * others with the big-endian CRC.
 */
void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->sc_arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 |
	    ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

allmulti:
	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/*
		 * Address ranges can't be hashed; fall back to
		 * accepting all multicast and retry.
		 */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}

		if (DC_IS_CENTAUR(sc))
			h = dc_crc_le(sc, enm->enm_addrlo);
		else
			h = dc_crc_be(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}
1042 
/*
 * ASIX-style RX filter setup: the station address and multicast hash
 * words are programmed through the indirect filter index/data register
 * pair.  The ASIX parts also have a dedicated broadcast-enable bit.
 */
void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = &sc->sc_arpcom.ac_if;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
		return;

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = dc_crc_be(enm->enm_addrlo);
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
1112 
/*
 * Program the RX filter on Xircom chips.  Unlike the other variants,
 * the filter is loaded by queueing a special "setup frame" on the
 * transmit ring; the chip consumes it like a normal TX descriptor.
 * TX and RX are stopped while the setup frame is constructed and are
 * restarted once it has been handed to the chip.
 */
void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct dc_desc *sframe;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t h, *sp;
	struct ifnet *ifp;
	int i;

	ifp = &sc->sc_arpcom.ac_if;
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero((char *)sp, DC_SFRAME_LEN);

	/* Point the descriptor at the shared setup buffer. */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Record the setup buffer as this slot's "mbuf" so txeof can
	 * recognize and skip it (it is not a real packet).
	 */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* Little-endian CRC indexes into the setup-frame table. */
		h = dc_crc_le(sc, enm->enm_addrlo);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Hash in the broadcast address too, if enabled. */
	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address */
	sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	/* Hand the setup frame to the chip and poke the TX poll demand. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
	 */
	DELAY(1000);

	/* Arm the watchdog in case the setup frame never completes. */
	ifp->if_timer = 5;
}
1184 
/*
 * Dispatch to the chip-specific RX filter programming routine.
 * The DC_IS_* predicates test sc->dc_type, so at most one of the
 * branches below fires for a given device.
 */
void
dc_setfilt(struct dc_softc *sc)
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}
1201 
1202 /*
1203  * In order to fiddle with the
1204  * 'full-duplex' and '100Mbps' bits in the netconfig register, we
1205  * first have to put the transmit and/or receive logic in the idle state.
1206  */
void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/*
	 * If the MAC is currently running, stop TX/RX and wait for both
	 * engines to reach an idle/stopped state before touching the
	 * speed and duplex bits; remember to restart them afterwards.
	 */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/*
			 * Some chips (ASIX, broken-rxstate parts) never
			 * report idle properly; don't spam the console
			 * for those known-broken cases.
			 */
			if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}
	}

	/* 100Mbps: clear the 10Mbps speed-select bit. */
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			/* MII mode: disable the internal PCS/scrambler. */
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			/* Symbol mode: use internal PCS and scrambler. */
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	/* 10Mbps: set the speed-select bit, drop the heartbeat. */
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Reprogram the SIA for forced 10baseT:
				 * pulse the SIA reset, load the CSR13/14
				 * magic values (0x7F3D = FDX, 0x7F3F = HDX),
				 * then turn autoneg off.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart TX/RX if we stopped them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
}
1360 
/*
 * Perform a full software reset of the chip: assert the bus-mode
 * reset bit, wait for it to self-clear, then reinitialize the core
 * CSRs to a known quiescent state.
 */
void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * Several chip families never clear the reset bit on their own;
	 * clear it manually and zero 'i' so the timeout warning below
	 * is suppressed for them.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Mask all interrupts and clear the bus/network configuration. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	if (sc->dc_type == DC_TYPE_21145)
		dc_setcfg(sc, IFM_10_T);
}
1406 
1407 void
1408 dc_apply_fixup(struct dc_softc *sc, int media)
1409 {
1410 	struct dc_mediainfo *m;
1411 	u_int8_t *p;
1412 	int i;
1413 	u_int32_t reg;
1414 
1415 	m = sc->dc_mi;
1416 
1417 	while (m != NULL) {
1418 		if (m->dc_media == media)
1419 			break;
1420 		m = m->dc_next;
1421 	}
1422 
1423 	if (m == NULL)
1424 		return;
1425 
1426 	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1427 		reg = (p[0] | (p[1] << 8)) << 16;
1428 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1429 	}
1430 
1431 	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1432 		reg = (p[0] | (p[1] << 8)) << 16;
1433 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1434 	}
1435 }
1436 
1437 void
1438 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1439 {
1440 	struct dc_mediainfo *m;
1441 
1442 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1443 	if (m == NULL)
1444 		return;
1445 	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1446 	case DC_SIA_CODE_10BT:
1447 		m->dc_media = IFM_10_T;
1448 		break;
1449 	case DC_SIA_CODE_10BT_FDX:
1450 		m->dc_media = IFM_10_T|IFM_FDX;
1451 		break;
1452 	case DC_SIA_CODE_10B2:
1453 		m->dc_media = IFM_10_2;
1454 		break;
1455 	case DC_SIA_CODE_10B5:
1456 		m->dc_media = IFM_10_5;
1457 		break;
1458 	default:
1459 		break;
1460 	}
1461 
1462 	/*
1463 	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1464 	 * Things apparently already work for cards that do
1465 	 * supply Media Specific Data.
1466 	 */
1467 	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1468 		m->dc_gp_len = 2;
1469 		m->dc_gp_ptr =
1470 		(u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1471 	} else {
1472 		m->dc_gp_len = 2;
1473 		m->dc_gp_ptr =
1474 		(u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1475 	}
1476 
1477 	m->dc_next = sc->dc_mi;
1478 	sc->dc_mi = m;
1479 
1480 	sc->dc_pmode = DC_PMODE_SIA;
1481 }
1482 
1483 void
1484 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1485 {
1486 	struct dc_mediainfo *m;
1487 
1488 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1489 	if (m == NULL)
1490 		return;
1491 	if (l->dc_sym_code == DC_SYM_CODE_100BT)
1492 		m->dc_media = IFM_100_TX;
1493 
1494 	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1495 		m->dc_media = IFM_100_TX|IFM_FDX;
1496 
1497 	m->dc_gp_len = 2;
1498 	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1499 
1500 	m->dc_next = sc->dc_mi;
1501 	sc->dc_mi = m;
1502 
1503 	sc->dc_pmode = DC_PMODE_SYM;
1504 }
1505 
1506 void
1507 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1508 {
1509 	u_int8_t *p;
1510 	struct dc_mediainfo *m;
1511 
1512 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1513 	if (m == NULL)
1514 		return;
1515 	/* We abuse IFM_AUTO to represent MII. */
1516 	m->dc_media = IFM_AUTO;
1517 	m->dc_gp_len = l->dc_gpr_len;
1518 
1519 	p = (u_int8_t *)l;
1520 	p += sizeof(struct dc_eblock_mii);
1521 	m->dc_gp_ptr = p;
1522 	p += 2 * l->dc_gpr_len;
1523 	m->dc_reset_len = *p;
1524 	p++;
1525 	m->dc_reset_ptr = p;
1526 
1527 	m->dc_next = sc->dc_mi;
1528 	sc->dc_mi = m;
1529 }
1530 
1531 void
1532 dc_read_srom(struct dc_softc *sc, int bits)
1533 {
1534 	int size;
1535 
1536 	size = 2 << bits;
1537 	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
1538 	if (sc->dc_srom == NULL)
1539 		return;
1540 	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
1541 }
1542 
/*
 * Walk the 21143 SROM info leaf and decode its media blocks.
 * Two passes are made: the first only counts MII blocks, the second
 * decodes everything, using SIA/SYM blocks only when no MII block
 * exists (MII takes precedence).
 */
void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	/* Byte 27 of the SROM holds the offset of the info leaf. */
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	/* Media blocks start right after the leaf header. */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
		    have_mii++;

		/* Advance by block length (low 7 bits) plus header byte. */
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch(hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
			    dc_decode_leaf_sia(sc,
				(struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
			    dc_decode_leaf_sym(sc,
				(struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
}
1599 
1600 /*
1601  * Attach the interface. Allocate softc structures, do ifmedia
1602  * setup and ethernet/BPF attach.
1603  */
1604 void
1605 dc_attach(struct dc_softc *sc)
1606 {
1607 	struct ifnet *ifp;
1608 	int mac_offset, tmp, i;
1609 	u_int32_t reg;
1610 
1611 	/*
1612 	 * Get station address from the EEPROM.
1613 	 */
1614 	if (sc->sc_hasmac)
1615 		goto hasmac;
1616 
1617 	switch(sc->dc_type) {
1618 	case DC_TYPE_98713:
1619 	case DC_TYPE_98713A:
1620 	case DC_TYPE_987x5:
1621 	case DC_TYPE_PNICII:
1622 		dc_read_eeprom(sc, (caddr_t)&mac_offset,
1623 		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
1624 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1625 		    (mac_offset / 2), 3, 0);
1626 		break;
1627 	case DC_TYPE_PNIC:
1628 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1629 		break;
1630 	case DC_TYPE_DM9102:
1631 	case DC_TYPE_21143:
1632 	case DC_TYPE_21145:
1633 	case DC_TYPE_ASIX:
1634 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1635 		    DC_EE_NODEADDR, 3, 0);
1636 		break;
1637 	case DC_TYPE_AL981:
1638 	case DC_TYPE_AN983:
1639 		reg = CSR_READ_4(sc, DC_AL_PAR0);
1640 		sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff);
1641 		sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff;
1642 		sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff;
1643 		sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff;
1644 		reg = CSR_READ_4(sc, DC_AL_PAR1);
1645 		sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff);
1646 		sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff;
1647 		break;
1648 	case DC_TYPE_CONEXANT:
1649 		bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR,
1650 		    &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1651 		break;
1652 	case DC_TYPE_XIRCOM:
1653 		/* Some newer units have the MAC at offset 8 */
1654 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 8, 3, 0);
1655 
1656 		if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1657 		    sc->sc_arpcom.ac_enaddr[1] == 0x10 &&
1658 		    sc->sc_arpcom.ac_enaddr[2] == 0xa4)
1659 			break;
1660 		if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1661 		    sc->sc_arpcom.ac_enaddr[1] == 0x80 &&
1662 		    sc->sc_arpcom.ac_enaddr[2] == 0xc7)
1663 			break;
1664 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 3, 3, 0);
1665 		break;
1666 	default:
1667 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1668 		    DC_EE_NODEADDR, 3, 0);
1669 		break;
1670 	}
1671 hasmac:
1672 
1673 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
1674 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1675 	    BUS_DMA_NOWAIT) != 0) {
1676 		printf(": can't alloc list mem\n");
1677 		goto fail;
1678 	}
1679 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1680 	    sizeof(struct dc_list_data), &sc->sc_listkva,
1681 	    BUS_DMA_NOWAIT) != 0) {
1682 		printf(": can't map list mem\n");
1683 		goto fail;
1684 	}
1685 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
1686 	    sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
1687 	    &sc->sc_listmap) != 0) {
1688 		printf(": can't alloc list map\n");
1689 		goto fail;
1690 	}
1691 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1692 	    sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1693 		printf(": can't load list map\n");
1694 		goto fail;
1695 	}
1696 	sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1697 	bzero(sc->dc_ldata, sizeof(struct dc_list_data));
1698 
1699 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1700 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1701 		    0, BUS_DMA_NOWAIT,
1702 		    &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
1703 			printf(": can't create rx map\n");
1704 			return;
1705 		}
1706 	}
1707 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1708 	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
1709 		printf(": can't create rx spare map\n");
1710 		return;
1711 	}
1712 
1713 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1714 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1715 		    DC_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
1716 		    &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
1717 			printf(": can't create tx map\n");
1718 			return;
1719 		}
1720 	}
1721 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, DC_TX_LIST_CNT - 5,
1722 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1723 		printf(": can't create tx spare map\n");
1724 		return;
1725 	}
1726 
1727 	/*
1728 	 * A 21143 or clone chip was detected. Inform the world.
1729 	 */
1730 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1731 
1732 	ifp = &sc->sc_arpcom.ac_if;
1733 	ifp->if_softc = sc;
1734 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1735 	ifp->if_ioctl = dc_ioctl;
1736 	ifp->if_start = dc_start;
1737 	ifp->if_watchdog = dc_watchdog;
1738 	ifp->if_baudrate = 10000000;
1739 	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
1740 	IFQ_SET_READY(&ifp->if_snd);
1741 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1742 
1743 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1744 
1745 	/* Do MII setup. If this is a 21143, check for a PHY on the
1746 	 * MII bus after applying any necessary fixups to twiddle the
1747 	 * GPIO bits. If we don't end up finding a PHY, restore the
1748 	 * old selection (SIA only or SIA/SYM) and attach the dcphy
1749 	 * driver instead.
1750 	 */
1751 	if (DC_IS_INTEL(sc)) {
1752 		dc_apply_fixup(sc, IFM_AUTO);
1753 		tmp = sc->dc_pmode;
1754 		sc->dc_pmode = DC_PMODE_MII;
1755 	}
1756 
1757 	/*
1758 	 * Setup General Purpose port mode and data so the tulip can talk
1759 	 * to the MII.  This needs to be done before mii_attach so that
1760 	 * we can actually see them.
1761 	 */
1762 	if (DC_IS_XIRCOM(sc)) {
1763 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
1764 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1765 		DELAY(10);
1766 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
1767 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1768 		DELAY(10);
1769 	}
1770 
1771 	sc->sc_mii.mii_ifp = ifp;
1772 	sc->sc_mii.mii_readreg = dc_miibus_readreg;
1773 	sc->sc_mii.mii_writereg = dc_miibus_writereg;
1774 	sc->sc_mii.mii_statchg = dc_miibus_statchg;
1775 	ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1776 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1777 	    MII_OFFSET_ANY, 0);
1778 
1779 	if (DC_IS_INTEL(sc)) {
1780 		if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1781 			sc->dc_pmode = tmp;
1782 			if (sc->dc_pmode != DC_PMODE_SIA)
1783 				sc->dc_pmode = DC_PMODE_SYM;
1784 			sc->dc_flags |= DC_21143_NWAY;
1785 			if (sc->dc_flags & DC_MOMENCO_BOTCH)
1786 				sc->dc_pmode = DC_PMODE_MII;
1787 			mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1788 			    MII_PHY_ANY, MII_OFFSET_ANY, 0);
1789 		} else {
1790 			/* we have a PHY, so we must clear this bit */
1791 			sc->dc_flags &= ~DC_TULIP_LEDS;
1792 		}
1793 	}
1794 
1795 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1796 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1797 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1798 		printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1799 	} else if (sc->dc_type == DC_TYPE_21145) {
1800 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
1801 	} else
1802 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1803 
1804 	if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
1805 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);
1806 
1807 	if (DC_IS_ADMTEK(sc)) {
1808 		/*
1809 		 * Set automatic TX underrun recovery for the ADMtek chips
1810 		 */
1811 		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
1812 	}
1813 
1814 	/*
1815 	 * Call MI attach routines.
1816 	 */
1817 	if_attach(ifp);
1818 	ether_ifattach(ifp);
1819 
1820 	sc->sc_pwrhook = powerhook_establish(dc_power, sc);
1821 
1822 fail:
1823 	return;
1824 }
1825 
1826 /*
1827  * Initialize the transmit descriptors.
1828  */
1829 int
1830 dc_list_tx_init(struct dc_softc *sc)
1831 {
1832 	struct dc_chain_data *cd;
1833 	struct dc_list_data *ld;
1834 	int i;
1835 	bus_addr_t next;
1836 
1837 	cd = &sc->dc_cdata;
1838 	ld = sc->dc_ldata;
1839 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1840 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1841 		if (i == (DC_TX_LIST_CNT - 1))
1842 			next +=
1843 			    offsetof(struct dc_list_data, dc_tx_list[0]);
1844 		else
1845 			next +=
1846 			    offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1847 		cd->dc_tx_chain[i].sd_mbuf = NULL;
1848 		ld->dc_tx_list[i].dc_data = htole32(0);
1849 		ld->dc_tx_list[i].dc_ctl = htole32(0);
1850 		ld->dc_tx_list[i].dc_next = htole32(next);
1851 	}
1852 
1853 	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1854 
1855 	return (0);
1856 }
1857 
1858 
1859 /*
1860  * Initialize the RX descriptors and allocate mbufs for them. Note that
1861  * we arrange the descriptors in a closed ring, so that the last descriptor
1862  * points back to the first.
1863  */
1864 int
1865 dc_list_rx_init(struct dc_softc *sc)
1866 {
1867 	struct dc_chain_data *cd;
1868 	struct dc_list_data *ld;
1869 	int i;
1870 	bus_addr_t next;
1871 
1872 	cd = &sc->dc_cdata;
1873 	ld = sc->dc_ldata;
1874 
1875 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1876 		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
1877 			return (ENOBUFS);
1878 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1879 		if (i == (DC_RX_LIST_CNT - 1))
1880 			next +=
1881 			    offsetof(struct dc_list_data, dc_rx_list[0]);
1882 		else
1883 			next +=
1884 			    offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1885 		ld->dc_rx_list[i].dc_next = htole32(next);
1886 	}
1887 
1888 	cd->dc_rx_prod = 0;
1889 
1890 	return (0);
1891 }
1892 
1893 /*
1894  * Initialize an RX descriptor and attach an MBUF cluster.
1895  */
int
dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct dc_desc *c;
	bus_dmamap_t map;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		/* Allocate a fresh mbuf with an attached cluster. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		/*
		 * Load into the spare map first so the descriptor's
		 * current map (still potentially in use) is untouched
		 * if the load fails; on success, swap the spare map in.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,
		    m_new, BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Reserve 8 bytes at the head (see dc_data offset below). */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero((char *)mtod(m_new, char *), m_new->m_len);

	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Point the descriptor at the buffer and hand it to the chip. */
	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
	c->dc_data = htole32(
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN);
	c->dc_status = htole32(DC_RXSTAT_OWN);

	/* Flush the descriptor update out to the chip. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_rx_list[i]),
	    sizeof(struct dc_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1963 
1964 /*
1965  * Grrrrr.
1966  * The PNIC chip has a terrible bug in it that manifests itself during
1967  * periods of heavy activity. The exact mode of failure if difficult to
1968  * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
1969  * will happen on slow machines. The bug is that sometimes instead of
1970  * uploading one complete frame during reception, it uploads what looks
1971  * like the entire contents of its FIFO memory. The frame we want is at
1972  * the end of the whole mess, but we never know exactly how much data has
1973  * been uploaded, so salvaging the frame is hard.
1974  *
1975  * There is only one way to do it reliably, and it's disgusting.
1976  * Here's what we know:
1977  *
1978  * - We know there will always be somewhere between one and three extra
1979  *   descriptors uploaded.
1980  *
1981  * - We know the desired received frame will always be at the end of the
1982  *   total data upload.
1983  *
1984  * - We know the size of the desired received frame because it will be
1985  *   provided in the length field of the status word in the last descriptor.
1986  *
1987  * Here's what we do:
1988  *
1989  * - When we allocate buffers for the receive ring, we bzero() them.
1990  *   This means that we know that the buffer contents should be all
1991  *   zeros, except for data uploaded by the chip.
1992  *
1993  * - We also force the PNIC chip to upload frames that include the
1994  *   ethernet CRC at the end.
1995  *
1996  * - We gather all of the bogus frame data into a single buffer.
1997  *
1998  * - We then position a pointer at the end of this buffer and scan
1999  *   backwards until we encounter the first non-zero byte of data.
2000  *   This is the end of the received frame. We know we will encounter
2001  *   some data at the end of the frame because the CRC will always be
2002  *   there, so even if the sender transmits a packet of all zeros,
2003  *   we won't be fooled.
2004  *
2005  * - We know the size of the actual received frame, so we subtract
2006  *   that value from the current pointer location. This brings us
2007  *   to the start of the actual received packet.
2008  *
2009  * - We copy this into an mbuf and pass it on, along with the actual
2010  *   frame length.
2011  *
2012  * The performance hit is tremendous, but it beats dropping frames all
2013  * the time.
2014  */
2015 
2016 #define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc		*cur_rx;
	struct dc_desc		*c = NULL;
	struct mbuf		*m = NULL;
	unsigned char		*ptr;
	int			i, total_len;
	u_int32_t		rxstat = 0;

	/* Start from the descriptor where the bogus upload began. */
	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, ETHER_MAX_DIX_LEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		bcopy(mtod(m, char *), ptr, ETHER_MAX_DIX_LEN);
		ptr += ETHER_MAX_DIX_LEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Recycle the intermediate buffers back to the ring. */
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/*
	 * Scan backwards until we hit a non-zero byte.  The buffers
	 * were bzero'd at allocation, so the first non-zero byte from
	 * the end marks the end of the frame (the CRC guarantees at
	 * least one non-zero trailing byte).
	 */
	while(*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((unsigned long)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, m);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2071 
2072 /*
2073  * This routine searches the RX ring for dirty descriptors in the
2074  * event that the rxeof routine falls out of sync with the chip's
2075  * current descriptor pointer. This may happen sometimes as a result
2076  * of a "no RX buffer available" condition that happens when the chip
2077  * consumes all of the RX buffers before the driver has a chance to
2078  * process the RX ring. This routine may need to be called more than
2079  * once to bring the driver back in sync with the chip, however we
2080  * should still be getting RX DONE interrupts to drive the search
2081  * for new packets in the RX ring, so we should catch up eventually.
2082  */
2083 int
2084 dc_rx_resync(struct dc_softc *sc)
2085 {
2086 	u_int32_t stat;
2087 	int i, pos, offset;
2088 
2089 	pos = sc->dc_cdata.dc_rx_prod;
2090 
2091 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2092 
2093 		offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
2094 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2095 		    offset, sizeof(struct dc_desc),
2096 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2097 
2098 		stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
2099 		if (!(stat & htole32(DC_RXSTAT_OWN)))
2100 			break;
2101 		DC_INC(pos, DC_RX_LIST_CNT);
2102 	}
2103 
2104 	/* If the ring really is empty, then just return. */
2105 	if (i == DC_RX_LIST_CNT)
2106 		return (0);
2107 
2108 	/* We've fallen behind the chip: catch it. */
2109 	sc->dc_cdata.dc_rx_prod = pos;
2110 
2111 	return (EAGAIN);
2112 }
2113 
2114 /*
2115  * A frame has been uploaded: pass the resulting mbuf chain up to
2116  * the higher level protocols.
2117  */
void
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	int i, offset, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	/* Consume completed descriptors until we hit one the chip owns. */
	for(;;) {
		struct mbuf	*m0 = NULL;

		/* Sync the descriptor in from DMA memory before reading it. */
		offset = offsetof(struct dc_list_data, dc_rx_list[i]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(cur_rx->dc_status);
		/* OWN still set: the chip hasn't finished this one yet. */
		if (rxstat & DC_RXSTAT_OWN)
			break;

		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		total_len = DC_RXBYTES(rxstat);

		/* Make the received payload visible to the CPU. */
		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/*
		 * PNIC bug workaround: if a frame was split across
		 * descriptors (FIRSTFRAG/LASTFRAG not both set in one
		 * descriptor), remember where it started and, once the
		 * last fragment shows up, let dc_pnic_rx_bug_war()
		 * reassemble it and fix up the status word.
		 */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				/* Re-read status: the war routine rewrote it. */
				rxstat = letoh32(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.  However, don't report long
		 * frames as errors since they could be VLANs.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
				       DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
				       DC_RXSTAT_RUNT   | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_newbuf(sc, i, m);
				if (rxstat & DC_RXSTAT_CRCERR) {
					/* CRC error: skip just this frame. */
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/* Anything worse: reinit the chip. */
					dc_init(sc);
					return;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		/* Copy into a fresh aligned mbuf chain for the stack. */
		m->m_pkthdr.rcvif = ifp;
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
		    ifp, NULL);
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		ifp->if_ipackets++;
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	/* Remember where to resume on the next interrupt. */
	sc->dc_cdata.dc_rx_prod = i;
}
2216 
2217 /*
2218  * A frame was downloaded to the chip. It's safe for us to clean up
2219  * the list buffers.
2220  */
2221 
void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx, offset;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->dc_cdata.dc_tx_cons;
	while(idx != sc->dc_cdata.dc_tx_prod) {
		u_int32_t		txstat;

		/* Sync the descriptor in from DMA memory before reading. */
		offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = letoh32(cur_tx->dc_status);

		/* Chip still owns this descriptor: nothing more to reap. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/*
		 * Intermediate fragments and setup frames carry no mbuf
		 * to free and no stats to collect; just retire them.
		 */
		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
			}
			sc->dc_cdata.dc_tx_cnt--;
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
		    	    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
		    	    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			/* Underruns are recoverable; anything else: reinit. */
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				dc_init(sc);
				return;
			}
		}

		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		/* Unload the DMA map and free the transmitted mbuf. */
		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}
	sc->dc_cdata.dc_tx_cons = idx;

	/* Re-open the send path once enough descriptors are free. */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > 5)
		ifp->if_flags &= ~IFF_OACTIVE;
	/* Ring fully drained: cancel the watchdog. */
	if (sc->dc_cdata.dc_tx_cnt == 0)
		ifp->if_timer = 0;
}
2336 
/*
 * Periodic timer: poll the PHY/link state, kick the send queue once a
 * link appears, and re-arm ourselves (faster while NWAY is negotiating).
 */
void
dc_tick(void *xsc)
{
	struct dc_softc *sc = (struct dc_softc *)xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;
	u_int32_t r;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143 NWAY: read the link status register and
			 * force a media change if the active media no
			 * longer matches what the SIA reports.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc,
			    DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
	 	    dc_start(ifp);
	}

	/* Poll fast (hz/10) while NWAY is still hunting for a link. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		timeout_add(&sc->dc_tick_tmo, hz / 10);
	else
		timeout_add_sec(&sc->dc_tick_tmo, 1);

	splx(s);
}
2415 
2416 /* A transmit underrun has occurred.  Back off the transmit threshold,
2417  * or switch to store and forward mode if we have to.
2418  */
2419 void
2420 dc_tx_underrun(struct dc_softc *sc)
2421 {
2422 	u_int32_t	isr;
2423 	int		i;
2424 
2425 	if (DC_IS_DAVICOM(sc))
2426 		dc_init(sc);
2427 
2428 	if (DC_IS_INTEL(sc)) {
2429 		/*
2430 		 * The real 21143 requires that the transmitter be idle
2431 		 * in order to change the transmit threshold or store
2432 		 * and forward state.
2433 		 */
2434 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2435 
2436 		for (i = 0; i < DC_TIMEOUT; i++) {
2437 			isr = CSR_READ_4(sc, DC_ISR);
2438 			if (isr & DC_ISR_TX_IDLE)
2439 				break;
2440 			DELAY(10);
2441 		}
2442 		if (i == DC_TIMEOUT) {
2443 			printf("%s: failed to force tx to idle state\n",
2444 			    sc->sc_dev.dv_xname);
2445 			dc_init(sc);
2446 		}
2447 	}
2448 
2449 	sc->dc_txthresh += DC_TXTHRESH_INC;
2450 	if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
2451 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2452 	} else {
2453 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2454 		DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2455 	}
2456 
2457 	if (DC_IS_INTEL(sc))
2458 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2459 
2460 	return;
2461 }
2462 
/*
 * Interrupt handler: ack and dispatch every pending interrupt cause,
 * with interrupts masked on the chip for the duration.  Returns nonzero
 * iff the interrupt was ours.
 */
int
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;
	int claimed = 0;

	sc = arg;

	ifp = &sc->sc_arpcom.ac_if;

	/* Not our interrupt. */
	if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0)
		return (claimed);

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	/* 0xFFFFFFFF from ISR means the chip is gone (e.g. hot-unplug). */
	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
	    status != 0xFFFFFFFF &&
	    (ifp->if_flags & IFF_RUNNING)) {

		claimed = 1;
		/* Ack the causes we are about to service. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			int		curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			/* Nothing received: we may be out of sync; resync. */
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* Still work queued: restart the transmitter. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			int		curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		/* Fatal bus error: full reset and reinit. */
		if (status & DC_ISR_BUS_ERR) {
			dc_reset(sc);
			dc_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	/* Freed descriptors may let queued packets go out now. */
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		dc_start(ifp);

	return (claimed);
}
2544 
2545 /*
2546  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2547  * pointers to the fragment pointers.
2548  */
int
dc_encap(struct dc_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct dc_desc *f = NULL;
	int frag, cur, cnt = 0, i;
	bus_dmamap_t map;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	map = sc->sc_tx_sparemap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT) != 0)
		return (ENOBUFS);

	cur = frag = *txidx;

	for (i = 0; i < map->dm_nsegs; i++) {
		/*
		 * ADMtek workaround: never let a packet wrap past the
		 * end of the ring unless it starts exactly at slot 0.
		 */
		if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
			if (*txidx != sc->dc_cdata.dc_tx_prod &&
			    frag == (DC_TX_LIST_CNT - 1)) {
				bus_dmamap_unload(sc->sc_dmat, map);
				return (ENOBUFS);
			}
		}
		/* Keep a few descriptors spare; bail if we'd run dry. */
		if ((DC_TX_LIST_CNT -
		    (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) {
			bus_dmamap_unload(sc->sc_dmat, map);
			return (ENOBUFS);
		}

		f = &sc->dc_ldata->dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
		if (cnt == 0) {
			/*
			 * Leave OWN clear on the first descriptor; it is
			 * set last (below) so the chip never sees a
			 * partially built chain.
			 */
			f->dc_status = htole32(0);
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
		cnt++;
	}

	sc->dc_cdata.dc_tx_cnt += cnt;
	/* The mbuf and its map are owned by the LAST descriptor used. */
	sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m_head;
	sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
	sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	/* Request a TX-done interrupt per the chip's preferred policy. */
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	else if ((sc->dc_flags & DC_TX_USE_TX_INTR) &&
		 TBR_IS_ENABLED(&sc->sc_arpcom.ac_if.if_snd))
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	/* Flush the payload out to memory before handing it to the chip. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Hand the whole chain to the chip by setting OWN on the head. */
	sc->dc_ldata->dc_tx_list[*txidx].dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list[*txidx]),
	    sizeof(struct dc_desc) * cnt,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	*txidx = frag;

	return (0);
}
2628 
2629 /*
2630  * Coalesce an mbuf chain into a single mbuf cluster buffer.
2631  * Needed for some really badly behaved chips that just can't
2632  * do scatter/gather correctly.
2633  */
2634 int
2635 dc_coal(struct dc_softc *sc, struct mbuf **m_head)
2636 {
2637 	struct mbuf		*m_new, *m;
2638 
2639 	m = *m_head;
2640 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2641 	if (m_new == NULL)
2642 		return (ENOBUFS);
2643 	if (m->m_pkthdr.len > MHLEN) {
2644 		MCLGET(m_new, M_DONTWAIT);
2645 		if (!(m_new->m_flags & M_EXT)) {
2646 			m_freem(m_new);
2647 			return (ENOBUFS);
2648 		}
2649 	}
2650 	m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t));
2651 	m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len;
2652 	m_freem(m);
2653 	*m_head = m_new;
2654 
2655 	return (0);
2656 }
2657 
2658 /*
2659  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2660  * to the mbuf data regions directly in the transmit lists. We also save a
2661  * copy of the pointers since the transmit list fragment pointers are
2662  * physical addresses.
2663  */
2664 
void
dc_start(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mbuf *m_head = NULL;
	int idx;

	sc = ifp->if_softc;

	/* Hold packets until link-up, unless the queue is backing up. */
	if (!sc->dc_link && ifp->if_snd.ifq_len < 10)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->dc_cdata.dc_tx_prod;

	/* Fill descriptors until the ring or the send queue runs out. */
	while(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Chips that can't do scatter/gather (or need aligned
		 * buffers) get the chain flattened into one mbuf first.
		 */
		if (sc->dc_flags & DC_TX_COALESCE &&
		    (m_head->m_next != NULL ||
			sc->dc_flags & DC_TX_ALIGN)) {
			/* note: dc_coal breaks the poll-and-dequeue rule.
			 * if dc_coal fails, we lose the packet.
			 */
			IFQ_DEQUEUE(&ifp->if_snd, m_head);
			if (dc_coal(sc, &m_head)) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		/* No descriptors left: leave the packet on the queue. */
		if (dc_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		if (sc->dc_flags & DC_TX_COALESCE) {
			/* if mbuf is coalesced, it is already dequeued */
		} else
			IFQ_DEQUEUE(&ifp->if_snd, m_head);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Some chips can only handle one pending frame at a time. */
		if (sc->dc_flags & DC_TX_ONE) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}
	/* Nothing was queued. */
	if (idx == sc->dc_cdata.dc_tx_prod)
		return;

	/* Transmit */
	sc->dc_cdata.dc_tx_prod = idx;
	if (!(sc->dc_flags & DC_TX_POLL))
		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2737 
/*
 * Stop, reset, and fully reprogram the chip, set up fresh RX/TX rings,
 * load the RX filter and bring the interface to RUNNING state.
 * NOTE(review): ordering below (bus config before ring setup before
 * transmitter/filter/receiver enable) matters on this hardware.
 */
void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	int s;

	s = splnet();

	mii = &sc->sc_mii;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/* Program store-and-forward vs. cut-through TX threshold. */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	/* Xircom: pulse the GPIO pins to wake up the transceiver. */
	if (DC_IS_XIRCOM(sc)) {
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		dc_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Sync down both lists initialized.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_rx_list[0]));
	CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* (Re)arm the link-poll timer; see dc_tick() for the rationale. */
	timeout_set(&sc->dc_tick_tmo, dc_tick, sc);

	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		if (sc->dc_flags & DC_21143_NWAY)
			timeout_add(&sc->dc_tick_tmo, hz / 10);
		else
			timeout_add_sec(&sc->dc_tick_tmo, 1);
	}

#ifdef SRM_MEDIA
	/* Honor a media setting handed down from the SRM console once. */
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
}
2923 
2924 /*
2925  * Set media options.
2926  */
2927 int
2928 dc_ifmedia_upd(struct ifnet *ifp)
2929 {
2930 	struct dc_softc *sc;
2931 	struct mii_data *mii;
2932 	struct ifmedia *ifm;
2933 
2934 	sc = ifp->if_softc;
2935 	mii = &sc->sc_mii;
2936 	mii_mediachg(mii);
2937 
2938 	ifm = &mii->mii_media;
2939 
2940 	if (DC_IS_DAVICOM(sc) &&
2941 	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2942 		dc_setcfg(sc, ifm->ifm_media);
2943 	else
2944 		sc->dc_link = 0;
2945 
2946 	return (0);
2947 }
2948 
2949 /*
2950  * Report current media status.
2951  */
2952 void
2953 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2954 {
2955 	struct dc_softc *sc;
2956 	struct mii_data *mii;
2957 	struct ifmedia *ifm;
2958 
2959 	sc = ifp->if_softc;
2960 	mii = &sc->sc_mii;
2961 	mii_pollstat(mii);
2962 	ifm = &mii->mii_media;
2963 	if (DC_IS_DAVICOM(sc)) {
2964 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
2965 			ifmr->ifm_active = ifm->ifm_media;
2966 			ifmr->ifm_status = 0;
2967 			return;
2968 		}
2969 	}
2970 	ifmr->ifm_active = mii->mii_media_active;
2971 	ifmr->ifm_status = mii->mii_media_status;
2972 }
2973 
/* Handle interface ioctls: address/flags/media changes and the rest. */
int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct dc_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			dc_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Only PROMISC changed while running: just
			 * reprogram the RX filter instead of a full init.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->dc_if_flags) &
			     IFF_PROMISC) {
				dc_setfilt(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING)) {
					sc->dc_txthresh = 0;
					dc_init(sc);
				}
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dc_stop(sc);
		}
		/* Remember the flags so the next delta can be computed. */
		sc->dc_if_flags = ifp->if_flags;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->sc_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
#ifdef SRM_MEDIA
		/* A manual media change overrides any SRM console setting. */
		if (sc->dc_srm_media)
			sc->dc_srm_media = 0;
#endif
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/* ENETRESET from ether_ioctl means "reload the RX filter". */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dc_setfilt(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
3035 
3036 void
3037 dc_watchdog(struct ifnet *ifp)
3038 {
3039 	struct dc_softc *sc;
3040 
3041 	sc = ifp->if_softc;
3042 
3043 	ifp->if_oerrors++;
3044 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
3045 
3046 	dc_stop(sc);
3047 	dc_reset(sc);
3048 	dc_init(sc);
3049 
3050 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
3051 		dc_start(ifp);
3052 }
3053 
3054 /*
3055  * Stop the adapter and free any mbufs allocated to the
3056  * RX and TX lists.
3057  */
void
dc_stop(struct dc_softc *sc)
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->sc_arpcom.ac_if;
	/* Cancel the TX watchdog and the link-poll timer. */
	ifp->if_timer = 0;

	timeout_del(&sc->dc_tick_tmo);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Stop both DMA engines and mask all interrupts. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
	sc->dc_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero((char *)&sc->dc_ldata->dc_rx_list,
		sizeof(sc->dc_ldata->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
			/*
			 * Setup frames point at a buffer inside the
			 * softc, not at a real mbuf — don't free those.
			 */
			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
			    htole32(DC_TXCTL_SETUP)) {
				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
				continue;
			}
			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero((char *)&sc->dc_ldata->dc_tx_list,
		sizeof(sc->dc_ldata->dc_tx_list));

	/* Push the cleared descriptor rings back out to DMA memory. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
3124 
3125 void
3126 dc_power(int why, void *arg)
3127 {
3128 	struct dc_softc *sc = arg;
3129 	struct ifnet *ifp;
3130 	int s;
3131 
3132 	s = splnet();
3133 	if (why != PWR_RESUME)
3134 		dc_stop(sc);
3135 	else {
3136 		ifp = &sc->sc_arpcom.ac_if;
3137 		if (ifp->if_flags & IFF_UP)
3138 			dc_init(sc);
3139 	}
3140 	splx(s);
3141 }
3142 
3143 int
3144 dc_detach(struct dc_softc *sc)
3145 {
3146 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3147 
3148 	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
3149 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3150 
3151 	if (sc->dc_srom)
3152 		free(sc->dc_srom, M_DEVBUF);
3153 
3154 	timeout_del(&sc->dc_tick_tmo);
3155 
3156 	ether_ifdetach(ifp);
3157 	if_detach(ifp);
3158 
3159 	if (sc->sc_pwrhook != NULL)
3160 		powerhook_disestablish(sc->sc_pwrhook);
3161 
3162 	return (0);
3163 }
3164 
3165 struct cfdriver dc_cd = {
3166 	0, "dc", DV_IFNET
3167 };
3168