xref: /openbsd/sys/dev/ic/dc.c (revision 4bdff4be)
1 /*	$OpenBSD: dc.c,v 1.156 2023/11/10 15:51:20 bluhm Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35  */
36 
37 /*
38  * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39  * series chips and several workalikes including the following:
40  *
41  * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42  * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43  * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44  * ASIX Electronics AX88140A (www.asix.com.tw)
45  * ASIX Electronics AX88141 (www.asix.com.tw)
46  * ADMtek AL981 (www.admtek.com.tw)
47  * ADMtek AN983 (www.admtek.com.tw)
48  * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49  * Accton EN1217, EN2242 (www.accton.com)
50  * Xircom X3201 (www.xircom.com)
51  *
52  * Datasheets for the 21143 are available at developer.intel.com.
53  * Datasheets for the clone parts can be found at their respective sites.
54  * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55  * The PNIC II is essentially a Macronix 98715A chip; the only difference
56  * worth noting is that its multicast hash table is only 128 bits wide
57  * instead of 512.
58  *
59  * Written by Bill Paul <wpaul@ee.columbia.edu>
60  * Electrical Engineering Department
61  * Columbia University, New York City
62  */
63 
64 /*
65  * The Intel 21143 is the successor to the DEC 21140. It is basically
66  * the same as the 21140 but with a few new features. The 21143 supports
67  * three kinds of media attachments:
68  *
69  * o MII port, for 10Mbps and 100Mbps support and NWAY
70  *   autonegotiation provided by an external PHY.
71  * o SYM port, for symbol mode 100Mbps support.
72  * o 10baseT port.
73  * o AUI/BNC port.
74  *
75  * The 100Mbps SYM port and 10baseT port can be used together in
76  * combination with the internal NWAY support to create a 10/100
77  * autosensing configuration.
78  *
79  * Note that not all tulip workalikes are handled in this driver: we only
80  * deal with those which are relatively well behaved. The Winbond is
81  * handled separately due to its different register offsets and the
82  * special handling needed for its various bugs. The PNIC is handled
83  * here, but I'm not thrilled about it.
84  *
85  * All of the workalike chips use some form of MII transceiver support
86  * with the exception of the Macronix chips, which also have a SYM port.
87  * The ASIX AX88140A is also documented to have a SYM port, but all
88  * the cards I've seen use an MII transceiver, probably because the
89  * AX88140A doesn't support internal NWAY.
90  */
91 
92 #include "bpfilter.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/mbuf.h>
97 #include <sys/socket.h>
98 #include <sys/ioctl.h>
99 #include <sys/errno.h>
100 #include <sys/malloc.h>
101 #include <sys/kernel.h>
102 #include <sys/device.h>
103 #include <sys/timeout.h>
104 
105 #include <net/if.h>
106 
107 #include <netinet/in.h>
108 #include <netinet/if_ether.h>
109 
110 #include <net/if_media.h>
111 
112 #if NBPFILTER > 0
113 #include <net/bpf.h>
114 #endif
115 
116 #include <dev/mii/mii.h>
117 #include <dev/mii/miivar.h>
118 
119 #include <machine/bus.h>
120 #include <dev/pci/pcidevs.h>
121 
122 #include <dev/ic/dcreg.h>
123 
124 /*
125  * The Davicom DM9102 has a broken DMA engine that reads beyond the
126  * end of the programmed transfer.  Architectures with a proper IOMMU
127  * (such as sparc64) will trap on this access.  To avoid having to
128  * copy each transmitted mbuf to guarantee enough trailing space,
129  * those architectures should implement BUS_DMA_OVERRUN that takes
130  * appropriate action to tolerate this behaviour.
131  */
132 #ifndef BUS_DMA_OVERRUN
133 #define BUS_DMA_OVERRUN 0
134 #endif
135 
136 int dc_intr(void *);
137 struct dc_type *dc_devtype(void *);
138 int dc_newbuf(struct dc_softc *, int, struct mbuf *);
139 int dc_encap(struct dc_softc *, bus_dmamap_t, struct mbuf *, u_int32_t *);
140 
141 void dc_pnic_rx_bug_war(struct dc_softc *, int);
142 int dc_rx_resync(struct dc_softc *);
143 int dc_rxeof(struct dc_softc *);
144 void dc_txeof(struct dc_softc *);
145 void dc_tick(void *);
146 void dc_tx_underrun(struct dc_softc *);
147 void dc_start(struct ifnet *);
148 int dc_ioctl(struct ifnet *, u_long, caddr_t);
149 void dc_watchdog(struct ifnet *);
150 int dc_ifmedia_upd(struct ifnet *);
151 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
152 
153 void dc_delay(struct dc_softc *);
154 void dc_eeprom_width(struct dc_softc *);
155 void dc_eeprom_idle(struct dc_softc *);
156 void dc_eeprom_putbyte(struct dc_softc *, int);
157 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
158 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
159 void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
160 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
161 
162 void dc_mii_writebit(struct dc_softc *, int);
163 int dc_mii_readbit(struct dc_softc *);
164 void dc_mii_sync(struct dc_softc *);
165 void dc_mii_send(struct dc_softc *, u_int32_t, int);
166 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
167 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
168 int dc_miibus_readreg(struct device *, int, int);
169 void dc_miibus_writereg(struct device *, int, int, int);
170 void dc_miibus_statchg(struct device *);
171 
172 void dc_setcfg(struct dc_softc *, uint64_t);
173 u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
174 u_int32_t dc_crc_be(caddr_t);
175 void dc_setfilt_21143(struct dc_softc *);
176 void dc_setfilt_asix(struct dc_softc *);
177 void dc_setfilt_admtek(struct dc_softc *);
178 void dc_setfilt_xircom(struct dc_softc *);
179 
180 void dc_setfilt(struct dc_softc *);
181 
182 void dc_reset(struct dc_softc *);
183 int dc_list_rx_init(struct dc_softc *);
184 int dc_list_tx_init(struct dc_softc *);
185 
186 void dc_read_srom(struct dc_softc *, int);
187 void dc_parse_21143_srom(struct dc_softc *);
188 void dc_decode_leaf_sia(struct dc_softc *,
189 				     struct dc_eblock_sia *);
190 void dc_decode_leaf_mii(struct dc_softc *,
191 				     struct dc_eblock_mii *);
192 void dc_decode_leaf_sym(struct dc_softc *,
193 				     struct dc_eblock_sym *);
194 void dc_apply_fixup(struct dc_softc *, uint64_t);
195 
/*
 * Read-modify-write helpers: set or clear bits in a chip CSR.
 */
#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

/* Shorthand for twiddling bits in the serial I/O (SIO) register. */
#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
204 
205 void
206 dc_delay(struct dc_softc *sc)
207 {
208 	int idx;
209 
210 	for (idx = (300 / 33) + 1; idx > 0; idx--)
211 		CSR_READ_4(sc, DC_BUSCTL);
212 }
213 
/*
 * Probe the address width of the serial (MicroWire-style) EEPROM.
 * We clock out a read opcode followed by up to 12 zero address bits;
 * the EEPROM pulls its data-out line low once it has received a full
 * address, and the number of bits it took tells us the width.  The
 * result is cached in sc->dc_romwidth for dc_eeprom_putbyte().
 */
void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit read opcode (binary 110), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in zero address bits until the EEPROM drops its
	 * data-out line; the iteration count is the address width.
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Implausible widths (<4, or no response at all) default to 6. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
277 
/*
 * Force the EEPROM into its idle state: select the ROM, assert chip
 * select, toggle the clock enough times to flush any partially-issued
 * command, then deselect the part and clear the SIO register.
 */
void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* 25 clock pulses are enough to flush any pending command. */
	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}
305 
306 /*
307  * Send a read command and address to the EEPROM, check for ACK.
308  */
309 void
310 dc_eeprom_putbyte(struct dc_softc *sc, int addr)
311 {
312 	int d, i;
313 
314 	d = DC_EECMD_READ >> 6;
315 
316 	for (i = 3; i--; ) {
317 		if (d & (1 << i))
318 			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
319 		else
320 			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
321 		dc_delay(sc);
322 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
323 		dc_delay(sc);
324 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
325 		dc_delay(sc);
326 	}
327 
328 	/*
329 	 * Feed in each bit and strobe the clock.
330 	 */
331 	for (i = sc->dc_romwidth; i--;) {
332 		if (addr & (1 << i)) {
333 			SIO_SET(DC_SIO_EE_DATAIN);
334 		} else {
335 			SIO_CLR(DC_SIO_EE_DATAIN);
336 		}
337 		dc_delay(sc);
338 		SIO_SET(DC_SIO_EE_CLK);
339 		dc_delay(sc);
340 		SIO_CLR(DC_SIO_EE_CLK);
341 		dc_delay(sc);
342 	}
343 }
344 
345 /*
346  * Read a word of data stored in the EEPROM at address 'addr.'
347  * The PNIC 82c168/82c169 has its own non-standard way to read
348  * the EEPROM.
349  */
void
dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int32_t r;

	/* Kick off the read; the PNIC runs the serial protocol itself. */
	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);

	/*
	 * Poll for completion.  On timeout *dest is left unmodified
	 * (callers initialize it beforehand, see dc_read_eeprom()).
	 */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			*dest = (u_int16_t)(r & 0xFFFF);
			return;
		}
	}
}
367 
368 /*
369  * Read a word of data stored in the EEPROM at address 'addr.'
370  * The Xircom X3201 has its own non-standard way to read
371  * the EEPROM, too.
372  */
void
dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	/* Select the ROM and put the port in read mode. */
	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

	/*
	 * The X3201's ROM is byte-addressed: fetch the low byte of
	 * the word, then the high byte.  (0x160 is presumably a
	 * command/strobe pattern for the DC_ROM register -- not
	 * documented here; TODO confirm against Xircom docs.)
	 */
	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
}
387 
388 /*
389  * Read a word of data stored in the EEPROM at address 'addr.'
390  */
void
dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO,  DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, most significant bit first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;
}
433 
434 /*
435  * Read a sequence of words from the EEPROM.
436  */
437 void
438 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt,
439     int swap)
440 {
441 	int i;
442 	u_int16_t word = 0, *ptr;
443 
444 	for (i = 0; i < cnt; i++) {
445 		if (DC_IS_PNIC(sc))
446 			dc_eeprom_getword_pnic(sc, off + i, &word);
447 		else if (DC_IS_XIRCOM(sc))
448 			dc_eeprom_getword_xircom(sc, off + i, &word);
449 		else
450 			dc_eeprom_getword(sc, off + i, &word);
451 		ptr = (u_int16_t *)(dest + (i * 2));
452 		if (swap)
453 			*ptr = betoh16(word);
454 		else
455 			*ptr = letoh16(word);
456 	}
457 }
458 
459 /*
460  * The following two routines are taken from the Macronix 98713
461  * Application Notes pp.19-21.
462  */
463 /*
464  * Write a bit to the MII bus.
465  */
void
dc_mii_writebit(struct dc_softc *sc, int bit)
{
	/* Present the data bit with the SIO port in MII write mode. */
	if (bit)
		CSR_WRITE_4(sc, DC_SIO,
		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
	else
		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	/* Strobe the MII clock high, then low, to latch the bit. */
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
}
478 
479 /*
480  * Read a bit from the MII bus.
481  */
int
dc_mii_readbit(struct dc_softc *sc)
{
	/* Put the SIO port in MII read mode and prime it with a read. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
	CSR_READ_4(sc, DC_SIO);
	/* Clock the PHY once, then sample the data-in line. */
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
		return (1);
	return (0);
}
493 
494 /*
495  * Sync the PHYs by setting data bit and strobing the clock 32 times.
496  */
void
dc_mii_sync(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	/* 32 consecutive 1 bits form the MII frame preamble. */
	for (i = 0; i < 32; i++)
		dc_mii_writebit(sc, 1);
}
507 
508 /*
509  * Clock a series of bits through the MII.
510  */
511 void
512 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
513 {
514 	int i;
515 
516 	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
517 		dc_mii_writebit(sc, bits & i);
518 }
519 
520 /*
521  * Read an PHY register through the MII.
522  */
int
dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int i, ack, s;

	/* Block network interrupts while bit-banging the bus. */
	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	dc_mii_writebit(sc, 1);
	dc_mii_writebit(sc, 0);
#endif

	/* Check for ack (0 = PHY drove the turnaround bit low). */
	ack = dc_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			dc_mii_readbit(sc);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (dc_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	/* Two trailing idle bits terminate the frame. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	splx(s);

	/* Return 1 on failure (no ack), 0 on success. */
	if (ack)
		return (1);
	return (0);
}
589 
590 /*
591  * Write to a PHY register through the MII.
592  */
int
dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int s;

	/* Block network interrupts while bit-banging the bus. */
	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/* Clock out the complete write frame. */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	splx(s);
	return (0);
}
626 
/*
 * Read a PHY register.  Several chip families need special handling:
 * some answer only at a fixed PHY address, some map the PHY registers
 * directly into CSR space, and the rest use the bit-banged MII code
 * above.
 */
int
dc_miibus_readreg(struct device *self, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc = (struct dc_softc *)self;
	int i, rval, phy_reg;

	/*
	 * Note: both the AL981 and AN983 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN983 uses a serial MII interface.
	 * The AN983's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probs of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
	 * so we only respond to correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	/*
	 * When not in MII mode, synthesize responses at the last PHY
	 * address so the MII probe code still finds "a PHY" here.
	 */
	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (PCI_VENDOR_LITEON);
				return (PCI_VENDOR_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (PCI_PRODUCT_LITEON_PNIC);
				return (PCI_PRODUCT_DEC_21142);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	if (DC_IS_PNIC(sc)) {
		/*
		 * The PNIC has a dedicated MII access register; poll
		 * until its busy bit clears.  A data value of 0xFFFF
		 * (bus reads as all-ones) is mapped to 0.
		 */
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	if (DC_IS_COMET(sc)) {
		/* Comet (AL981): PHY registers are mapped into CSR space. */
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_read: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		/* All-ones means no PHY answered. */
		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	/* Everything else: bit-banged MII access. */
	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	/*
	 * The 98713 needs DC_NETCFG_PORTSEL cleared while bit-banging
	 * the MII; save and restore the config register around it.
	 */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}
747 
/*
 * Write a PHY register.  Mirrors dc_miibus_readreg(): fixed PHY
 * addresses on ADMtek/Conexant, a dedicated MII register on the
 * PNIC, direct-mapped registers on the Comet, and bit-banged MII
 * for everything else.
 */
void
dc_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct dc_mii_frame frame;
	int i, phy_reg;

	bzero(&frame, sizeof(frame));

	/* Only the fixed internal PHY address responds on these chips. */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return;
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return;

	if (DC_IS_PNIC(sc)) {
		/* Dedicated MII register; poll until the busy bit clears. */
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return;
	}

	if (DC_IS_COMET(sc)) {
		/* Comet (AL981): PHY registers are mapped into CSR space. */
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_write: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return;
	}

	/* Everything else: bit-banged MII access. */
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	/*
	 * The 98713 needs DC_NETCFG_PORTSEL cleared while bit-banging
	 * the MII; save and restore the config register around it.
	 */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
}
817 
818 void
819 dc_miibus_statchg(struct device *self)
820 {
821 	struct dc_softc *sc = (struct dc_softc *)self;
822 	struct mii_data *mii;
823 	struct ifmedia *ifm;
824 
825 	if (DC_IS_ADMTEK(sc))
826 		return;
827 
828 	mii = &sc->sc_mii;
829 	ifm = &mii->mii_media;
830 	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
831 		dc_setcfg(sc, ifm->ifm_media);
832 		sc->dc_if_media = ifm->ifm_media;
833 	} else {
834 		dc_setcfg(sc, mii->mii_media_active);
835 		sc->dc_if_media = mii->mii_media_active;
836 	}
837 }
838 
839 #define DC_BITS_512	9
840 #define DC_BITS_128	7
841 #define DC_BITS_64	6
842 
843 u_int32_t
844 dc_crc_le(struct dc_softc *sc, caddr_t addr)
845 {
846 	u_int32_t crc;
847 
848 	/* Compute CRC for the address value. */
849 	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
850 
851 	/*
852 	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
853 	 * chips is only 128 bits wide.
854 	 */
855 	if (sc->dc_flags & DC_128BIT_HASH)
856 		return (crc & ((1 << DC_BITS_128) - 1));
857 
858 	/* The hash table on the MX98715BEC is only 64 bits wide. */
859 	if (sc->dc_flags & DC_64BIT_HASH)
860 		return (crc & ((1 << DC_BITS_64) - 1));
861 
862 	/* Xircom's hash filtering table is different (read: weird) */
863 	/* Xircom uses the LEAST significant bits */
864 	if (DC_IS_XIRCOM(sc)) {
865 		if ((crc & 0x180) == 0x180)
866 			return (crc & 0x0F) + (crc	& 0x70)*3 + (14 << 4);
867 		else
868 			return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
869 	}
870 
871 	return (crc & ((1 << DC_BITS_512) - 1));
872 }
873 
874 /*
875  * Calculate CRC of a multicast group address, return the lower 6 bits.
876  */
877 #define dc_crc_be(addr)	((ether_crc32_be(addr,ETHER_ADDR_LEN) >> 26) \
878 	& 0x0000003F)
879 
880 /*
881  * 21143-style RX filter setup routine. Filter programming is done by
882  * downloading a special setup frame into the TX engine. 21143, Macronix,
883  * PNIC, PNIC II and Davicom chips are programmed this way.
884  *
885  * We always program the chip using 'hash perfect' mode, i.e. one perfect
886  * address (our node address) and a 512-bit hash filter for multicast
887  * frames. We also sneak the broadcast address into the hash filter since
888  * we need that too.
889  */
void
dc_setfilt_21143(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	/* Claim a TX descriptor slot for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero(sp, DC_SFRAME_LEN);

	/* Point the descriptor at the setup buffer and mark it as such. */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/* Record the buffer so dc_txeof() can recognize this slot. */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* Hash each multicast address into the 512-bit table. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_le(sc, enm->enm_addrlo);

			sp[h >> 4] |= htole32(1 << (h & 0xF));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Always accept broadcast frames.
	 */
	h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
	sp[h >> 4] |= htole32(1 << (h & 0xF));

	/* Set our MAC address */
	sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	/* Flush the setup buffer before handing it to the chip. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    sizeof(struct dc_list_data) -
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Give ownership of the descriptor to the chip... */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list[i]),
	    sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* ...and kick the transmitter. */
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	ifp->if_timer = 5;
}
971 
/*
 * ADMtek-style RX filter setup: the station address and the 64-bit
 * multicast hash table live in directly-writable registers, so no
 * setup frame is needed.
 */
void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	int h = 0;

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Centaur parts hash with the little-endian CRC,
			 * older ones with the big-endian CRC. */
			if (DC_IS_CENTAUR(sc))
				h = dc_crc_le(sc, enm->enm_addrlo);
			else
				h = dc_crc_be(enm->enm_addrlo);

			/* 64-bit table split across two 32-bit words. */
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 |
	    ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4]);

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}
1018 
/*
 * ASIX-style RX filter setup.  The filter registers are reached
 * indirectly: write an index to DC_AX_FILTIDX, then the value to
 * DC_AX_FILTDATA.
 */
void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	int h = 0;

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_AX_NETCFG_RX_BROAD |
	    DC_NETCFG_RX_PROMISC));
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_be(enm->enm_addrlo);

			/* 64-bit table split across two 32-bit words. */
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Init our MAC address.  NOTE(review): the station address is
	 * loaded with 32-bit reads through a cast; this assumes
	 * ac_enaddr is suitably aligned -- confirm for strict-alignment
	 * platforms.
	 */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
1073 
/*
 * Xircom-style RX filter setup.  Like the 21143, the filter is
 * loaded via a setup frame pushed through the TX ring, but the
 * transmitter and receiver are stopped around the operation and the
 * station address lives at the start of the setup buffer.
 */
void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	/* Stop TX and RX while we rewrite the filter. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	/* Claim a TX descriptor slot for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero(sp, DC_SFRAME_LEN);

	/* Point the descriptor at the setup buffer and mark it as such. */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/* Record the buffer so dc_txeof() can recognize this slot. */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_le(sc, enm->enm_addrlo);

			sp[h >> 4] |= htole32(1 << (h & 0xF));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Always accept broadcast frames.
	 */
	h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
	sp[h >> 4] |= htole32(1 << (h & 0xF));

	/* Set our MAC address */
	sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	/*
	 * Restart the chip and hand it the setup frame.
	 * NOTE(review): unlike dc_setfilt_21143(), no bus_dmamap_sync
	 * is done before setting the OWN bit -- verify on
	 * DMA-incoherent platforms.
	 */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;
}
1147 
/*
 * Dispatch to the chip-specific RX filter programming routine.
 * The predicates are mutually exclusive in practice, so at most one
 * of the helpers runs.
 */
void
dc_setfilt(struct dc_softc *sc)
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}
1164 
1165 /*
1166  * In order to fiddle with the
1167  * 'full-duplex' and '100Mbps' bits in the netconfig register, we
1168  * first have to put the transmit and/or receive logic in the idle state.
1169  */
/*
 * Program the chip for the given media type/duplex.  The transmit and
 * receive logic is idled first (the speed/duplex bits in NETCFG may
 * only be changed while idle); if it was running it is restarted at
 * the end.
 */
void
dc_setcfg(struct dc_softc *sc, uint64_t media)
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/* If TX or RX is running, stop both and wait for them to idle. */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/* Some chips never report idle; don't warn for them. */
			if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			/* MII mode: no PCS/scrambler, external port select. */
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* Symbol/SIA mode: run 100baseTX through the PCS. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Reprogram the SIA for forced 10baseT:
				 * reset it, load the duplex-specific CSR
				 * value, then disable autoneg.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	/* Finally, set or clear full duplex. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
}
1323 
/*
 * Issue a software reset of the chip, wait for it to complete, and
 * reinitialize the core registers to a known quiescent state.
 */
void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Poll for the reset bit to self-clear. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * On these chips the reset bit does not reliably self-clear:
	 * clear it by hand, and zero `i' so the timeout warning below
	 * is suppressed.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Mask all interrupts and clear the bus/network configuration. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	if (sc->dc_type == DC_TYPE_21145)
		dc_setcfg(sc, IFM_10_T);
}
1369 
1370 void
1371 dc_apply_fixup(struct dc_softc *sc, uint64_t media)
1372 {
1373 	struct dc_mediainfo *m;
1374 	u_int8_t *p;
1375 	int i;
1376 	u_int32_t reg;
1377 
1378 	m = sc->dc_mi;
1379 
1380 	while (m != NULL) {
1381 		if (m->dc_media == media)
1382 			break;
1383 		m = m->dc_next;
1384 	}
1385 
1386 	if (m == NULL)
1387 		return;
1388 
1389 	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1390 		reg = (p[0] | (p[1] << 8)) << 16;
1391 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1392 	}
1393 
1394 	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1395 		reg = (p[0] | (p[1] << 8)) << 16;
1396 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1397 	}
1398 }
1399 
1400 void
1401 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1402 {
1403 	struct dc_mediainfo *m;
1404 
1405 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1406 	if (m == NULL)
1407 		return;
1408 	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1409 	case DC_SIA_CODE_10BT:
1410 		m->dc_media = IFM_10_T;
1411 		break;
1412 	case DC_SIA_CODE_10BT_FDX:
1413 		m->dc_media = IFM_10_T|IFM_FDX;
1414 		break;
1415 	case DC_SIA_CODE_10B2:
1416 		m->dc_media = IFM_10_2;
1417 		break;
1418 	case DC_SIA_CODE_10B5:
1419 		m->dc_media = IFM_10_5;
1420 		break;
1421 	default:
1422 		break;
1423 	}
1424 
1425 	/*
1426 	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1427 	 * Things apparently already work for cards that do
1428 	 * supply Media Specific Data.
1429 	 */
1430 	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1431 		m->dc_gp_len = 2;
1432 		m->dc_gp_ptr =
1433 		(u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1434 	} else {
1435 		m->dc_gp_len = 2;
1436 		m->dc_gp_ptr =
1437 		(u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1438 	}
1439 
1440 	m->dc_next = sc->dc_mi;
1441 	sc->dc_mi = m;
1442 
1443 	sc->dc_pmode = DC_PMODE_SIA;
1444 }
1445 
1446 void
1447 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1448 {
1449 	struct dc_mediainfo *m;
1450 
1451 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1452 	if (m == NULL)
1453 		return;
1454 	if (l->dc_sym_code == DC_SYM_CODE_100BT)
1455 		m->dc_media = IFM_100_TX;
1456 
1457 	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1458 		m->dc_media = IFM_100_TX|IFM_FDX;
1459 
1460 	m->dc_gp_len = 2;
1461 	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1462 
1463 	m->dc_next = sc->dc_mi;
1464 	sc->dc_mi = m;
1465 
1466 	sc->dc_pmode = DC_PMODE_SYM;
1467 }
1468 
1469 void
1470 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1471 {
1472 	u_int8_t *p;
1473 	struct dc_mediainfo *m;
1474 
1475 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1476 	if (m == NULL)
1477 		return;
1478 	/* We abuse IFM_AUTO to represent MII. */
1479 	m->dc_media = IFM_AUTO;
1480 	m->dc_gp_len = l->dc_gpr_len;
1481 
1482 	p = (u_int8_t *)l;
1483 	p += sizeof(struct dc_eblock_mii);
1484 	m->dc_gp_ptr = p;
1485 	p += 2 * l->dc_gpr_len;
1486 	m->dc_reset_len = *p;
1487 	p++;
1488 	m->dc_reset_ptr = p;
1489 
1490 	m->dc_next = sc->dc_mi;
1491 	sc->dc_mi = m;
1492 }
1493 
1494 void
1495 dc_read_srom(struct dc_softc *sc, int bits)
1496 {
1497 	sc->dc_sromsize = 2 << bits;
1498 	sc->dc_srom = malloc(sc->dc_sromsize, M_DEVBUF, M_NOWAIT);
1499 	if (sc->dc_srom == NULL)
1500 		return;
1501 	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (sc->dc_sromsize / 2), 0);
1502 }
1503 
1504 void
1505 dc_parse_21143_srom(struct dc_softc *sc)
1506 {
1507 	struct dc_leaf_hdr *lhdr;
1508 	struct dc_eblock_hdr *hdr;
1509 	int have_mii, i, loff;
1510 	char *ptr;
1511 
1512 	have_mii = 0;
1513 	loff = sc->dc_srom[27];
1514 	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);
1515 
1516 	ptr = (char *)lhdr;
1517 	ptr += sizeof(struct dc_leaf_hdr) - 1;
1518 	/*
1519 	 * Look if we got a MII media block.
1520 	 */
1521 	for (i = 0; i < lhdr->dc_mcnt; i++) {
1522 		hdr = (struct dc_eblock_hdr *)ptr;
1523 		if (hdr->dc_type == DC_EBLOCK_MII)
1524 		    have_mii++;
1525 
1526 		ptr += (hdr->dc_len & 0x7F);
1527 		ptr++;
1528 	}
1529 
1530 	/*
1531 	 * Do the same thing again. Only use SIA and SYM media
1532 	 * blocks if no MII media block is available.
1533 	 */
1534 	ptr = (char *)lhdr;
1535 	ptr += sizeof(struct dc_leaf_hdr) - 1;
1536 	for (i = 0; i < lhdr->dc_mcnt; i++) {
1537 		hdr = (struct dc_eblock_hdr *)ptr;
1538 		switch(hdr->dc_type) {
1539 		case DC_EBLOCK_MII:
1540 			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
1541 			break;
1542 		case DC_EBLOCK_SIA:
1543 			if (! have_mii)
1544 			    dc_decode_leaf_sia(sc,
1545 				(struct dc_eblock_sia *)hdr);
1546 			break;
1547 		case DC_EBLOCK_SYM:
1548 			if (! have_mii)
1549 			    dc_decode_leaf_sym(sc,
1550 				(struct dc_eblock_sym *)hdr);
1551 			break;
1552 		default:
1553 			/* Don't care. Yet. */
1554 			break;
1555 		}
1556 		ptr += (hdr->dc_len & 0x7F);
1557 		ptr++;
1558 	}
1559 }
1560 
1561 /*
1562  * Attach the interface. Allocate softc structures, do ifmedia
1563  * setup and ethernet/BPF attach.
1564  */
1565 void
1566 dc_attach(struct dc_softc *sc)
1567 {
1568 	struct ifnet *ifp;
1569 	int mac_offset, tmp, i;
1570 	u_int32_t reg;
1571 
1572 	/*
1573 	 * Get station address from the EEPROM.
1574 	 */
1575 	if (sc->sc_hasmac)
1576 		goto hasmac;
1577 
1578 	switch(sc->dc_type) {
1579 	case DC_TYPE_98713:
1580 	case DC_TYPE_98713A:
1581 	case DC_TYPE_987x5:
1582 	case DC_TYPE_PNICII:
1583 		dc_read_eeprom(sc, (caddr_t)&mac_offset,
1584 		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
1585 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1586 		    (mac_offset / 2), 3, 0);
1587 		break;
1588 	case DC_TYPE_PNIC:
1589 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1590 		break;
1591 	case DC_TYPE_DM9102:
1592 	case DC_TYPE_21143:
1593 	case DC_TYPE_21145:
1594 	case DC_TYPE_ASIX:
1595 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1596 		    DC_EE_NODEADDR, 3, 0);
1597 		break;
1598 	case DC_TYPE_AL981:
1599 	case DC_TYPE_AN983:
1600 		reg = CSR_READ_4(sc, DC_AL_PAR0);
1601 		sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff);
1602 		sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff;
1603 		sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff;
1604 		sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff;
1605 		reg = CSR_READ_4(sc, DC_AL_PAR1);
1606 		sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff);
1607 		sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff;
1608 		break;
1609 	case DC_TYPE_CONEXANT:
1610 		bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR,
1611 		    &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1612 		break;
1613 	case DC_TYPE_XIRCOM:
1614 		/* Some newer units have the MAC at offset 8 */
1615 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 8, 3, 0);
1616 
1617 		if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1618 		    sc->sc_arpcom.ac_enaddr[1] == 0x10 &&
1619 		    sc->sc_arpcom.ac_enaddr[2] == 0xa4)
1620 			break;
1621 		if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1622 		    sc->sc_arpcom.ac_enaddr[1] == 0x80 &&
1623 		    sc->sc_arpcom.ac_enaddr[2] == 0xc7)
1624 			break;
1625 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 3, 3, 0);
1626 		break;
1627 	default:
1628 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1629 		    DC_EE_NODEADDR, 3, 0);
1630 		break;
1631 	}
1632 hasmac:
1633 
1634 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
1635 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1636 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
1637 		printf(": can't alloc list mem\n");
1638 		goto fail;
1639 	}
1640 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1641 	    sizeof(struct dc_list_data), &sc->sc_listkva,
1642 	    BUS_DMA_NOWAIT) != 0) {
1643 		printf(": can't map list mem\n");
1644 		goto fail;
1645 	}
1646 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
1647 	    sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
1648 	    &sc->sc_listmap) != 0) {
1649 		printf(": can't alloc list map\n");
1650 		goto fail;
1651 	}
1652 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1653 	    sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1654 		printf(": can't load list map\n");
1655 		goto fail;
1656 	}
1657 	sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1658 
1659 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1660 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1661 		    0, BUS_DMA_NOWAIT,
1662 		    &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
1663 			printf(": can't create rx map\n");
1664 			return;
1665 		}
1666 	}
1667 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1668 	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
1669 		printf(": can't create rx spare map\n");
1670 		return;
1671 	}
1672 
1673 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1674 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1675 		    (sc->dc_flags & DC_TX_COALESCE) ? 1 : DC_TX_LIST_CNT - 5,
1676 		    MCLBYTES, 0, BUS_DMA_NOWAIT,
1677 		    &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
1678 			printf(": can't create tx map\n");
1679 			return;
1680 		}
1681 	}
1682 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1683 	    (sc->dc_flags & DC_TX_COALESCE) ? 1 : DC_TX_LIST_CNT - 5,
1684 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1685 		printf(": can't create tx spare map\n");
1686 		return;
1687 	}
1688 
1689 	/*
1690 	 * A 21143 or clone chip was detected. Inform the world.
1691 	 */
1692 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1693 
1694 	ifp = &sc->sc_arpcom.ac_if;
1695 	ifp->if_softc = sc;
1696 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1697 	ifp->if_ioctl = dc_ioctl;
1698 	ifp->if_start = dc_start;
1699 	ifp->if_watchdog = dc_watchdog;
1700 	ifq_init_maxlen(&ifp->if_snd, DC_TX_LIST_CNT - 1);
1701 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1702 
1703 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1704 
1705 	/* Do MII setup. If this is a 21143, check for a PHY on the
1706 	 * MII bus after applying any necessary fixups to twiddle the
1707 	 * GPIO bits. If we don't end up finding a PHY, restore the
1708 	 * old selection (SIA only or SIA/SYM) and attach the dcphy
1709 	 * driver instead.
1710 	 */
1711 	if (DC_IS_INTEL(sc)) {
1712 		dc_apply_fixup(sc, IFM_AUTO);
1713 		tmp = sc->dc_pmode;
1714 		sc->dc_pmode = DC_PMODE_MII;
1715 	}
1716 
1717 	/*
1718 	 * Setup General Purpose port mode and data so the tulip can talk
1719 	 * to the MII.  This needs to be done before mii_attach so that
1720 	 * we can actually see them.
1721 	 */
1722 	if (DC_IS_XIRCOM(sc)) {
1723 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
1724 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1725 		DELAY(10);
1726 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
1727 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1728 		DELAY(10);
1729 	}
1730 
1731 	sc->sc_mii.mii_ifp = ifp;
1732 	sc->sc_mii.mii_readreg = dc_miibus_readreg;
1733 	sc->sc_mii.mii_writereg = dc_miibus_writereg;
1734 	sc->sc_mii.mii_statchg = dc_miibus_statchg;
1735 	ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1736 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1737 	    MII_OFFSET_ANY, 0);
1738 
1739 	if (DC_IS_INTEL(sc)) {
1740 		if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1741 			sc->dc_pmode = tmp;
1742 			if (sc->dc_pmode != DC_PMODE_SIA)
1743 				sc->dc_pmode = DC_PMODE_SYM;
1744 			sc->dc_flags |= DC_21143_NWAY;
1745 			if (sc->dc_flags & DC_MOMENCO_BOTCH)
1746 				sc->dc_pmode = DC_PMODE_MII;
1747 			mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1748 			    MII_PHY_ANY, MII_OFFSET_ANY, 0);
1749 		} else {
1750 			/* we have a PHY, so we must clear this bit */
1751 			sc->dc_flags &= ~DC_TULIP_LEDS;
1752 		}
1753 	}
1754 
1755 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1756 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1757 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1758 		printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1759 	} else if (sc->dc_type == DC_TYPE_21145) {
1760 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
1761 	} else
1762 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1763 
1764 	if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
1765 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);
1766 
1767 	if (DC_IS_ADMTEK(sc)) {
1768 		/*
1769 		 * Set automatic TX underrun recovery for the ADMtek chips
1770 		 */
1771 		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
1772 	}
1773 
1774 	/*
1775 	 * Call MI attach routines.
1776 	 */
1777 	if_attach(ifp);
1778 	ether_ifattach(ifp);
1779 
1780 fail:
1781 	return;
1782 }
1783 
1784 /*
1785  * Initialize the transmit descriptors.
1786  */
1787 int
1788 dc_list_tx_init(struct dc_softc *sc)
1789 {
1790 	struct dc_chain_data *cd;
1791 	struct dc_list_data *ld;
1792 	int i;
1793 	bus_addr_t next;
1794 
1795 	cd = &sc->dc_cdata;
1796 	ld = sc->dc_ldata;
1797 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1798 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1799 		if (i == (DC_TX_LIST_CNT - 1))
1800 			next +=
1801 			    offsetof(struct dc_list_data, dc_tx_list[0]);
1802 		else
1803 			next +=
1804 			    offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1805 		cd->dc_tx_chain[i].sd_mbuf = NULL;
1806 		ld->dc_tx_list[i].dc_data = htole32(0);
1807 		ld->dc_tx_list[i].dc_ctl = htole32(0);
1808 		ld->dc_tx_list[i].dc_next = htole32(next);
1809 	}
1810 
1811 	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1812 
1813 	return (0);
1814 }
1815 
1816 
1817 /*
1818  * Initialize the RX descriptors and allocate mbufs for them. Note that
1819  * we arrange the descriptors in a closed ring, so that the last descriptor
1820  * points back to the first.
1821  */
1822 int
1823 dc_list_rx_init(struct dc_softc *sc)
1824 {
1825 	struct dc_chain_data *cd;
1826 	struct dc_list_data *ld;
1827 	int i;
1828 	bus_addr_t next;
1829 
1830 	cd = &sc->dc_cdata;
1831 	ld = sc->dc_ldata;
1832 
1833 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1834 		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
1835 			return (ENOBUFS);
1836 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1837 		if (i == (DC_RX_LIST_CNT - 1))
1838 			next +=
1839 			    offsetof(struct dc_list_data, dc_rx_list[0]);
1840 		else
1841 			next +=
1842 			    offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1843 		ld->dc_rx_list[i].dc_next = htole32(next);
1844 	}
1845 
1846 	cd->dc_rx_prod = 0;
1847 
1848 	return (0);
1849 }
1850 
1851 /*
1852  * Initialize an RX descriptor and attach an MBUF cluster.
1853  */
int
dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct dc_desc *c;
	bus_dmamap_t map;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		/* Allocate a fresh mbuf cluster for slot `i'. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		/*
		 * Load into the spare map first so the slot keeps its
		 * old mapping if the load fails.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,
		    m_new, BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		/* Swap the freshly loaded spare map into slot `i'. */
		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Keep the payload 64-bit aligned. */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero(mtod(m_new, char *), m_new->m_len);

	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Point the descriptor at the buffer and give it to the chip. */
	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
	c->dc_data = htole32(
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN);
	c->dc_status = htole32(DC_RXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_rx_list[i]),
	    sizeof(struct dc_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1921 
1922 /*
1923  * Grrrrr.
1924  * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
1926  * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
1927  * will happen on slow machines. The bug is that sometimes instead of
1928  * uploading one complete frame during reception, it uploads what looks
1929  * like the entire contents of its FIFO memory. The frame we want is at
1930  * the end of the whole mess, but we never know exactly how much data has
1931  * been uploaded, so salvaging the frame is hard.
1932  *
1933  * There is only one way to do it reliably, and it's disgusting.
1934  * Here's what we know:
1935  *
1936  * - We know there will always be somewhere between one and three extra
1937  *   descriptors uploaded.
1938  *
1939  * - We know the desired received frame will always be at the end of the
1940  *   total data upload.
1941  *
1942  * - We know the size of the desired received frame because it will be
1943  *   provided in the length field of the status word in the last descriptor.
1944  *
1945  * Here's what we do:
1946  *
1947  * - When we allocate buffers for the receive ring, we bzero() them.
1948  *   This means that we know that the buffer contents should be all
1949  *   zeros, except for data uploaded by the chip.
1950  *
1951  * - We also force the PNIC chip to upload frames that include the
1952  *   ethernet CRC at the end.
1953  *
1954  * - We gather all of the bogus frame data into a single buffer.
1955  *
1956  * - We then position a pointer at the end of this buffer and scan
1957  *   backwards until we encounter the first non-zero byte of data.
1958  *   This is the end of the received frame. We know we will encounter
1959  *   some data at the end of the frame because the CRC will always be
1960  *   there, so even if the sender transmits a packet of all zeros,
1961  *   we won't be fooled.
1962  *
1963  * - We know the size of the actual received frame, so we subtract
1964  *   that value from the current pointer location. This brings us
1965  *   to the start of the actual received packet.
1966  *
1967  * - We copy this into an mbuf and pass it on, along with the actual
1968  *   frame length.
1969  *
1970  * The performance hit is tremendous, but it beats dropping frames all
1971  * the time.
1972  */
1973 
#define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
/*
 * Salvage a frame mangled by the PNIC FIFO-dump bug (see the comment
 * above): concatenate the bogus fragments into dc_pnic_rx_buf, locate
 * the real frame at the end of the dump, and copy it back into the
 * final descriptor's mbuf.
 */
void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc		*cur_rx;
	struct dc_desc		*c = NULL;
	struct mbuf		*m = NULL;
	unsigned char		*ptr;
	int			i, total_len;
	u_int32_t		rxstat = 0;

	i = sc->dc_pnic_rx_bug_save;	/* first fragment descriptor */
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, ETHER_MAX_DIX_LEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		bcopy(mtod(m, char *), ptr, ETHER_MAX_DIX_LEN);
		ptr += ETHER_MAX_DIX_LEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		dc_newbuf(sc, i, m);	/* recycle the consumed fragment */
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/*
	 * Scan backwards until we hit a non-zero byte; the CRC
	 * guarantees the frame's tail is non-zero.
	 */
	while(*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((unsigned long)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, m);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2029 
2030 /*
2031  * This routine searches the RX ring for dirty descriptors in the
2032  * event that the rxeof routine falls out of sync with the chip's
2033  * current descriptor pointer. This may happen sometimes as a result
2034  * of a "no RX buffer available" condition that happens when the chip
2035  * consumes all of the RX buffers before the driver has a chance to
2036  * process the RX ring. This routine may need to be called more than
2037  * once to bring the driver back in sync with the chip, however we
2038  * should still be getting RX DONE interrupts to drive the search
2039  * for new packets in the RX ring, so we should catch up eventually.
2040  */
2041 int
2042 dc_rx_resync(struct dc_softc *sc)
2043 {
2044 	u_int32_t stat;
2045 	int i, pos, offset;
2046 
2047 	pos = sc->dc_cdata.dc_rx_prod;
2048 
2049 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2050 
2051 		offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
2052 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2053 		    offset, sizeof(struct dc_desc),
2054 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2055 
2056 		stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
2057 		if (!(stat & htole32(DC_RXSTAT_OWN)))
2058 			break;
2059 		DC_INC(pos, DC_RX_LIST_CNT);
2060 	}
2061 
2062 	/* If the ring really is empty, then just return. */
2063 	if (i == DC_RX_LIST_CNT)
2064 		return (0);
2065 
2066 	/* We've fallen behind the chip: catch it. */
2067 	sc->dc_cdata.dc_rx_prod = pos;
2068 
2069 	return (EAGAIN);
2070 }
2071 
2072 /*
2073  * A frame has been uploaded: pass the resulting mbuf chain up to
2074  * the higher level protocols.
2075  */
/*
 * Drain completed RX descriptors, pass good frames up the stack and
 * return the number of frames consumed (0 tells the caller the ring
 * may be out of sync; see dc_rx_resync()).
 */
int
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	int i, offset, total_len = 0, consumed = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	for(;;) {
		struct mbuf	*m0 = NULL;

		/* Fetch the descriptor's current status from DMA memory. */
		offset = offsetof(struct dc_list_data, dc_rx_list[i]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(cur_rx->dc_status);
		/* Chip still owns this descriptor: nothing more to do. */
		if (rxstat & DC_RXSTAT_OWN)
			break;

		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		total_len = DC_RXBYTES(rxstat);

		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/*
		 * PNIC chips can deliver a frame split across several
		 * descriptors; collect the pieces and let the war
		 * routine reassemble them, then re-read the status.
		 */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				rxstat = letoh32(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.  However, don't report long
		 * frames as errors since they could be VLANs.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
				       DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
				       DC_RXSTAT_RUNT   | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_newbuf(sc, i, m);
				if (rxstat & DC_RXSTAT_CRCERR) {
					/* CRC error: just skip this frame. */
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/* Anything worse: reset the chip. */
					dc_init(sc);
					break;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Copy the data out so the original cluster can be
		 * immediately recycled back into the descriptor.
		 */
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		consumed++;
		ml_enqueue(&ml, m);
	}

	sc->dc_cdata.dc_rx_prod = i;

	if_input(ifp, &ml);

	return (consumed);
}
2173 
2174 /*
2175  * A frame was downloaded to the chip. It's safe for us to clean up
2176  * the list buffers.
2177  */
2178 
/*
 * Reclaim completed TX descriptors: free the mbufs, unload their DMA
 * maps and accumulate error/collision statistics.
 */
void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx, offset;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->dc_cdata.dc_tx_cons;
	while(idx != sc->dc_cdata.dc_tx_prod) {
		u_int32_t		txstat;

		offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = letoh32(cur_tx->dc_status);

		/* Still owned by the chip: stop reclaiming here. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/*
		 * Non-final fragments and RX-filter setup frames carry
		 * no meaningful completion status; just skip past them.
		 */
		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
			}
			sc->dc_cdata.dc_tx_cnt--;
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
		    	    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
		    	    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			/* Underruns are handled via dc_tx_underrun();
			 * any other error forces a full reinit. */
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				dc_init(sc);
				return;
			}
		}

		/* Collision count field lives in bits 3..6 of the status. */
		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}
	sc->dc_cdata.dc_tx_cons = idx;

	/* Re-enable queuing once enough descriptors are free again. */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > 5)
		ifq_clr_oactive(&ifp->if_snd);
	if (sc->dc_cdata.dc_tx_cnt == 0)
		ifp->if_timer = 0;
}
2292 
/*
 * Periodic timer: poll the PHY/link state (with several per-chip
 * quirks), kick the transmit queue once link comes up, and reschedule
 * ourselves.
 */
void
dc_tick(void *xsc)
{
	struct dc_softc *sc = (struct dc_softc *)xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;
	u_int32_t r;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143 NWAY: read the link status bits directly
			 * and force a media change if the speed we think
			 * we're running at has lost link.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc,
			    DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (ifq_empty(&ifp->if_snd) == 0)
	 	    dc_start(ifp);
	}

	/* Poll fast (100ms) while NWAY negotiation has no link yet. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		timeout_add_msec(&sc->dc_tick_tmo, 100);
	else
		timeout_add_sec(&sc->dc_tick_tmo, 1);

	splx(s);
}
2371 
2372 /* A transmit underrun has occurred.  Back off the transmit threshold,
2373  * or switch to store and forward mode if we have to.
2374  */
2375 void
2376 dc_tx_underrun(struct dc_softc *sc)
2377 {
2378 	u_int32_t	isr;
2379 	int		i;
2380 
2381 	if (DC_IS_DAVICOM(sc))
2382 		dc_init(sc);
2383 
2384 	if (DC_IS_INTEL(sc)) {
2385 		/*
2386 		 * The real 21143 requires that the transmitter be idle
2387 		 * in order to change the transmit threshold or store
2388 		 * and forward state.
2389 		 */
2390 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2391 
2392 		for (i = 0; i < DC_TIMEOUT; i++) {
2393 			isr = CSR_READ_4(sc, DC_ISR);
2394 			if (isr & DC_ISR_TX_IDLE)
2395 				break;
2396 			DELAY(10);
2397 		}
2398 		if (i == DC_TIMEOUT) {
2399 			printf("%s: failed to force tx to idle state\n",
2400 			    sc->sc_dev.dv_xname);
2401 			dc_init(sc);
2402 		}
2403 	}
2404 
2405 	sc->dc_txthresh += DC_TXTHRESH_INC;
2406 	if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
2407 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2408 	} else {
2409 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2410 		DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2411 	}
2412 
2413 	if (DC_IS_INTEL(sc))
2414 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2415 
2416 	return;
2417 }
2418 
/*
 * Interrupt handler.  Returns non-zero when the interrupt was ours.
 * Interrupts are masked for the duration of the service loop and
 * re-enabled on the way out.
 */
int
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status, ints;
	int claimed = 0;

	sc = arg;

	ifp = &sc->sc_arpcom.ac_if;

	ints = CSR_READ_4(sc, DC_ISR);
	/* Not one of the interrupt sources we care about. */
	if ((ints & DC_INTRS) == 0)
		return (claimed);
	/* All-ones read: the device is likely gone (e.g. hot unplug). */
	if (ints == 0xffffffff)
		return (0);

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc, 0);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
	    status != 0xFFFFFFFF &&
	    (ifp->if_flags & IFF_RUNNING)) {

		claimed = 1;
		/* Ack the conditions we are about to service. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			/* 0 consumed frames means we may be out of sync. */
			if (dc_rxeof(sc) == 0) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* Work still pending: restart the transmitter. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			if (dc_rxeof(sc) == 0) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		/* Bus errors are unrecoverable: reinitialize everything. */
		if (status & DC_ISR_BUS_ERR)
			dc_init(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	if (ifq_empty(&ifp->if_snd) == 0)
		dc_start(ifp);

	return (claimed);
}
2495 
2496 /*
2497  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2498  * pointers to the fragment pointers.
2499  */
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.  The first descriptor's OWN bit
 * is deliberately left clear while the chain is being built and set
 * last, so the chip never sees a partially constructed frame.
 */
int
dc_encap(struct dc_softc *sc, bus_dmamap_t map, struct mbuf *m, u_int32_t *idx)
{
	struct dc_desc *f = NULL;
	int frag, cur, cnt = 0, i;

	cur = frag = *idx;

	/* One descriptor per DMA segment, linked with TLINK. */
	for (i = 0; i < map->dm_nsegs; i++) {
		f = &sc->dc_ldata->dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
		if (cnt == 0) {
			/* First fragment: OWN stays clear for now. */
			f->dc_status = htole32(0);
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(map->dm_segs[i].ds_addr);
		cur = frag;	/* remember the last slot used */
		DC_INC(frag, DC_TX_LIST_CNT);
		cnt++;
	}

	sc->dc_cdata.dc_tx_cnt += cnt;
	/* Track the mbuf and its loaded map on the final descriptor. */
	sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m;
	sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
	sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	/* Request a TX-done interrupt according to the chip's quirks. */
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[*idx].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Hand the completed chain to the chip in a single store. */
	sc->dc_ldata->dc_tx_list[*idx].dc_status = htole32(DC_TXSTAT_OWN);

	*idx = frag;

	return (0);
}
2545 
2546 /*
2547  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2548  * to the mbuf data regions directly in the transmit lists. We also save a
2549  * copy of the pointers since the transmit list fragment pointers are
2550  * physical addresses.
2551  */
2552 
2553 static inline int
2554 dc_fits(struct dc_softc *sc, int idx, bus_dmamap_t map)
2555 {
2556 	if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
2557 		if (sc->dc_cdata.dc_tx_prod != idx &&
2558 		    idx + map->dm_nsegs >= DC_TX_LIST_CNT)
2559 			return (0);
2560 	}
2561 
2562 	if (sc->dc_cdata.dc_tx_cnt + map->dm_nsegs + 5 > DC_TX_LIST_CNT)
2563 		return (0);
2564 
2565 	return (1);
2566 }
2567 
/*
 * Dequeue frames from the interface send queue, encapsulate them into
 * the TX descriptor ring and (unless polling) kick the transmitter.
 */
void
dc_start(struct ifnet *ifp)
{
	struct dc_softc *sc = ifp->if_softc;
	bus_dmamap_t map;
	struct mbuf *m;
	int idx;

	/* Without link, hold packets back until the queue builds up. */
	if (!sc->dc_link && ifq_len(&ifp->if_snd) < 10)
		return;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	idx = sc->dc_cdata.dc_tx_prod;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list),
	    sizeof(struct dc_desc) * DC_TX_LIST_CNT,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Peek at the next packet; commit/rollback below. */
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		map = sc->sc_tx_sparemap;
		switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_OVERRUN)) {
		case 0:
			break;
		case EFBIG:
			/* Too many segments: compact the chain and retry. */
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
			     BUS_DMA_NOWAIT | BUS_DMA_OVERRUN) == 0)
				break;

			/* FALLTHROUGH */
		default:
			/* Unloadable packet: drop it and keep going. */
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		if (!dc_fits(sc, idx, map)) {
			/* Ring is full: put the packet back and stall. */
			bus_dmamap_unload(sc->sc_dmat, map);
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m);

		if (dc_encap(sc, map, m, &idx) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Some chips can only handle one packet at a time. */
		if (sc->dc_flags & DC_TX_ONE) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list),
	    sizeof(struct dc_desc) * DC_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Nothing was queued. */
	if (idx == sc->dc_cdata.dc_tx_prod)
		return;

	/* Transmit */
	sc->dc_cdata.dc_tx_prod = idx;
	if (!(sc->dc_flags & DC_TX_POLL))
		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2662 
/*
 * Bring the interface up: reset the chip, program bus/cache settings
 * and the TX threshold, initialize the descriptor rings, load the RX
 * filter and start the receiver, transmitter and tick timer.  The
 * ordering of these steps follows the chip's programming requirements;
 * do not reorder without care.
 */
void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	int s;

	s = splnet();

	mii = &sc->sc_mii;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc, 0);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		/* Honor a threshold already raised by dc_tx_underrun(). */
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/* Xircom GPIO dance to enable the transceiver. */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		dc_stop(sc, 0);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Sync down both lists initialized.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_rx_list[0]));
	CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	timeout_set(&sc->dc_tick_tmo, dc_tick, sc);

	/* HomePNA has no link-state reporting; assume link is up. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		if (sc->dc_flags & DC_21143_NWAY)
			timeout_add_msec(&sc->dc_tick_tmo, 100);
		else
			timeout_add_sec(&sc->dc_tick_tmo, 1);
	}

#ifdef SRM_MEDIA
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		/* Apply the media setting inherited from SRM firmware. */
		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
}
2848 
2849 /*
2850  * Set media options.
2851  */
2852 int
2853 dc_ifmedia_upd(struct ifnet *ifp)
2854 {
2855 	struct dc_softc *sc;
2856 	struct mii_data *mii;
2857 	struct ifmedia *ifm;
2858 
2859 	sc = ifp->if_softc;
2860 	mii = &sc->sc_mii;
2861 	mii_mediachg(mii);
2862 
2863 	ifm = &mii->mii_media;
2864 
2865 	if (DC_IS_DAVICOM(sc) &&
2866 	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2867 		dc_setcfg(sc, ifm->ifm_media);
2868 	else
2869 		sc->dc_link = 0;
2870 
2871 	return (0);
2872 }
2873 
2874 /*
2875  * Report current media status.
2876  */
2877 void
2878 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2879 {
2880 	struct dc_softc *sc;
2881 	struct mii_data *mii;
2882 	struct ifmedia *ifm;
2883 
2884 	sc = ifp->if_softc;
2885 	mii = &sc->sc_mii;
2886 	mii_pollstat(mii);
2887 	ifm = &mii->mii_media;
2888 	if (DC_IS_DAVICOM(sc)) {
2889 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
2890 			ifmr->ifm_active = ifm->ifm_media;
2891 			ifmr->ifm_status = 0;
2892 			return;
2893 		}
2894 	}
2895 	ifmr->ifm_active = mii->mii_media_active;
2896 	ifmr->ifm_status = mii->mii_media_status;
2897 }
2898 
2899 int
2900 dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2901 {
2902 	struct dc_softc		*sc = ifp->if_softc;
2903 	struct ifreq		*ifr = (struct ifreq *) data;
2904 	int			s, error = 0;
2905 
2906 	s = splnet();
2907 
2908 	switch(command) {
2909 	case SIOCSIFADDR:
2910 		ifp->if_flags |= IFF_UP;
2911 		if (!(ifp->if_flags & IFF_RUNNING))
2912 			dc_init(sc);
2913 		break;
2914 	case SIOCSIFFLAGS:
2915 		if (ifp->if_flags & IFF_UP) {
2916 			if (ifp->if_flags & IFF_RUNNING)
2917 				error = ENETRESET;
2918 			else {
2919 				sc->dc_txthresh = 0;
2920 				dc_init(sc);
2921 			}
2922 		} else {
2923 			if (ifp->if_flags & IFF_RUNNING)
2924 				dc_stop(sc, 0);
2925 		}
2926 		break;
2927 	case SIOCGIFMEDIA:
2928 	case SIOCSIFMEDIA:
2929 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
2930 #ifdef SRM_MEDIA
2931 		if (sc->dc_srm_media)
2932 			sc->dc_srm_media = 0;
2933 #endif
2934 		break;
2935 	default:
2936 		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2937 	}
2938 
2939 	if (error == ENETRESET) {
2940 		if (ifp->if_flags & IFF_RUNNING)
2941 			dc_setfilt(sc);
2942 		error = 0;
2943 	}
2944 
2945 	splx(s);
2946 	return (error);
2947 }
2948 
2949 void
2950 dc_watchdog(struct ifnet *ifp)
2951 {
2952 	struct dc_softc *sc;
2953 
2954 	sc = ifp->if_softc;
2955 
2956 	ifp->if_oerrors++;
2957 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2958 
2959 	dc_init(sc);
2960 
2961 	if (ifq_empty(&ifp->if_snd) == 0)
2962 		dc_start(ifp);
2963 }
2964 
2965 /*
2966  * Stop the adapter and free any mbufs allocated to the
2967  * RX and TX lists.
2968  */
/*
 * Stop the adapter and free any mbufs allocated to the RX and TX
 * lists.  With softonly set, skip touching the hardware (used when
 * the device may already be gone, e.g. on detach).
 */
void
dc_stop(struct dc_softc *sc, int softonly)
{
	struct ifnet *ifp;
	u_int32_t isr;
	int i;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->dc_tick_tmo);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	if (!softonly) {
		/* Disable both DMA engines, then wait for them to idle. */
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if ((isr & DC_ISR_TX_IDLE ||
			    (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/* Some chips are known never to idle; stay quiet
			 * about those and only warn for the rest. */
			if (!((isr & DC_ISR_TX_IDLE) ||
			    (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
			    !DC_IS_ASIX(sc) && !DC_IS_DAVICOM(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}

		/* Mask interrupts and clear the list base registers. */
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
		CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
		CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
		sc->dc_link = 0;
	}

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->dc_ldata->dc_rx_list, sizeof(sc->dc_ldata->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
			/* Setup frames share the spare mbuf; don't free it. */
			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
			    htole32(DC_TXCTL_SETUP)) {
				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
				continue;
			}
			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->dc_ldata->dc_tx_list, sizeof(sc->dc_ldata->dc_tx_list));

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
3059 
3060 int
3061 dc_activate(struct device *self, int act)
3062 {
3063 	struct dc_softc *sc = (struct dc_softc *)self;
3064 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3065 	int rv = 0;
3066 
3067 	switch (act) {
3068 	case DVACT_SUSPEND:
3069 		if (ifp->if_flags & IFF_RUNNING)
3070 			dc_stop(sc, 0);
3071 		rv = config_activate_children(self, act);
3072 		break;
3073 	case DVACT_RESUME:
3074 		if (ifp->if_flags & IFF_UP)
3075 			dc_init(sc);
3076 		break;
3077 	default:
3078 		rv = config_activate_children(self, act);
3079 		break;
3080 	}
3081 	return (rv);
3082 }
3083 
/*
 * Tear down the device: stop it (soft only, the hardware may be gone),
 * detach PHYs, release all DMA resources and detach the interface.
 */
int
dc_detach(struct dc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	dc_stop(sc, 1);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	if (sc->dc_srom)
		free(sc->dc_srom, M_DEVBUF, sc->dc_sromsize);

	/* Destroy per-descriptor maps, then the spares. */
	for (i = 0; i < DC_RX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map);
	if (sc->sc_rx_sparemap)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_sparemap);
	for (i = 0; i < DC_TX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_tx_chain[i].sd_map);
	if (sc->sc_tx_sparemap)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_sparemap);

	/* XXX: no bus_dmamap_sync before unloading the list map. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_listmap);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_listkva, sc->sc_listnseg);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg);

	ether_ifdetach(ifp);
	if_detach(ifp);
	return (0);
}
3117 
/* Autoconf glue: cfdriver definition for the "dc" network device class. */
struct cfdriver dc_cd = {
	0, "dc", DV_IFNET
};
3121