xref: /openbsd/sys/dev/ic/dc.c (revision dda28197)
1 /*	$OpenBSD: dc.c,v 1.154 2020/07/10 13:26:37 patrick Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35  */
36 
37 /*
38  * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39  * series chips and several workalikes including the following:
40  *
41  * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42  * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43  * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44  * ASIX Electronics AX88140A (www.asix.com.tw)
45  * ASIX Electronics AX88141 (www.asix.com.tw)
46  * ADMtek AL981 (www.admtek.com.tw)
47  * ADMtek AN983 (www.admtek.com.tw)
48  * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49  * Accton EN1217, EN2242 (www.accton.com)
50  * Xircom X3201 (www.xircom.com)
51  *
52  * Datasheets for the 21143 are available at developer.intel.com.
53  * Datasheets for the clone parts can be found at their respective sites.
54  * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55  * The PNIC II is essentially a Macronix 98715A chip; the only difference
56  * worth noting is that its multicast hash table is only 128 bits wide
57  * instead of 512.
58  *
59  * Written by Bill Paul <wpaul@ee.columbia.edu>
60  * Electrical Engineering Department
61  * Columbia University, New York City
62  */
63 
64 /*
65  * The Intel 21143 is the successor to the DEC 21140. It is basically
66  * the same as the 21140 but with a few new features. The 21143 supports
67  * three kinds of media attachments:
68  *
69  * o MII port, for 10Mbps and 100Mbps support and NWAY
70  *   autonegotiation provided by an external PHY.
71  * o SYM port, for symbol mode 100Mbps support.
72  * o 10baseT port.
73  * o AUI/BNC port.
74  *
75  * The 100Mbps SYM port and 10baseT port can be used together in
76  * combination with the internal NWAY support to create a 10/100
77  * autosensing configuration.
78  *
79  * Note that not all tulip workalikes are handled in this driver: we only
80  * deal with those which are relatively well behaved. The Winbond is
81  * handled separately due to its different register offsets and the
82  * special handling needed for its various bugs. The PNIC is handled
83  * here, but I'm not thrilled about it.
84  *
85  * All of the workalike chips use some form of MII transceiver support
86  * with the exception of the Macronix chips, which also have a SYM port.
87  * The ASIX AX88140A is also documented to have a SYM port, but all
88  * the cards I've seen use an MII transceiver, probably because the
89  * AX88140A doesn't support internal NWAY.
90  */
91 
92 #include "bpfilter.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/mbuf.h>
97 #include <sys/protosw.h>
98 #include <sys/socket.h>
99 #include <sys/ioctl.h>
100 #include <sys/errno.h>
101 #include <sys/malloc.h>
102 #include <sys/kernel.h>
103 #include <sys/device.h>
104 #include <sys/timeout.h>
105 
106 #include <net/if.h>
107 
108 #include <netinet/in.h>
109 #include <netinet/if_ether.h>
110 
111 #include <net/if_media.h>
112 
113 #if NBPFILTER > 0
114 #include <net/bpf.h>
115 #endif
116 
117 #include <dev/mii/mii.h>
118 #include <dev/mii/miivar.h>
119 
120 #include <machine/bus.h>
121 #include <dev/pci/pcidevs.h>
122 
123 #include <dev/ic/dcreg.h>
124 
125 /*
126  * The Davicom DM9102 has a broken DMA engine that reads beyond the
127  * end of the programmed transfer.  Architectures with a proper IOMMU
128  * (such as sparc64) will trap on this access.  To avoid having to
129  * copy each transmitted mbuf to guarantee enough trailing space,
130  * those architectures should implement BUS_DMA_OVERRUN that takes
131  * appropriate action to tolerate this behaviour.
132  */
133 #ifndef BUS_DMA_OVERRUN
134 #define BUS_DMA_OVERRUN 0
135 #endif
136 
137 int dc_intr(void *);
138 struct dc_type *dc_devtype(void *);
139 int dc_newbuf(struct dc_softc *, int, struct mbuf *);
140 int dc_encap(struct dc_softc *, bus_dmamap_t, struct mbuf *, u_int32_t *);
141 
142 void dc_pnic_rx_bug_war(struct dc_softc *, int);
143 int dc_rx_resync(struct dc_softc *);
144 int dc_rxeof(struct dc_softc *);
145 void dc_txeof(struct dc_softc *);
146 void dc_tick(void *);
147 void dc_tx_underrun(struct dc_softc *);
148 void dc_start(struct ifnet *);
149 int dc_ioctl(struct ifnet *, u_long, caddr_t);
150 void dc_watchdog(struct ifnet *);
151 int dc_ifmedia_upd(struct ifnet *);
152 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
153 
154 void dc_delay(struct dc_softc *);
155 void dc_eeprom_width(struct dc_softc *);
156 void dc_eeprom_idle(struct dc_softc *);
157 void dc_eeprom_putbyte(struct dc_softc *, int);
158 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
159 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
160 void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
161 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
162 
163 void dc_mii_writebit(struct dc_softc *, int);
164 int dc_mii_readbit(struct dc_softc *);
165 void dc_mii_sync(struct dc_softc *);
166 void dc_mii_send(struct dc_softc *, u_int32_t, int);
167 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
168 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
169 int dc_miibus_readreg(struct device *, int, int);
170 void dc_miibus_writereg(struct device *, int, int, int);
171 void dc_miibus_statchg(struct device *);
172 
173 void dc_setcfg(struct dc_softc *, uint64_t);
174 u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
175 u_int32_t dc_crc_be(caddr_t);
176 void dc_setfilt_21143(struct dc_softc *);
177 void dc_setfilt_asix(struct dc_softc *);
178 void dc_setfilt_admtek(struct dc_softc *);
179 void dc_setfilt_xircom(struct dc_softc *);
180 
181 void dc_setfilt(struct dc_softc *);
182 
183 void dc_reset(struct dc_softc *);
184 int dc_list_rx_init(struct dc_softc *);
185 int dc_list_tx_init(struct dc_softc *);
186 
187 void dc_read_srom(struct dc_softc *, int);
188 void dc_parse_21143_srom(struct dc_softc *);
189 void dc_decode_leaf_sia(struct dc_softc *,
190 				     struct dc_eblock_sia *);
191 void dc_decode_leaf_mii(struct dc_softc *,
192 				     struct dc_eblock_mii *);
193 void dc_decode_leaf_sym(struct dc_softc *,
194 				     struct dc_eblock_sym *);
195 void dc_apply_fixup(struct dc_softc *, uint64_t);
196 
197 #define DC_SETBIT(sc, reg, x)				\
198 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
199 
200 #define DC_CLRBIT(sc, reg, x)				\
201 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
202 
203 #define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
204 #define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
205 
206 void
207 dc_delay(struct dc_softc *sc)
208 {
209 	int idx;
210 
211 	for (idx = (300 / 33) + 1; idx > 0; idx--)
212 		CSR_READ_4(sc, DC_BUSCTL);
213 }
214 
/*
 * Probe the serial EEPROM for its address width: clock out a READ
 * opcode followed by zero address bits and count how many bits it
 * takes before the EEPROM pulls its data-out line low (which it does
 * once it has received a complete address).  The result is stored in
 * sc->dc_romwidth; implausible answers fall back to 6 bits (93C46).
 */
void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode (binary 110), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in zero address bits one at a time until DATAOUT goes
	 * low; the loop index then equals the address width in bits.
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Widths outside 4..12 bits are implausible; assume 6 (93C46). */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/*
	 * Run one more select/deselect cycle to leave the EEPROM in a
	 * clean state after the width probe.
	 */
	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
278 
/*
 * Return the EEPROM interface to its idle state: select the ROM with
 * chip select asserted, toggle the clock 25 times to flush any
 * partially-issued command, then deassert chip select and clear the
 * SIO register entirely.
 */
void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock the EEPROM through any in-progress operation. */
	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}
306 
/*
 * Send a read command and address to the EEPROM.  (No ACK bit is
 * sampled here; the caller clocks the data out afterwards.)
 */
void
dc_eeprom_putbyte(struct dc_softc *sc, int addr)
{
	int d, i;

	/* The READ opcode occupies the top bits of DC_EECMD_READ. */
	d = DC_EECMD_READ >> 6;

	/* Clock out the 3 opcode bits, MSB first. */
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each address bit and strobe the clock.  The address
	 * width was discovered earlier by dc_eeprom_width().
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}
}
345 
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The PNIC 82c168/82c169 has its own non-standard way to read
 * the EEPROM: write a read command to the SIO control register,
 * then poll until the busy bit clears.  On timeout, *dest is
 * left unmodified.
 */
void
dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int32_t r;

	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			/* Low 16 bits of the SIO register hold the data. */
			*dest = (u_int16_t)(r & 0xFFFF);
			return;
		}
	}
}
368 
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The Xircom X3201 has its own non-standard way to read the EEPROM,
 * too: bytes are fetched individually through the ROM register,
 * low byte first.
 */
void
dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

	/* Convert word address to byte address; fetch the low byte. */
	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	/* Fetch the high byte.  (0x160 carries the ROM command bits.) */
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
}
388 
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * This is the standard 93Cxx Microwire bit-bang path used by most
 * supported chips.
 */
void
dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO,  DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;
}
434 
435 /*
436  * Read a sequence of words from the EEPROM.
437  */
438 void
439 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt,
440     int swap)
441 {
442 	int i;
443 	u_int16_t word = 0, *ptr;
444 
445 	for (i = 0; i < cnt; i++) {
446 		if (DC_IS_PNIC(sc))
447 			dc_eeprom_getword_pnic(sc, off + i, &word);
448 		else if (DC_IS_XIRCOM(sc))
449 			dc_eeprom_getword_xircom(sc, off + i, &word);
450 		else
451 			dc_eeprom_getword(sc, off + i, &word);
452 		ptr = (u_int16_t *)(dest + (i * 2));
453 		if (swap)
454 			*ptr = betoh16(word);
455 		else
456 			*ptr = letoh16(word);
457 	}
458 }
459 
460 /*
461  * The following two routines are taken from the Macronix 98713
462  * Application Notes pp.19-21.
463  */
464 /*
465  * Write a bit to the MII bus.
466  */
467 void
468 dc_mii_writebit(struct dc_softc *sc, int bit)
469 {
470 	if (bit)
471 		CSR_WRITE_4(sc, DC_SIO,
472 		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
473 	else
474 		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
475 
476 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
477 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
478 }
479 
480 /*
481  * Read a bit from the MII bus.
482  */
483 int
484 dc_mii_readbit(struct dc_softc *sc)
485 {
486 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
487 	CSR_READ_4(sc, DC_SIO);
488 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
489 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
490 	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
491 		return (1);
492 	return (0);
493 }
494 
495 /*
496  * Sync the PHYs by setting data bit and strobing the clock 32 times.
497  */
498 void
499 dc_mii_sync(struct dc_softc *sc)
500 {
501 	int i;
502 
503 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
504 
505 	for (i = 0; i < 32; i++)
506 		dc_mii_writebit(sc, 1);
507 }
508 
509 /*
510  * Clock a series of bits through the MII.
511  */
512 void
513 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
514 {
515 	int i;
516 
517 	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
518 		dc_mii_writebit(sc, bits & i);
519 }
520 
/*
 * Read a PHY register through the bit-banged MII interface.
 * Returns 0 on success (data in frame->mii_data), 1 if the PHY
 * failed to acknowledge the read.
 */
int
dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int i, ack, s;

	/* Block network interrupts while bit-banging the bus. */
	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	dc_mii_writebit(sc, 1);
	dc_mii_writebit(sc, 0);
#endif

	/* Check for ack (ack == 0 means the PHY responded). */
	ack = dc_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			dc_mii_readbit(sc);
		}
		goto fail;
	}

	/*
	 * Shift in the 16 data bits, MSB first.  (ack is known to be
	 * zero here; the inner test is redundant but harmless.)
	 */
	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (dc_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	/* Clock two idle bits to terminate the transaction. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	splx(s);

	if (ack)
		return (1);
	return (0);
}
590 
/*
 * Write to a PHY register through the bit-banged MII interface.
 * Always returns 0.
 */
int
dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int s;

	/* Block network interrupts while bit-banging the bus. */
	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/* Clock out start delimiter, opcode, addresses, TA and data. */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	splx(s);
	return (0);
}
627 
/*
 * MII bus read entry point.  Dispatches to whatever register access
 * mechanism the chip flavour requires: direct registers (Comet/AL981),
 * a dedicated MII command register (PNIC), a faked PHY for non-MII
 * media modes, or the generic bit-bang path.
 */
int
dc_miibus_readreg(struct device *self, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc = (struct dc_softc *)self;
	int i, rval, phy_reg;

	/*
	 * Note: both the AL981 and AN983 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN983 uses a serial MII interface.
	 * The AN983's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
	 * so we only respond to the correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	/*
	 * When not in MII mode, synthesize a PHY at the last MII
	 * address so the media layer has something to attach to.
	 */
	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (PCI_VENDOR_LITEON);
				return (PCI_VENDOR_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (PCI_PRODUCT_LITEON_PNIC);
				return (PCI_PRODUCT_DEC_21142);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	/*
	 * PNIC: issue the read through the MII command register and
	 * poll for completion.  An all-ones result means no PHY.
	 */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	/*
	 * Comet (AL981): PHY registers are mapped directly into the
	 * chip's own register space.
	 */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_read: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	/* Generic bit-bang path. */
	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	if (sc->dc_type == DC_TYPE_98713) {
		/* 98713: PORTSEL must be off during MII access; save NETCFG. */
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}
748 
/*
 * MII bus write entry point.  Mirrors dc_miibus_readreg(): direct
 * registers for Comet, the MII command register for PNIC, or the
 * generic bit-bang path for everything else.
 */
void
dc_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct dc_mii_frame frame;
	int i, phy_reg;

	bzero(&frame, sizeof(frame));

	/* ADMtek and Conexant: only respond to the real PHY address. */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return;
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return;

	/* PNIC: write via command register and poll for completion. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return;
	}

	/* Comet (AL981): PHY registers mapped into chip register space. */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_write: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return;
	}

	/* Generic bit-bang path. */
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	if (sc->dc_type == DC_TYPE_98713) {
		/* 98713: PORTSEL must be off during MII access; save NETCFG. */
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
}
818 
819 void
820 dc_miibus_statchg(struct device *self)
821 {
822 	struct dc_softc *sc = (struct dc_softc *)self;
823 	struct mii_data *mii;
824 	struct ifmedia *ifm;
825 
826 	if (DC_IS_ADMTEK(sc))
827 		return;
828 
829 	mii = &sc->sc_mii;
830 	ifm = &mii->mii_media;
831 	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
832 		dc_setcfg(sc, ifm->ifm_media);
833 		sc->dc_if_media = ifm->ifm_media;
834 	} else {
835 		dc_setcfg(sc, mii->mii_media_active);
836 		sc->dc_if_media = mii->mii_media_active;
837 	}
838 }
839 
/* Index widths (in bits) for the various multicast hash table sizes. */
#define DC_BITS_512	9
#define DC_BITS_128	7
#define DC_BITS_64	6

/*
 * Compute the little-endian CRC32 of a station address and reduce it
 * to an index into the chip's multicast hash table.  The table width
 * (and hence how many CRC bits are kept) varies by chip flavour.
 */
u_int32_t
dc_crc_le(struct dc_softc *sc, caddr_t addr)
{
	u_int32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	/*
	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
	 * chips is only 128 bits wide.
	 */
	if (sc->dc_flags & DC_128BIT_HASH)
		return (crc & ((1 << DC_BITS_128) - 1));

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return (crc & ((1 << DC_BITS_64) - 1));

	/* Xircom's hash filtering table is different (read: weird) */
	/* Xircom uses the LEAST significant bits */
	if (DC_IS_XIRCOM(sc)) {
		if ((crc & 0x180) == 0x180)
			return (crc & 0x0F) + (crc	& 0x70)*3 + (14 << 4);
		else
			return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
	}

	/* Default: 512-bit table. */
	return (crc & ((1 << DC_BITS_512) - 1));
}
874 
/*
 * Calculate the big-endian CRC of a multicast group address and
 * return the most significant 6 bits (an index into a 64-entry table).
 */
#define dc_crc_be(addr)	((ether_crc32_be(addr,ETHER_ADDR_LEN) >> 26) \
	& 0x0000003F)
880 
/*
 * 21143-style RX filter setup routine. Filter programming is done by
 * downloading a special setup frame into the TX engine. 21143, Macronix,
 * PNIC, PNIC II and Davicom chips are programmed this way.
 *
 * We always program the chip using 'hash perfect' mode, i.e. one perfect
 * address (our node address) and a 512-bit hash filter for multicast
 * frames. We also sneak the broadcast address into the hash filter since
 * we need that too.
 */
void
dc_setfilt_21143(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero(sp, DC_SFRAME_LEN);

	/* Point the descriptor at the setup buffer in the list DMA map. */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Record the setup buffer as this slot's "mbuf" so txeof
	 * can recognize and reap the setup frame.
	 */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* Hash each multicast address into the 512-bit table. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_le(sc, enm->enm_addrlo);

			sp[h >> 4] |= htole32(1 << (h & 0xF));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Always accept broadcast frames.
	 */
	h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
	sp[h >> 4] |= htole32(1 << (h & 0xF));

	/* Set our MAC address (the perfect-filter slot of the frame). */
	sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	/* Flush the setup buffer before handing it to the chip. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    sizeof(struct dc_list_data) -
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Hand the descriptor to the chip only after the data is synced. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list[i]),
	    sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Poke the transmitter. */
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	ifp->if_timer = 5;
}
972 
/*
 * ADMtek RX filter setup.  These chips expose the station address
 * (PAR0/PAR1) and the 64-bit multicast hash table (MAR0/MAR1) as
 * plain registers, so no setup frame is needed.
 */
void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	int h = 0;

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Centaur hashes little-endian, others big-endian. */
			if (DC_IS_CENTAUR(sc))
				h = dc_crc_le(sc, enm->enm_addrlo);
			else
				h = dc_crc_be(enm->enm_addrlo);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 |
	    ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4]);

	/* Program the 64-bit multicast hash. */
	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}
1019 
/*
 * ASIX RX filter setup.  Filter registers are accessed indirectly:
 * write the register index to DC_AX_FILTIDX, then the value to
 * DC_AX_FILTDATA.  Broadcast reception has its own NETCFG bit.
 */
void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	int h = 0;

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_AX_NETCFG_RX_BROAD |
	    DC_NETCFG_RX_PROMISC));
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_be(enm->enm_addrlo);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Init our MAC address.  NOTE(review): the raw u_int32_t casts
	 * assume ac_enaddr is suitably aligned within struct arpcom and
	 * that the chip expects the bytes in memory order.
	 */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));

	/* Program the 64-bit multicast hash through the same window. */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
1074 
/*
 * Xircom X3201 RX filter setup.  Like the 21143 this uses a setup
 * frame pushed through the TX ring, but the transmitter and receiver
 * are stopped around the update and the perfect-filter slots live at
 * the start of the setup buffer (sp[0..2]) rather than the end.
 */
void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	/* Stop TX and RX while the filter is rewritten. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero(sp, DC_SFRAME_LEN);

	/* Point the descriptor at the setup buffer in the list DMA map. */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Record the setup buffer as this slot's "mbuf" so txeof
	 * can recognize and reap the setup frame.
	 */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_le(sc, enm->enm_addrlo);

			sp[h >> 4] |= htole32(1 << (h & 0xF));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Always accept broadcast frames.
	 */
	h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
	sp[h >> 4] |= htole32(1 << (h & 0xF));

	/* Set our MAC address (perfect-filter slots at the buffer start). */
	sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	/* Restart TX/RX and hand the setup descriptor to the chip. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;
}
1148 
/*
 * Program the RX filter, dispatching to the routine appropriate
 * for this chip family.  Each helper is guarded by its own chip
 * family test; in practice at most one of them should match.
 */
void
dc_setfilt(struct dc_softc *sc)
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}
1165 
1166 /*
1167  * In order to fiddle with the
1168  * 'full-duplex' and '100Mbps' bits in the netconfig register, we
1169  * first have to put the transmit and/or receive logic in the idle state.
1170  */
void
dc_setcfg(struct dc_softc *sc, uint64_t media)
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/*
	 * If the MAC is running it must be stopped and allowed to
	 * drain before the mode bits may be changed; remember to
	 * restart it when we are done.
	 */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		/* Poll until TX is idle and RX has stopped or is waiting. */
		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/* Some families never report idle; don't warn for those. */
			if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}
	}

	/* Configure for 100baseTX operation. */
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* Symbol/PCS mode: drive the PNIC GPIOs if needed. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	/* Configure for 10baseT operation. */
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Reprogram the SIA through a reset cycle:
				 * the 0x7F3D/0x7F3F values select FDX/HDX
				 * with autonegotiation disabled below.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	/* Finally, set or clear full-duplex mode. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
}
1324 
/*
 * Issue a software reset and bring the chip back to a known,
 * quiescent register state.
 */
void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Wait for the reset bit to self-clear. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * For these families the driver deasserts the reset bit by
	 * hand; i is zeroed deliberately so the timeout warning
	 * below is suppressed.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Mask all interrupts and clear the bus/network configuration. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	if (sc->dc_type == DC_TYPE_21145)
		dc_setcfg(sc, IFM_10_T);
}
1370 
1371 void
1372 dc_apply_fixup(struct dc_softc *sc, uint64_t media)
1373 {
1374 	struct dc_mediainfo *m;
1375 	u_int8_t *p;
1376 	int i;
1377 	u_int32_t reg;
1378 
1379 	m = sc->dc_mi;
1380 
1381 	while (m != NULL) {
1382 		if (m->dc_media == media)
1383 			break;
1384 		m = m->dc_next;
1385 	}
1386 
1387 	if (m == NULL)
1388 		return;
1389 
1390 	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1391 		reg = (p[0] | (p[1] << 8)) << 16;
1392 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1393 	}
1394 
1395 	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1396 		reg = (p[0] | (p[1] << 8)) << 16;
1397 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1398 	}
1399 }
1400 
1401 void
1402 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1403 {
1404 	struct dc_mediainfo *m;
1405 
1406 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1407 	if (m == NULL)
1408 		return;
1409 	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1410 	case DC_SIA_CODE_10BT:
1411 		m->dc_media = IFM_10_T;
1412 		break;
1413 	case DC_SIA_CODE_10BT_FDX:
1414 		m->dc_media = IFM_10_T|IFM_FDX;
1415 		break;
1416 	case DC_SIA_CODE_10B2:
1417 		m->dc_media = IFM_10_2;
1418 		break;
1419 	case DC_SIA_CODE_10B5:
1420 		m->dc_media = IFM_10_5;
1421 		break;
1422 	default:
1423 		break;
1424 	}
1425 
1426 	/*
1427 	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1428 	 * Things apparently already work for cards that do
1429 	 * supply Media Specific Data.
1430 	 */
1431 	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1432 		m->dc_gp_len = 2;
1433 		m->dc_gp_ptr =
1434 		(u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1435 	} else {
1436 		m->dc_gp_len = 2;
1437 		m->dc_gp_ptr =
1438 		(u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1439 	}
1440 
1441 	m->dc_next = sc->dc_mi;
1442 	sc->dc_mi = m;
1443 
1444 	sc->dc_pmode = DC_PMODE_SIA;
1445 }
1446 
1447 void
1448 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1449 {
1450 	struct dc_mediainfo *m;
1451 
1452 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1453 	if (m == NULL)
1454 		return;
1455 	if (l->dc_sym_code == DC_SYM_CODE_100BT)
1456 		m->dc_media = IFM_100_TX;
1457 
1458 	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1459 		m->dc_media = IFM_100_TX|IFM_FDX;
1460 
1461 	m->dc_gp_len = 2;
1462 	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1463 
1464 	m->dc_next = sc->dc_mi;
1465 	sc->dc_mi = m;
1466 
1467 	sc->dc_pmode = DC_PMODE_SYM;
1468 }
1469 
1470 void
1471 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1472 {
1473 	u_int8_t *p;
1474 	struct dc_mediainfo *m;
1475 
1476 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1477 	if (m == NULL)
1478 		return;
1479 	/* We abuse IFM_AUTO to represent MII. */
1480 	m->dc_media = IFM_AUTO;
1481 	m->dc_gp_len = l->dc_gpr_len;
1482 
1483 	p = (u_int8_t *)l;
1484 	p += sizeof(struct dc_eblock_mii);
1485 	m->dc_gp_ptr = p;
1486 	p += 2 * l->dc_gpr_len;
1487 	m->dc_reset_len = *p;
1488 	p++;
1489 	m->dc_reset_ptr = p;
1490 
1491 	m->dc_next = sc->dc_mi;
1492 	sc->dc_mi = m;
1493 }
1494 
1495 void
1496 dc_read_srom(struct dc_softc *sc, int bits)
1497 {
1498 	sc->dc_sromsize = 2 << bits;
1499 	sc->dc_srom = malloc(sc->dc_sromsize, M_DEVBUF, M_NOWAIT);
1500 	if (sc->dc_srom == NULL)
1501 		return;
1502 	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (sc->dc_sromsize / 2), 0);
1503 }
1504 
1505 void
1506 dc_parse_21143_srom(struct dc_softc *sc)
1507 {
1508 	struct dc_leaf_hdr *lhdr;
1509 	struct dc_eblock_hdr *hdr;
1510 	int have_mii, i, loff;
1511 	char *ptr;
1512 
1513 	have_mii = 0;
1514 	loff = sc->dc_srom[27];
1515 	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);
1516 
1517 	ptr = (char *)lhdr;
1518 	ptr += sizeof(struct dc_leaf_hdr) - 1;
1519 	/*
1520 	 * Look if we got a MII media block.
1521 	 */
1522 	for (i = 0; i < lhdr->dc_mcnt; i++) {
1523 		hdr = (struct dc_eblock_hdr *)ptr;
1524 		if (hdr->dc_type == DC_EBLOCK_MII)
1525 		    have_mii++;
1526 
1527 		ptr += (hdr->dc_len & 0x7F);
1528 		ptr++;
1529 	}
1530 
1531 	/*
1532 	 * Do the same thing again. Only use SIA and SYM media
1533 	 * blocks if no MII media block is available.
1534 	 */
1535 	ptr = (char *)lhdr;
1536 	ptr += sizeof(struct dc_leaf_hdr) - 1;
1537 	for (i = 0; i < lhdr->dc_mcnt; i++) {
1538 		hdr = (struct dc_eblock_hdr *)ptr;
1539 		switch(hdr->dc_type) {
1540 		case DC_EBLOCK_MII:
1541 			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
1542 			break;
1543 		case DC_EBLOCK_SIA:
1544 			if (! have_mii)
1545 			    dc_decode_leaf_sia(sc,
1546 				(struct dc_eblock_sia *)hdr);
1547 			break;
1548 		case DC_EBLOCK_SYM:
1549 			if (! have_mii)
1550 			    dc_decode_leaf_sym(sc,
1551 				(struct dc_eblock_sym *)hdr);
1552 			break;
1553 		default:
1554 			/* Don't care. Yet. */
1555 			break;
1556 		}
1557 		ptr += (hdr->dc_len & 0x7F);
1558 		ptr++;
1559 	}
1560 }
1561 
1562 /*
1563  * Attach the interface. Allocate softc structures, do ifmedia
1564  * setup and ethernet/BPF attach.
1565  */
1566 void
1567 dc_attach(struct dc_softc *sc)
1568 {
1569 	struct ifnet *ifp;
1570 	int mac_offset, tmp, i;
1571 	u_int32_t reg;
1572 
1573 	/*
1574 	 * Get station address from the EEPROM.
1575 	 */
1576 	if (sc->sc_hasmac)
1577 		goto hasmac;
1578 
1579 	switch(sc->dc_type) {
1580 	case DC_TYPE_98713:
1581 	case DC_TYPE_98713A:
1582 	case DC_TYPE_987x5:
1583 	case DC_TYPE_PNICII:
1584 		dc_read_eeprom(sc, (caddr_t)&mac_offset,
1585 		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
1586 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1587 		    (mac_offset / 2), 3, 0);
1588 		break;
1589 	case DC_TYPE_PNIC:
1590 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1591 		break;
1592 	case DC_TYPE_DM9102:
1593 	case DC_TYPE_21143:
1594 	case DC_TYPE_21145:
1595 	case DC_TYPE_ASIX:
1596 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1597 		    DC_EE_NODEADDR, 3, 0);
1598 		break;
1599 	case DC_TYPE_AL981:
1600 	case DC_TYPE_AN983:
1601 		reg = CSR_READ_4(sc, DC_AL_PAR0);
1602 		sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff);
1603 		sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff;
1604 		sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff;
1605 		sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff;
1606 		reg = CSR_READ_4(sc, DC_AL_PAR1);
1607 		sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff);
1608 		sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff;
1609 		break;
1610 	case DC_TYPE_CONEXANT:
1611 		bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR,
1612 		    &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1613 		break;
1614 	case DC_TYPE_XIRCOM:
1615 		/* Some newer units have the MAC at offset 8 */
1616 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 8, 3, 0);
1617 
1618 		if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1619 		    sc->sc_arpcom.ac_enaddr[1] == 0x10 &&
1620 		    sc->sc_arpcom.ac_enaddr[2] == 0xa4)
1621 			break;
1622 		if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1623 		    sc->sc_arpcom.ac_enaddr[1] == 0x80 &&
1624 		    sc->sc_arpcom.ac_enaddr[2] == 0xc7)
1625 			break;
1626 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 3, 3, 0);
1627 		break;
1628 	default:
1629 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1630 		    DC_EE_NODEADDR, 3, 0);
1631 		break;
1632 	}
1633 hasmac:
1634 
1635 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
1636 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1637 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
1638 		printf(": can't alloc list mem\n");
1639 		goto fail;
1640 	}
1641 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1642 	    sizeof(struct dc_list_data), &sc->sc_listkva,
1643 	    BUS_DMA_NOWAIT) != 0) {
1644 		printf(": can't map list mem\n");
1645 		goto fail;
1646 	}
1647 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
1648 	    sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
1649 	    &sc->sc_listmap) != 0) {
1650 		printf(": can't alloc list map\n");
1651 		goto fail;
1652 	}
1653 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1654 	    sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1655 		printf(": can't load list map\n");
1656 		goto fail;
1657 	}
1658 	sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1659 
1660 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1661 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1662 		    0, BUS_DMA_NOWAIT,
1663 		    &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
1664 			printf(": can't create rx map\n");
1665 			return;
1666 		}
1667 	}
1668 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1669 	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
1670 		printf(": can't create rx spare map\n");
1671 		return;
1672 	}
1673 
1674 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1675 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1676 		    (sc->dc_flags & DC_TX_COALESCE) ? 1 : DC_TX_LIST_CNT - 5,
1677 		    MCLBYTES, 0, BUS_DMA_NOWAIT,
1678 		    &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
1679 			printf(": can't create tx map\n");
1680 			return;
1681 		}
1682 	}
1683 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1684 	    (sc->dc_flags & DC_TX_COALESCE) ? 1 : DC_TX_LIST_CNT - 5,
1685 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1686 		printf(": can't create tx spare map\n");
1687 		return;
1688 	}
1689 
1690 	/*
1691 	 * A 21143 or clone chip was detected. Inform the world.
1692 	 */
1693 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1694 
1695 	ifp = &sc->sc_arpcom.ac_if;
1696 	ifp->if_softc = sc;
1697 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1698 	ifp->if_ioctl = dc_ioctl;
1699 	ifp->if_start = dc_start;
1700 	ifp->if_watchdog = dc_watchdog;
1701 	ifq_set_maxlen(&ifp->if_snd, DC_TX_LIST_CNT - 1);
1702 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1703 
1704 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1705 
1706 	/* Do MII setup. If this is a 21143, check for a PHY on the
1707 	 * MII bus after applying any necessary fixups to twiddle the
1708 	 * GPIO bits. If we don't end up finding a PHY, restore the
1709 	 * old selection (SIA only or SIA/SYM) and attach the dcphy
1710 	 * driver instead.
1711 	 */
1712 	if (DC_IS_INTEL(sc)) {
1713 		dc_apply_fixup(sc, IFM_AUTO);
1714 		tmp = sc->dc_pmode;
1715 		sc->dc_pmode = DC_PMODE_MII;
1716 	}
1717 
1718 	/*
1719 	 * Setup General Purpose port mode and data so the tulip can talk
1720 	 * to the MII.  This needs to be done before mii_attach so that
1721 	 * we can actually see them.
1722 	 */
1723 	if (DC_IS_XIRCOM(sc)) {
1724 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
1725 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1726 		DELAY(10);
1727 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
1728 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1729 		DELAY(10);
1730 	}
1731 
1732 	sc->sc_mii.mii_ifp = ifp;
1733 	sc->sc_mii.mii_readreg = dc_miibus_readreg;
1734 	sc->sc_mii.mii_writereg = dc_miibus_writereg;
1735 	sc->sc_mii.mii_statchg = dc_miibus_statchg;
1736 	ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1737 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1738 	    MII_OFFSET_ANY, 0);
1739 
1740 	if (DC_IS_INTEL(sc)) {
1741 		if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1742 			sc->dc_pmode = tmp;
1743 			if (sc->dc_pmode != DC_PMODE_SIA)
1744 				sc->dc_pmode = DC_PMODE_SYM;
1745 			sc->dc_flags |= DC_21143_NWAY;
1746 			if (sc->dc_flags & DC_MOMENCO_BOTCH)
1747 				sc->dc_pmode = DC_PMODE_MII;
1748 			mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1749 			    MII_PHY_ANY, MII_OFFSET_ANY, 0);
1750 		} else {
1751 			/* we have a PHY, so we must clear this bit */
1752 			sc->dc_flags &= ~DC_TULIP_LEDS;
1753 		}
1754 	}
1755 
1756 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1757 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1758 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1759 		printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1760 	} else if (sc->dc_type == DC_TYPE_21145) {
1761 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
1762 	} else
1763 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1764 
1765 	if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
1766 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);
1767 
1768 	if (DC_IS_ADMTEK(sc)) {
1769 		/*
1770 		 * Set automatic TX underrun recovery for the ADMtek chips
1771 		 */
1772 		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
1773 	}
1774 
1775 	/*
1776 	 * Call MI attach routines.
1777 	 */
1778 	if_attach(ifp);
1779 	ether_ifattach(ifp);
1780 
1781 fail:
1782 	return;
1783 }
1784 
1785 /*
1786  * Initialize the transmit descriptors.
1787  */
1788 int
1789 dc_list_tx_init(struct dc_softc *sc)
1790 {
1791 	struct dc_chain_data *cd;
1792 	struct dc_list_data *ld;
1793 	int i;
1794 	bus_addr_t next;
1795 
1796 	cd = &sc->dc_cdata;
1797 	ld = sc->dc_ldata;
1798 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1799 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1800 		if (i == (DC_TX_LIST_CNT - 1))
1801 			next +=
1802 			    offsetof(struct dc_list_data, dc_tx_list[0]);
1803 		else
1804 			next +=
1805 			    offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1806 		cd->dc_tx_chain[i].sd_mbuf = NULL;
1807 		ld->dc_tx_list[i].dc_data = htole32(0);
1808 		ld->dc_tx_list[i].dc_ctl = htole32(0);
1809 		ld->dc_tx_list[i].dc_next = htole32(next);
1810 	}
1811 
1812 	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1813 
1814 	return (0);
1815 }
1816 
1817 
1818 /*
1819  * Initialize the RX descriptors and allocate mbufs for them. Note that
1820  * we arrange the descriptors in a closed ring, so that the last descriptor
1821  * points back to the first.
1822  */
1823 int
1824 dc_list_rx_init(struct dc_softc *sc)
1825 {
1826 	struct dc_chain_data *cd;
1827 	struct dc_list_data *ld;
1828 	int i;
1829 	bus_addr_t next;
1830 
1831 	cd = &sc->dc_cdata;
1832 	ld = sc->dc_ldata;
1833 
1834 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1835 		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
1836 			return (ENOBUFS);
1837 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1838 		if (i == (DC_RX_LIST_CNT - 1))
1839 			next +=
1840 			    offsetof(struct dc_list_data, dc_rx_list[0]);
1841 		else
1842 			next +=
1843 			    offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1844 		ld->dc_rx_list[i].dc_next = htole32(next);
1845 	}
1846 
1847 	cd->dc_rx_prod = 0;
1848 
1849 	return (0);
1850 }
1851 
1852 /*
1853  * Initialize an RX descriptor and attach an MBUF cluster.
1854  */
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * If m is NULL, a fresh cluster is allocated and DMA-loaded;
 * otherwise the caller's mbuf is recycled in place (its DMA map
 * is assumed to still be loaded).  Returns 0 or ENOBUFS.
 */
int
dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct dc_desc *c;
	bus_dmamap_t map;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		/*
		 * Load the new mbuf into the spare map, then swap the
		 * spare with this slot's map so the slot always holds
		 * the loaded map and the spare stays unloaded.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,
		    m_new, BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Skip the first 8 bytes (matches the dc_data offset below). */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero(mtod(m_new, char *), m_new->m_len);

	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Hand the descriptor to the chip (OWN bit set last). */
	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
	c->dc_data = htole32(
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN);
	c->dc_status = htole32(DC_RXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_rx_list[i]),
	    sizeof(struct dc_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1922 
1923 /*
1924  * Grrrrr.
1925  * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
1927  * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
1928  * will happen on slow machines. The bug is that sometimes instead of
1929  * uploading one complete frame during reception, it uploads what looks
1930  * like the entire contents of its FIFO memory. The frame we want is at
1931  * the end of the whole mess, but we never know exactly how much data has
1932  * been uploaded, so salvaging the frame is hard.
1933  *
1934  * There is only one way to do it reliably, and it's disgusting.
1935  * Here's what we know:
1936  *
1937  * - We know there will always be somewhere between one and three extra
1938  *   descriptors uploaded.
1939  *
1940  * - We know the desired received frame will always be at the end of the
1941  *   total data upload.
1942  *
1943  * - We know the size of the desired received frame because it will be
1944  *   provided in the length field of the status word in the last descriptor.
1945  *
1946  * Here's what we do:
1947  *
1948  * - When we allocate buffers for the receive ring, we bzero() them.
1949  *   This means that we know that the buffer contents should be all
1950  *   zeros, except for data uploaded by the chip.
1951  *
1952  * - We also force the PNIC chip to upload frames that include the
1953  *   ethernet CRC at the end.
1954  *
1955  * - We gather all of the bogus frame data into a single buffer.
1956  *
1957  * - We then position a pointer at the end of this buffer and scan
1958  *   backwards until we encounter the first non-zero byte of data.
1959  *   This is the end of the received frame. We know we will encounter
1960  *   some data at the end of the frame because the CRC will always be
1961  *   there, so even if the sender transmits a packet of all zeros,
1962  *   we won't be fooled.
1963  *
1964  * - We know the size of the actual received frame, so we subtract
1965  *   that value from the current pointer location. This brings us
1966  *   to the start of the actual received packet.
1967  *
1968  * - We copy this into an mbuf and pass it on, along with the actual
1969  *   frame length.
1970  *
1971  * The performance hit is tremendous, but it beats dropping frames all
1972  * the time.
1973  */
1974 
#define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
/*
 * Salvage a frame from the PNIC FIFO-dump bug described above:
 * gather all fragment buffers from dc_pnic_rx_bug_save through idx
 * into dc_pnic_rx_buf, scan backwards for the end of real data,
 * then copy the reconstructed frame back into the last mbuf.
 */
void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc		*cur_rx;
	struct dc_desc		*c = NULL;
	struct mbuf		*m = NULL;
	unsigned char		*ptr;
	int			i, total_len;
	u_int32_t		rxstat = 0;

	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	/* Start from a clean slate so the backward zero-scan works. */
	bzero(ptr, ETHER_MAX_DIX_LEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		bcopy(mtod(m, char *), ptr, ETHER_MAX_DIX_LEN);
		ptr += ETHER_MAX_DIX_LEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Recycle consumed fragments back onto the ring. */
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/*
	 * Scan backwards until we hit a non-zero byte (the trailing
	 * CRC guarantees one exists — see the comment block above).
	 */
	while(*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((unsigned long)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, m);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2030 
2031 /*
2032  * This routine searches the RX ring for dirty descriptors in the
2033  * event that the rxeof routine falls out of sync with the chip's
2034  * current descriptor pointer. This may happen sometimes as a result
2035  * of a "no RX buffer available" condition that happens when the chip
2036  * consumes all of the RX buffers before the driver has a chance to
2037  * process the RX ring. This routine may need to be called more than
2038  * once to bring the driver back in sync with the chip, however we
2039  * should still be getting RX DONE interrupts to drive the search
2040  * for new packets in the RX ring, so we should catch up eventually.
2041  */
2042 int
2043 dc_rx_resync(struct dc_softc *sc)
2044 {
2045 	u_int32_t stat;
2046 	int i, pos, offset;
2047 
2048 	pos = sc->dc_cdata.dc_rx_prod;
2049 
2050 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2051 
2052 		offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
2053 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2054 		    offset, sizeof(struct dc_desc),
2055 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2056 
2057 		stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
2058 		if (!(stat & htole32(DC_RXSTAT_OWN)))
2059 			break;
2060 		DC_INC(pos, DC_RX_LIST_CNT);
2061 	}
2062 
2063 	/* If the ring really is empty, then just return. */
2064 	if (i == DC_RX_LIST_CNT)
2065 		return (0);
2066 
2067 	/* We've fallen behind the chip: catch it. */
2068 	sc->dc_cdata.dc_rx_prod = pos;
2069 
2070 	return (EAGAIN);
2071 }
2072 
2073 /*
2074  * A frame has been uploaded: pass the resulting mbuf chain up to
2075  * the higher level protocols.
2076  */
int
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	int i, offset, total_len = 0, consumed = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	/* Walk the ring from the current producer index until we find
	 * a descriptor still owned by the chip. */
	for(;;) {
		struct mbuf	*m0 = NULL;

		/* Sync the descriptor before reading the status word
		 * the chip may have written back. */
		offset = offsetof(struct dc_list_data, dc_rx_list[i]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(cur_rx->dc_status);
		/* Chip still owns it: no more completed frames. */
		if (rxstat & DC_RXSTAT_OWN)
			break;

		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		total_len = DC_RXBYTES(rxstat);

		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			/*
			 * Buggy PNIC chips can scatter one frame over
			 * several descriptors: remember where it started
			 * and let dc_pnic_rx_bug_war() patch things up
			 * once the last fragment arrives.
			 */
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				/* The workaround rewrote the descriptor;
				 * reload status and length. */
				rxstat = letoh32(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.  However, don't report long
		 * frames as errors since they could be VLANs.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
				       DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
				       DC_RXSTAT_RUNT   | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_newbuf(sc, i, m);
				if (rxstat & DC_RXSTAT_CRCERR) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/* Non-CRC errors are treated as
					 * serious: reinitialize the chip. */
					dc_init(sc);
					break;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		/* Copy the frame into a fresh mbuf chain so the cluster
		 * can be handed straight back to the ring. */
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		consumed++;
		ml_enqueue(&ml, m);
	}

	sc->dc_cdata.dc_rx_prod = i;

	if_input(ifp, &ml);

	/* Caller uses the count to decide whether a resync is needed. */
	return (consumed);
}
2174 
2175 /*
2176  * A frame was downloaded to the chip. It's safe for us to clean up
2177  * the list buffers.
2178  */
2179 
void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx, offset;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->dc_cdata.dc_tx_cons;
	while(idx != sc->dc_cdata.dc_tx_prod) {
		u_int32_t		txstat;

		/* Sync the descriptor before reading the status word
		 * the chip may have written back. */
		offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = letoh32(cur_tx->dc_status);

		/* Still owned by the chip: transmission not finished. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/* Intermediate fragments and setup frames carry no
		 * meaningful completion status; just account for them. */
		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
			}
			sc->dc_cdata.dc_tx_cnt--;
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			/* Ignore bogus error summaries caused only by
			 * carrier-related bits when running MII mode. */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			/* Underruns are handled by dc_tx_underrun() via
			 * the interrupt path; everything else gets a
			 * full reinit, which invalidates our ring state. */
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				dc_init(sc);
				return;
			}
		}

		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}
	sc->dc_cdata.dc_tx_cons = idx;

	/* Reopen the send queue once enough descriptors are free, and
	 * cancel the watchdog when the ring has fully drained. */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > 5)
		ifq_clr_oactive(&ifp->if_snd);
	if (sc->dc_cdata.dc_tx_cnt == 0)
		ifp->if_timer = 0;
}
2293 
/*
 * Once-a-second (or 100ms during NWAY negotiation) timer: poll the PHY,
 * detect link changes, and kick the transmit queue once link comes up.
 */
void
dc_tick(void *xsc)
{
	struct dc_softc *sc = (struct dc_softc *)xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;
	u_int32_t r;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143 NWAY: if the 10BT status register
			 * contradicts the currently selected media,
			 * drop the link and renegotiate.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc,
			    DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (ifq_empty(&ifp->if_snd) == 0)
	 	    dc_start(ifp);
	}

	/* Poll faster while NWAY negotiation has not produced a link. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		timeout_add_msec(&sc->dc_tick_tmo, 100);
	else
		timeout_add_sec(&sc->dc_tick_tmo, 1);

	splx(s);
}
2372 
2373 /* A transmit underrun has occurred.  Back off the transmit threshold,
2374  * or switch to store and forward mode if we have to.
2375  */
2376 void
2377 dc_tx_underrun(struct dc_softc *sc)
2378 {
2379 	u_int32_t	isr;
2380 	int		i;
2381 
2382 	if (DC_IS_DAVICOM(sc))
2383 		dc_init(sc);
2384 
2385 	if (DC_IS_INTEL(sc)) {
2386 		/*
2387 		 * The real 21143 requires that the transmitter be idle
2388 		 * in order to change the transmit threshold or store
2389 		 * and forward state.
2390 		 */
2391 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2392 
2393 		for (i = 0; i < DC_TIMEOUT; i++) {
2394 			isr = CSR_READ_4(sc, DC_ISR);
2395 			if (isr & DC_ISR_TX_IDLE)
2396 				break;
2397 			DELAY(10);
2398 		}
2399 		if (i == DC_TIMEOUT) {
2400 			printf("%s: failed to force tx to idle state\n",
2401 			    sc->sc_dev.dv_xname);
2402 			dc_init(sc);
2403 		}
2404 	}
2405 
2406 	sc->dc_txthresh += DC_TXTHRESH_INC;
2407 	if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
2408 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2409 	} else {
2410 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2411 		DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2412 	}
2413 
2414 	if (DC_IS_INTEL(sc))
2415 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2416 
2417 	return;
2418 }
2419 
/*
 * Interrupt handler: service RX/TX completions, underruns and bus
 * errors until the interrupt status register goes quiet.
 */
int
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status, ints;
	int claimed = 0;

	sc = arg;

	ifp = &sc->sc_arpcom.ac_if;

	/* Not ours if none of the interesting bits are set. */
	ints = CSR_READ_4(sc, DC_ISR);
	if ((ints & DC_INTRS) == 0)
		return (claimed);
	/* All-ones reads back when the hardware has gone away. */
	if (ints == 0xffffffff)
		return (0);

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc, 0);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
	    status != 0xFFFFFFFF &&
	    (ifp->if_flags & IFF_RUNNING)) {

		claimed = 1;
		/* Ack the conditions we are about to service. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			/* Nothing consumed may mean we lost sync with
			 * the chip's RX pointer: resync and retry. */
			if (dc_rxeof(sc) == 0) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* Transmitter went idle with work still queued:
			 * restart it and issue a TX poll demand. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			if (dc_rxeof(sc) == 0) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & DC_ISR_BUS_ERR)
			dc_init(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	if (ifq_empty(&ifp->if_snd) == 0)
		dc_start(ifp);

	return (claimed);
}
2496 
2497 /*
2498  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2499  * pointers to the fragment pointers.
2500  */
int
dc_encap(struct dc_softc *sc, bus_dmamap_t map, struct mbuf *m, u_int32_t *idx)
{
	struct dc_desc *f = NULL;
	int frag, cur, cnt = 0, i;

	cur = frag = *idx;

	/* Build one TX descriptor per DMA segment of the loaded map. */
	for (i = 0; i < map->dm_nsegs; i++) {
		f = &sc->dc_ldata->dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
		if (cnt == 0) {
			/* Keep OWN clear on the first descriptor so the
			 * chip can't start on a half-built chain; it is
			 * handed over at the end of this function. */
			f->dc_status = htole32(0);
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
		cnt++;
	}

	sc->dc_cdata.dc_tx_cnt += cnt;
	/* Track the mbuf and the loaded map on the last descriptor;
	 * the previously attached map becomes the new spare. */
	sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m;
	sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
	sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	/* Request a TX-done interrupt according to the chip's quirks. */
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[*idx].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Hand the whole chain to the chip in one shot by setting the
	 * OWN bit on the first descriptor last. */
	sc->dc_ldata->dc_tx_list[*idx].dc_status = htole32(DC_TXSTAT_OWN);

	*idx = frag;

	return (0);
}
2546 
2547 /*
2548  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2549  * to the mbuf data regions directly in the transmit lists. We also save a
2550  * copy of the pointers since the transmit list fragment pointers are
2551  * physical addresses.
2552  */
2553 
2554 static inline int
2555 dc_fits(struct dc_softc *sc, int idx, bus_dmamap_t map)
2556 {
2557 	if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
2558 		if (sc->dc_cdata.dc_tx_prod != idx &&
2559 		    idx + map->dm_nsegs >= DC_TX_LIST_CNT)
2560 			return (0);
2561 	}
2562 
2563 	if (sc->dc_cdata.dc_tx_cnt + map->dm_nsegs + 5 > DC_TX_LIST_CNT)
2564 		return (0);
2565 
2566 	return (1);
2567 }
2568 
void
dc_start(struct ifnet *ifp)
{
	struct dc_softc *sc = ifp->if_softc;
	bus_dmamap_t map;
	struct mbuf *m;
	int idx;

	/* Without link, let a few packets queue up before trying. */
	if (!sc->dc_link && ifq_len(&ifp->if_snd) < 10)
		return;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	idx = sc->dc_cdata.dc_tx_prod;

	/* Claim the whole TX descriptor array for CPU access. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list),
	    sizeof(struct dc_desc) * DC_TX_LIST_CNT,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		map = sc->sc_tx_sparemap;
		switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_OVERRUN)) {
		case 0:
			break;
		case EFBIG:
			/* Too many segments: compact the chain and retry
			 * the load once. */
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
			     BUS_DMA_NOWAIT | BUS_DMA_OVERRUN) == 0)
				break;

			/* FALLTHROUGH */
		default:
			/* Unloadable packet: drop it and keep going. */
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/* Not enough ring space: put the packet back and stall
		 * the queue until dc_txeof() frees descriptors. */
		if (!dc_fits(sc, idx, map)) {
			bus_dmamap_unload(sc->sc_dmat, map);
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m);

		if (dc_encap(sc, map, m, &idx) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Some chips can only handle one frame at a time. */
		if (sc->dc_flags & DC_TX_ONE) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
	}

	/* Hand the descriptor array back to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list),
	    sizeof(struct dc_desc) * DC_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Nothing was queued. */
	if (idx == sc->dc_cdata.dc_tx_prod)
		return;

	/* Transmit */
	sc->dc_cdata.dc_tx_prod = idx;
	if (!(sc->dc_flags & DC_TX_POLL))
		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2663 
/*
 * Bring the interface up: reset the chip, program bus/cache/threshold
 * settings, rebuild the RX/TX rings, program the RX filter and start
 * the media tick timer.  Takes splnet() internally.
 */
void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	int s;

	s = splnet();

	mii = &sc->sc_mii;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc, 0);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/* Select store-and-forward or a cut-through TX threshold. */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/* Program the Xircom GPIO pins for MII operation. */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		dc_stop(sc, 0);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Sync down both lists initialized.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_rx_list[0]));
	CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	timeout_set(&sc->dc_tick_tmo, dc_tick, sc);

	/* HomePNA media never reports link via the PHY, assume it. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		if (sc->dc_flags & DC_21143_NWAY)
			timeout_add_msec(&sc->dc_tick_tmo, 100);
		else
			timeout_add_sec(&sc->dc_tick_tmo, 1);
	}

#ifdef SRM_MEDIA
	/* Apply media settings inherited from the SRM console, once. */
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
}
2849 
2850 /*
2851  * Set media options.
2852  */
2853 int
2854 dc_ifmedia_upd(struct ifnet *ifp)
2855 {
2856 	struct dc_softc *sc;
2857 	struct mii_data *mii;
2858 	struct ifmedia *ifm;
2859 
2860 	sc = ifp->if_softc;
2861 	mii = &sc->sc_mii;
2862 	mii_mediachg(mii);
2863 
2864 	ifm = &mii->mii_media;
2865 
2866 	if (DC_IS_DAVICOM(sc) &&
2867 	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2868 		dc_setcfg(sc, ifm->ifm_media);
2869 	else
2870 		sc->dc_link = 0;
2871 
2872 	return (0);
2873 }
2874 
2875 /*
2876  * Report current media status.
2877  */
2878 void
2879 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2880 {
2881 	struct dc_softc *sc;
2882 	struct mii_data *mii;
2883 	struct ifmedia *ifm;
2884 
2885 	sc = ifp->if_softc;
2886 	mii = &sc->sc_mii;
2887 	mii_pollstat(mii);
2888 	ifm = &mii->mii_media;
2889 	if (DC_IS_DAVICOM(sc)) {
2890 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
2891 			ifmr->ifm_active = ifm->ifm_media;
2892 			ifmr->ifm_status = 0;
2893 			return;
2894 		}
2895 	}
2896 	ifmr->ifm_active = mii->mii_media_active;
2897 	ifmr->ifm_status = mii->mii_media_status;
2898 }
2899 
2900 int
2901 dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2902 {
2903 	struct dc_softc		*sc = ifp->if_softc;
2904 	struct ifreq		*ifr = (struct ifreq *) data;
2905 	int			s, error = 0;
2906 
2907 	s = splnet();
2908 
2909 	switch(command) {
2910 	case SIOCSIFADDR:
2911 		ifp->if_flags |= IFF_UP;
2912 		if (!(ifp->if_flags & IFF_RUNNING))
2913 			dc_init(sc);
2914 		break;
2915 	case SIOCSIFFLAGS:
2916 		if (ifp->if_flags & IFF_UP) {
2917 			if (ifp->if_flags & IFF_RUNNING)
2918 				error = ENETRESET;
2919 			else {
2920 				sc->dc_txthresh = 0;
2921 				dc_init(sc);
2922 			}
2923 		} else {
2924 			if (ifp->if_flags & IFF_RUNNING)
2925 				dc_stop(sc, 0);
2926 		}
2927 		break;
2928 	case SIOCGIFMEDIA:
2929 	case SIOCSIFMEDIA:
2930 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
2931 #ifdef SRM_MEDIA
2932 		if (sc->dc_srm_media)
2933 			sc->dc_srm_media = 0;
2934 #endif
2935 		break;
2936 	default:
2937 		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2938 	}
2939 
2940 	if (error == ENETRESET) {
2941 		if (ifp->if_flags & IFF_RUNNING)
2942 			dc_setfilt(sc);
2943 		error = 0;
2944 	}
2945 
2946 	splx(s);
2947 	return (error);
2948 }
2949 
2950 void
2951 dc_watchdog(struct ifnet *ifp)
2952 {
2953 	struct dc_softc *sc;
2954 
2955 	sc = ifp->if_softc;
2956 
2957 	ifp->if_oerrors++;
2958 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2959 
2960 	dc_init(sc);
2961 
2962 	if (ifq_empty(&ifp->if_snd) == 0)
2963 		dc_start(ifp);
2964 }
2965 
2966 /*
2967  * Stop the adapter and free any mbufs allocated to the
2968  * RX and TX lists.
2969  */
void
dc_stop(struct dc_softc *sc, int softonly)
{
	struct ifnet *ifp;
	u_int32_t isr;
	int i;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->dc_tick_tmo);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* When softonly is set, leave the hardware alone (used from
	 * dc_detach() when the device may already be gone). */
	if (!softonly) {
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));

		/* Wait for the TX and RX engines to settle. */
		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if ((isr & DC_ISR_TX_IDLE ||
			    (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/* Some chip families are known never to reach the
			 * idle states; don't complain about those. */
			if (!((isr & DC_ISR_TX_IDLE) ||
			    (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
			    !DC_IS_ASIX(sc) && !DC_IS_DAVICOM(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}

		/* Mask interrupts and clear the list base registers. */
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
		CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
		CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
		sc->dc_link = 0;
	}

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->dc_ldata->dc_rx_list, sizeof(sc->dc_ldata->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
			/* Setup-frame slots don't carry a real mbuf. */
			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
			    htole32(DC_TXCTL_SETUP)) {
				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
				continue;
			}
			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->dc_ldata->dc_tx_list, sizeof(sc->dc_ldata->dc_tx_list));

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
3060 
3061 int
3062 dc_activate(struct device *self, int act)
3063 {
3064 	struct dc_softc *sc = (struct dc_softc *)self;
3065 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3066 	int rv = 0;
3067 
3068 	switch (act) {
3069 	case DVACT_SUSPEND:
3070 		if (ifp->if_flags & IFF_RUNNING)
3071 			dc_stop(sc, 0);
3072 		rv = config_activate_children(self, act);
3073 		break;
3074 	case DVACT_RESUME:
3075 		if (ifp->if_flags & IFF_UP)
3076 			dc_init(sc);
3077 		break;
3078 	default:
3079 		rv = config_activate_children(self, act);
3080 		break;
3081 	}
3082 	return (rv);
3083 }
3084 
/*
 * Detach: soft-stop the interface, tear down the MII layer, and
 * release all DMA maps and descriptor memory.
 */
int
dc_detach(struct dc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	/* softonly: the hardware may already be gone. */
	dc_stop(sc, 1);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	if (sc->dc_srom)
		free(sc->dc_srom, M_DEVBUF, sc->dc_sromsize);

	/* Destroy the per-buffer DMA maps and the spares. */
	for (i = 0; i < DC_RX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map);
	if (sc->sc_rx_sparemap)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_sparemap);
	for (i = 0; i < DC_TX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_tx_chain[i].sd_map);
	if (sc->sc_tx_sparemap)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_sparemap);

	/* XXX: no bus_dmamap_sync of the list map before unload here
	 * — NOTE(review): confirm whether one is required. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_listmap);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_listkva, sc->sc_listnseg);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg);

	ether_ifdetach(ifp);
	if_detach(ifp);
	return (0);
}
3118 
/* Autoconf glue: driver name and device class for dc(4) instances. */
struct cfdriver dc_cd = {
	0, "dc", DV_IFNET
};
3122