xref: /openbsd/sys/dev/ic/dc.c (revision a6445c1d)
1 /*	$OpenBSD: dc.c,v 1.135 2014/11/18 22:53:56 brad Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35  */
36 
37 /*
38  * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39  * series chips and several workalikes including the following:
40  *
41  * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42  * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43  * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44  * ASIX Electronics AX88140A (www.asix.com.tw)
45  * ASIX Electronics AX88141 (www.asix.com.tw)
46  * ADMtek AL981 (www.admtek.com.tw)
47  * ADMtek AN983 (www.admtek.com.tw)
48  * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49  * Accton EN1217, EN2242 (www.accton.com)
50  * Xircom X3201 (www.xircom.com)
51  *
52  * Datasheets for the 21143 are available at developer.intel.com.
53  * Datasheets for the clone parts can be found at their respective sites.
54  * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55  * The PNIC II is essentially a Macronix 98715A chip; the only difference
56  * worth noting is that its multicast hash table is only 128 bits wide
57  * instead of 512.
58  *
59  * Written by Bill Paul <wpaul@ee.columbia.edu>
60  * Electrical Engineering Department
61  * Columbia University, New York City
62  */
63 
64 /*
65  * The Intel 21143 is the successor to the DEC 21140. It is basically
66  * the same as the 21140 but with a few new features. The 21143 supports
 * four kinds of media attachments:
68  *
69  * o MII port, for 10Mbps and 100Mbps support and NWAY
70  *   autonegotiation provided by an external PHY.
71  * o SYM port, for symbol mode 100Mbps support.
72  * o 10baseT port.
73  * o AUI/BNC port.
74  *
75  * The 100Mbps SYM port and 10baseT port can be used together in
76  * combination with the internal NWAY support to create a 10/100
77  * autosensing configuration.
78  *
79  * Note that not all tulip workalikes are handled in this driver: we only
80  * deal with those which are relatively well behaved. The Winbond is
81  * handled separately due to its different register offsets and the
82  * special handling needed for its various bugs. The PNIC is handled
83  * here, but I'm not thrilled about it.
84  *
85  * All of the workalike chips use some form of MII transceiver support
86  * with the exception of the Macronix chips, which also have a SYM port.
87  * The ASIX AX88140A is also documented to have a SYM port, but all
88  * the cards I've seen use an MII transceiver, probably because the
89  * AX88140A doesn't support internal NWAY.
90  */
91 
92 #include "bpfilter.h"
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/mbuf.h>
97 #include <sys/protosw.h>
98 #include <sys/socket.h>
99 #include <sys/ioctl.h>
100 #include <sys/errno.h>
101 #include <sys/malloc.h>
102 #include <sys/kernel.h>
103 #include <sys/device.h>
104 #include <sys/timeout.h>
105 
106 #include <net/if.h>
107 #include <net/if_dl.h>
108 #include <net/if_types.h>
109 
110 #include <netinet/in.h>
111 #include <netinet/if_ether.h>
112 
113 #include <net/if_media.h>
114 
115 #if NBPFILTER > 0
116 #include <net/bpf.h>
117 #endif
118 
119 #include <dev/mii/mii.h>
120 #include <dev/mii/miivar.h>
121 
122 #include <machine/bus.h>
123 #include <dev/pci/pcidevs.h>
124 
125 #include <dev/ic/dcreg.h>
126 
127 int dc_intr(void *);
128 struct dc_type *dc_devtype(void *);
129 int dc_newbuf(struct dc_softc *, int, struct mbuf *);
130 int dc_encap(struct dc_softc *, struct mbuf *, u_int32_t *);
131 int dc_coal(struct dc_softc *, struct mbuf **);
132 
133 void dc_pnic_rx_bug_war(struct dc_softc *, int);
134 int dc_rx_resync(struct dc_softc *);
135 void dc_rxeof(struct dc_softc *);
136 void dc_txeof(struct dc_softc *);
137 void dc_tick(void *);
138 void dc_tx_underrun(struct dc_softc *);
139 void dc_start(struct ifnet *);
140 int dc_ioctl(struct ifnet *, u_long, caddr_t);
141 void dc_watchdog(struct ifnet *);
142 int dc_ifmedia_upd(struct ifnet *);
143 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
144 
145 void dc_delay(struct dc_softc *);
146 void dc_eeprom_width(struct dc_softc *);
147 void dc_eeprom_idle(struct dc_softc *);
148 void dc_eeprom_putbyte(struct dc_softc *, int);
149 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
150 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
151 void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
152 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
153 
154 void dc_mii_writebit(struct dc_softc *, int);
155 int dc_mii_readbit(struct dc_softc *);
156 void dc_mii_sync(struct dc_softc *);
157 void dc_mii_send(struct dc_softc *, u_int32_t, int);
158 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
159 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
160 int dc_miibus_readreg(struct device *, int, int);
161 void dc_miibus_writereg(struct device *, int, int, int);
162 void dc_miibus_statchg(struct device *);
163 
164 void dc_setcfg(struct dc_softc *, int);
165 u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
166 u_int32_t dc_crc_be(caddr_t);
167 void dc_setfilt_21143(struct dc_softc *);
168 void dc_setfilt_asix(struct dc_softc *);
169 void dc_setfilt_admtek(struct dc_softc *);
170 void dc_setfilt_xircom(struct dc_softc *);
171 
172 void dc_setfilt(struct dc_softc *);
173 
174 void dc_reset(struct dc_softc *);
175 int dc_list_rx_init(struct dc_softc *);
176 int dc_list_tx_init(struct dc_softc *);
177 
178 void dc_read_srom(struct dc_softc *, int);
179 void dc_parse_21143_srom(struct dc_softc *);
180 void dc_decode_leaf_sia(struct dc_softc *,
181 				     struct dc_eblock_sia *);
182 void dc_decode_leaf_mii(struct dc_softc *,
183 				     struct dc_eblock_mii *);
184 void dc_decode_leaf_sym(struct dc_softc *,
185 				     struct dc_eblock_sym *);
186 void dc_apply_fixup(struct dc_softc *, int);
187 
/*
 * Read-modify-write helpers to set/clear bits in a 32-bit CSR.
 * Note: 'sc' and 'reg' are evaluated twice.
 */
#define DC_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define DC_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

/* Shorthand for bit-twiddling the serial I/O (SIO) register. */
#define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
#define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
196 
197 void
198 dc_delay(struct dc_softc *sc)
199 {
200 	int idx;
201 
202 	for (idx = (300 / 33) + 1; idx > 0; idx--)
203 		CSR_READ_4(sc, DC_BUSCTL);
204 }
205 
/*
 * Probe the serial EEPROM's address width: clock out a read opcode,
 * then keep clocking until the EEPROM drives DATAOUT low, which
 * indicates it has received a full address.  The detected width is
 * stored in sc->dc_romwidth; implausible results fall back to 6 bits.
 */
void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode (binary 110), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Clock up to 12 address bits; stop as soon as the EEPROM
	 * pulls DATAOUT low.  'i' then holds the address width.
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/* Widths outside 4..12 bits are implausible; assume 6. */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
269 
/*
 * Force the EEPROM into its idle state: select the ROM interface,
 * assert chip select, toggle the clock 25 times, then deassert
 * everything and clear the SIO register.
 */
void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* 25 clock cycles with CS asserted flush any pending command. */
	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}
297 
298 /*
299  * Send a read command and address to the EEPROM, check for ACK.
300  */
301 void
302 dc_eeprom_putbyte(struct dc_softc *sc, int addr)
303 {
304 	int d, i;
305 
306 	d = DC_EECMD_READ >> 6;
307 
308 	for (i = 3; i--; ) {
309 		if (d & (1 << i))
310 			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
311 		else
312 			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
313 		dc_delay(sc);
314 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
315 		dc_delay(sc);
316 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
317 		dc_delay(sc);
318 	}
319 
320 	/*
321 	 * Feed in each bit and strobe the clock.
322 	 */
323 	for (i = sc->dc_romwidth; i--;) {
324 		if (addr & (1 << i)) {
325 			SIO_SET(DC_SIO_EE_DATAIN);
326 		} else {
327 			SIO_CLR(DC_SIO_EE_DATAIN);
328 		}
329 		dc_delay(sc);
330 		SIO_SET(DC_SIO_EE_CLK);
331 		dc_delay(sc);
332 		SIO_CLR(DC_SIO_EE_CLK);
333 		dc_delay(sc);
334 	}
335 }
336 
337 /*
338  * Read a word of data stored in the EEPROM at address 'addr.'
339  * The PNIC 82c168/82c169 has its own non-standard way to read
340  * the EEPROM.
341  */
342 void
343 dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
344 {
345 	int i;
346 	u_int32_t r;
347 
348 	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);
349 
350 	for (i = 0; i < DC_TIMEOUT; i++) {
351 		DELAY(1);
352 		r = CSR_READ_4(sc, DC_SIO);
353 		if (!(r & DC_PN_SIOCTL_BUSY)) {
354 			*dest = (u_int16_t)(r & 0xFFFF);
355 			return;
356 		}
357 	}
358 }
359 
360 /*
361  * Read a word of data stored in the EEPROM at address 'addr.'
362  * The Xircom X3201 has its own non-standard way to read
363  * the EEPROM, too.
364  */
365 void
366 dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
367 {
368 	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
369 
370 	addr *= 2;
371 	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
372 	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
373 	addr += 1;
374 	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
375 	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;
376 
377 	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
378 }
379 
380 /*
381  * Read a word of data stored in the EEPROM at address 'addr.'
382  */
383 void
384 dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
385 {
386 	int i;
387 	u_int16_t word = 0;
388 
389 	/* Force EEPROM to idle state. */
390 	dc_eeprom_idle(sc);
391 
392 	/* Enter EEPROM access mode. */
393 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
394 	dc_delay(sc);
395 	DC_SETBIT(sc, DC_SIO,  DC_SIO_ROMCTL_READ);
396 	dc_delay(sc);
397 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
398 	dc_delay(sc);
399 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
400 	dc_delay(sc);
401 
402 	/*
403 	 * Send address of word we want to read.
404 	 */
405 	dc_eeprom_putbyte(sc, addr);
406 
407 	/*
408 	 * Start reading bits from EEPROM.
409 	 */
410 	for (i = 0x8000; i; i >>= 1) {
411 		SIO_SET(DC_SIO_EE_CLK);
412 		dc_delay(sc);
413 		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
414 			word |= i;
415 		dc_delay(sc);
416 		SIO_CLR(DC_SIO_EE_CLK);
417 		dc_delay(sc);
418 	}
419 
420 	/* Turn off EEPROM access mode. */
421 	dc_eeprom_idle(sc);
422 
423 	*dest = word;
424 }
425 
426 /*
427  * Read a sequence of words from the EEPROM.
428  */
429 void
430 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt,
431     int swap)
432 {
433 	int i;
434 	u_int16_t word = 0, *ptr;
435 
436 	for (i = 0; i < cnt; i++) {
437 		if (DC_IS_PNIC(sc))
438 			dc_eeprom_getword_pnic(sc, off + i, &word);
439 		else if (DC_IS_XIRCOM(sc))
440 			dc_eeprom_getword_xircom(sc, off + i, &word);
441 		else
442 			dc_eeprom_getword(sc, off + i, &word);
443 		ptr = (u_int16_t *)(dest + (i * 2));
444 		if (swap)
445 			*ptr = betoh16(word);
446 		else
447 			*ptr = letoh16(word);
448 	}
449 }
450 
451 /*
452  * The following two routines are taken from the Macronix 98713
453  * Application Notes pp.19-21.
454  */
455 /*
456  * Write a bit to the MII bus.
457  */
458 void
459 dc_mii_writebit(struct dc_softc *sc, int bit)
460 {
461 	if (bit)
462 		CSR_WRITE_4(sc, DC_SIO,
463 		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
464 	else
465 		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
466 
467 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
468 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
469 }
470 
471 /*
472  * Read a bit from the MII bus.
473  */
474 int
475 dc_mii_readbit(struct dc_softc *sc)
476 {
477 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
478 	CSR_READ_4(sc, DC_SIO);
479 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
480 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
481 	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
482 		return (1);
483 	return (0);
484 }
485 
486 /*
487  * Sync the PHYs by setting data bit and strobing the clock 32 times.
488  */
489 void
490 dc_mii_sync(struct dc_softc *sc)
491 {
492 	int i;
493 
494 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
495 
496 	for (i = 0; i < 32; i++)
497 		dc_mii_writebit(sc, 1);
498 }
499 
500 /*
501  * Clock a series of bits through the MII.
502  */
503 void
504 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
505 {
506 	int i;
507 
508 	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
509 		dc_mii_writebit(sc, bits & i);
510 }
511 
512 /*
513  * Read an PHY register through the MII.
514  */
515 int
516 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
517 {
518 	int i, ack, s;
519 
520 	s = splnet();
521 
522 	/*
523 	 * Set up frame for RX.
524 	 */
525 	frame->mii_stdelim = DC_MII_STARTDELIM;
526 	frame->mii_opcode = DC_MII_READOP;
527 	frame->mii_turnaround = 0;
528 	frame->mii_data = 0;
529 
530 	/*
531 	 * Sync the PHYs.
532 	 */
533 	dc_mii_sync(sc);
534 
535 	/*
536 	 * Send command/address info.
537 	 */
538 	dc_mii_send(sc, frame->mii_stdelim, 2);
539 	dc_mii_send(sc, frame->mii_opcode, 2);
540 	dc_mii_send(sc, frame->mii_phyaddr, 5);
541 	dc_mii_send(sc, frame->mii_regaddr, 5);
542 
543 #ifdef notdef
544 	/* Idle bit */
545 	dc_mii_writebit(sc, 1);
546 	dc_mii_writebit(sc, 0);
547 #endif
548 
549 	/* Check for ack */
550 	ack = dc_mii_readbit(sc);
551 
552 	/*
553 	 * Now try reading data bits. If the ack failed, we still
554 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
555 	 */
556 	if (ack) {
557 		for(i = 0; i < 16; i++) {
558 			dc_mii_readbit(sc);
559 		}
560 		goto fail;
561 	}
562 
563 	for (i = 0x8000; i; i >>= 1) {
564 		if (!ack) {
565 			if (dc_mii_readbit(sc))
566 				frame->mii_data |= i;
567 		}
568 	}
569 
570 fail:
571 
572 	dc_mii_writebit(sc, 0);
573 	dc_mii_writebit(sc, 0);
574 
575 	splx(s);
576 
577 	if (ack)
578 		return (1);
579 	return (0);
580 }
581 
582 /*
583  * Write to a PHY register through the MII.
584  */
585 int
586 dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
587 {
588 	int s;
589 
590 	s = splnet();
591 	/*
592 	 * Set up frame for TX.
593 	 */
594 
595 	frame->mii_stdelim = DC_MII_STARTDELIM;
596 	frame->mii_opcode = DC_MII_WRITEOP;
597 	frame->mii_turnaround = DC_MII_TURNAROUND;
598 
599 	/*
600 	 * Sync the PHYs.
601 	 */
602 	dc_mii_sync(sc);
603 
604 	dc_mii_send(sc, frame->mii_stdelim, 2);
605 	dc_mii_send(sc, frame->mii_opcode, 2);
606 	dc_mii_send(sc, frame->mii_phyaddr, 5);
607 	dc_mii_send(sc, frame->mii_regaddr, 5);
608 	dc_mii_send(sc, frame->mii_turnaround, 2);
609 	dc_mii_send(sc, frame->mii_data, 16);
610 
611 	/* Idle bit. */
612 	dc_mii_writebit(sc, 0);
613 	dc_mii_writebit(sc, 0);
614 
615 	splx(s);
616 	return (0);
617 }
618 
/*
 * MII bus read callback.  Handles the many chip-specific PHY access
 * schemes: faked PHY for non-MII media modes, PNIC's MII access
 * register, Comet's direct-mapped PHY registers, and the generic
 * bit-banged MII frame for everything else.
 */
int
dc_miibus_readreg(struct device *self, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc = (struct dc_softc *)self;
	int i, rval, phy_reg;

	/*
	 * Note: both the AL981 and AN983 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN983 uses a serial MII interface.
	 * The AN983's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
	 * so we only respond to the correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	/* Not in MII mode: fake a PHY at the last MII address. */
	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (PCI_VENDOR_LITEON);
				return (PCI_VENDOR_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (PCI_PRODUCT_LITEON_PNIC);
				return (PCI_PRODUCT_DEC_21142);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	/* PNIC: dedicated MII access register, poll for completion. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				/* All-ones means no PHY responded. */
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	/* Comet (AL981): PHY registers are directly mapped in CSR space. */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_read: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	/* Everything else: bit-banged MII management frame. */
	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	if (sc->dc_type == DC_TYPE_98713) {
		/* 98713: temporarily clear PORTSEL for MII access. */
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}
739 
/*
 * MII bus write callback.  Mirrors dc_miibus_readreg(): PNIC uses
 * its MII access register, Comet writes the mapped PHY register
 * directly, all others use a bit-banged MII frame.
 */
void
dc_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct dc_mii_frame frame;
	int i, phy_reg;

	bzero(&frame, sizeof(frame));

	/* Only respond to the single valid PHY address (see readreg). */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return;
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return;

	/* PNIC: post the write and poll for completion. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return;
	}

	/* Comet (AL981): PHY registers are directly mapped in CSR space. */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_write: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return;
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return;
	}

	/* Everything else: bit-banged MII management frame. */
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	if (sc->dc_type == DC_TYPE_98713) {
		/* 98713: temporarily clear PORTSEL for MII access. */
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
}
810 
811 void
812 dc_miibus_statchg(struct device *self)
813 {
814 	struct dc_softc *sc = (struct dc_softc *)self;
815 	struct mii_data *mii;
816 	struct ifmedia *ifm;
817 
818 	if (DC_IS_ADMTEK(sc))
819 		return;
820 
821 	mii = &sc->sc_mii;
822 	ifm = &mii->mii_media;
823 	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
824 		dc_setcfg(sc, ifm->ifm_media);
825 		sc->dc_if_media = ifm->ifm_media;
826 	} else {
827 		dc_setcfg(sc, mii->mii_media_active);
828 		sc->dc_if_media = mii->mii_media_active;
829 	}
830 }
831 
/* Multicast hash table index widths, in bits. */
#define DC_BITS_512	9
#define DC_BITS_128	7
#define DC_BITS_64	6

/*
 * Compute the little-endian CRC32 of an ethernet address and reduce
 * it to a hash-table index whose width depends on the chip.
 */
u_int32_t
dc_crc_le(struct dc_softc *sc, caddr_t addr)
{
	u_int32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	/*
	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
	 * chips is only 128 bits wide.
	 */
	if (sc->dc_flags & DC_128BIT_HASH)
		return (crc & ((1 << DC_BITS_128) - 1));

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return (crc & ((1 << DC_BITS_64) - 1));

	/* Xircom's hash filtering table is different (read: weird) */
	/* Xircom uses the LEAST significant bits */
	if (DC_IS_XIRCOM(sc)) {
		if ((crc & 0x180) == 0x180)
			return (crc & 0x0F) + (crc	& 0x70)*3 + (14 << 4);
		else
			return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
	}

	/* Default: 512-bit-wide hash table. */
	return (crc & ((1 << DC_BITS_512) - 1));
}
866 
867 /*
868  * Calculate CRC of a multicast group address, return the lower 6 bits.
869  */
870 #define dc_crc_be(addr)	((ether_crc32_be(addr,ETHER_ADDR_LEN) >> 26) \
871 	& 0x0000003F)
872 
873 /*
874  * 21143-style RX filter setup routine. Filter programming is done by
875  * downloading a special setup frame into the TX engine. 21143, Macronix,
876  * PNIC, PNIC II and Davicom chips are programmed this way.
877  *
878  * We always program the chip using 'hash perfect' mode, i.e. one perfect
879  * address (our node address) and a 512-bit hash filter for multicast
880  * frames. We also sneak the broadcast address into the hash filter since
881  * we need that too.
882  */
883 void
884 dc_setfilt_21143(struct dc_softc *sc)
885 {
886 	struct arpcom *ac = &sc->sc_arpcom;
887 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
888 	struct ether_multi *enm;
889 	struct ether_multistep step;
890 	struct dc_desc *sframe;
891 	u_int32_t h, *sp;
892 	int i;
893 
894 	i = sc->dc_cdata.dc_tx_prod;
895 	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
896 	sc->dc_cdata.dc_tx_cnt++;
897 	sframe = &sc->dc_ldata->dc_tx_list[i];
898 	sp = &sc->dc_ldata->dc_sbuf[0];
899 	bzero(sp, DC_SFRAME_LEN);
900 
901 	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
902 	    offsetof(struct dc_list_data, dc_sbuf));
903 	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
904 	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
905 
906 	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
907 	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
908 
909 	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
910 	ifp->if_flags &= ~IFF_ALLMULTI;
911 
912 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
913 		ifp->if_flags |= IFF_ALLMULTI;
914 		if (ifp->if_flags & IFF_PROMISC)
915 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
916 		else
917 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
918 	} else {
919 		ETHER_FIRST_MULTI(step, ac, enm);
920 		while (enm != NULL) {
921 			h = dc_crc_le(sc, enm->enm_addrlo);
922 
923 			sp[h >> 4] |= htole32(1 << (h & 0xF));
924 
925 			ETHER_NEXT_MULTI(step, enm);
926 		}
927 	}
928 
929 	/*
930 	 * Always accept broadcast frames.
931 	 */
932 	h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
933 	sp[h >> 4] |= htole32(1 << (h & 0xF));
934 
935 	/* Set our MAC address */
936 	sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
937 	sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
938 	sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);
939 
940 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
941 	    offsetof(struct dc_list_data, dc_sbuf[0]),
942 	    sizeof(struct dc_list_data) -
943 	    offsetof(struct dc_list_data, dc_sbuf[0]),
944 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
945 
946 	sframe->dc_status = htole32(DC_TXSTAT_OWN);
947 
948 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
949 	    offsetof(struct dc_list_data, dc_tx_list[i]),
950 	    sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
951 
952 	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
953 
954 	/*
955 	 * The PNIC takes an exceedingly long time to process its
956 	 * setup frame; wait 10ms after posting the setup frame
957 	 * before proceeding, just so it has time to swallow its
958 	 * medicine.
959 	 */
960 	DELAY(10000);
961 
962 	ifp->if_timer = 5;
963 }
964 
/*
 * ADMtek RX filter setup: the MAC address and a 64-bit multicast
 * hash table are programmed through dedicated CSRs rather than a
 * setup frame.
 */
void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	int h = 0;

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Centaur hashes little-endian, others big-endian. */
			if (DC_IS_CENTAUR(sc))
				h = dc_crc_le(sc, enm->enm_addrlo);
			else
				h = dc_crc_be(enm->enm_addrlo);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 |
	    ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4]);

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}
1011 
/*
 * ASIX RX filter setup: the perfect filter and the 64-bit multicast
 * hash table are written through an index/data CSR pair.
 */
void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	int h = 0;

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_AX_NETCFG_RX_BROAD |
	    DC_NETCFG_RX_PROMISC));
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_be(enm->enm_addrlo);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Init our MAC address, loaded as two 32-bit words.
	 * NOTE(review): reads ac_enaddr via a u_int32_t cast; assumes
	 * the address is suitably aligned -- TODO confirm.
	 */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
1066 
/*
 * Xircom RX filter setup: like the 21143, programming is done with a
 * setup frame, but TX/RX must be stopped first and the MAC address
 * lives at the start of the setup buffer.
 */
void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	/* Stop the transmitter and receiver while reprogramming. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero(sp, DC_SFRAME_LEN);

	/* Point the descriptor at the setup buffer within the list map. */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/* Mark the slot occupied so txeof can reap it later. */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_le(sc, enm->enm_addrlo);

			sp[h >> 4] |= htole32(1 << (h & 0xF));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Always accept broadcast frames.
	 */
	h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
	sp[h >> 4] |= htole32(1 << (h & 0xF));

	/* Set our MAC address */
	sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	/* Restart TX/RX and hand the setup frame to the chip. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;
}
1140 
/*
 * Program the receive filter using whichever method this
 * controller family requires.
 */
void
dc_setfilt(struct dc_softc *sc)
{
	/* 21143 and its many clones share the setup-frame method. */
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc)) {
		dc_setfilt_21143(sc);
	}

	/* ASIX parts program the filter through index/data registers. */
	if (DC_IS_ASIX(sc)) {
		dc_setfilt_asix(sc);
	}

	if (DC_IS_ADMTEK(sc)) {
		dc_setfilt_admtek(sc);
	}

	/* Xircom parts use their own setup-frame variant. */
	if (DC_IS_XIRCOM(sc)) {
		dc_setfilt_xircom(sc);
	}
}
1157 
1158 /*
1159  * In order to fiddle with the
1160  * 'full-duplex' and '100Mbps' bits in the netconfig register, we
1161  * first have to put the transmit and/or receive logic in the idle state.
1162  */
void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0;
	u_int32_t isr;

	/* Nothing to program when no media is selected. */
	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/*
	 * If the chip is currently running, stop TX/RX and spin until
	 * both engines report idle (or we time out); remember to
	 * restart them once the new configuration is in place.
	 */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/*
			 * Suppress the warnings on chips known not to
			 * report the idle state reliably (ASIX TX,
			 * broken-RX-state variants).
			 */
			if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}
	}

	/* 100Mbps configuration. */
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			/* MII mode: run through the external PHY. */
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* Symbol/serial mode: use the internal PCS. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	/* 10Mbps configuration. */
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * 21143 SIA: reset the SIA, program the
				 * 10baseT control register for the
				 * requested duplex with autoneg off,
				 * then run the SROM fixup sequence.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	/* Program the full/half duplex bit to match the media word. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart TX/RX if we stopped them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
}
1316 
/*
 * Soft-reset the chip and bring its CSRs back to a known state.
 */
void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Wait for the reset bit to self-clear. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * These variants apparently never clear the reset bit on their
	 * own: deassert it by hand and zero i so the timeout warning
	 * below is suppressed for them.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Mask interrupts and clear bus/network configuration. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/* The 21145 defaults to 10baseT after reset. */
	if (sc->dc_type == DC_TYPE_21145)
		dc_setcfg(sc, IFM_10_T);
}
1362 
1363 void
1364 dc_apply_fixup(struct dc_softc *sc, int media)
1365 {
1366 	struct dc_mediainfo *m;
1367 	u_int8_t *p;
1368 	int i;
1369 	u_int32_t reg;
1370 
1371 	m = sc->dc_mi;
1372 
1373 	while (m != NULL) {
1374 		if (m->dc_media == media)
1375 			break;
1376 		m = m->dc_next;
1377 	}
1378 
1379 	if (m == NULL)
1380 		return;
1381 
1382 	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1383 		reg = (p[0] | (p[1] << 8)) << 16;
1384 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1385 	}
1386 
1387 	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1388 		reg = (p[0] | (p[1] << 8)) << 16;
1389 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1390 	}
1391 }
1392 
1393 void
1394 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1395 {
1396 	struct dc_mediainfo *m;
1397 
1398 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1399 	if (m == NULL)
1400 		return;
1401 	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1402 	case DC_SIA_CODE_10BT:
1403 		m->dc_media = IFM_10_T;
1404 		break;
1405 	case DC_SIA_CODE_10BT_FDX:
1406 		m->dc_media = IFM_10_T|IFM_FDX;
1407 		break;
1408 	case DC_SIA_CODE_10B2:
1409 		m->dc_media = IFM_10_2;
1410 		break;
1411 	case DC_SIA_CODE_10B5:
1412 		m->dc_media = IFM_10_5;
1413 		break;
1414 	default:
1415 		break;
1416 	}
1417 
1418 	/*
1419 	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1420 	 * Things apparently already work for cards that do
1421 	 * supply Media Specific Data.
1422 	 */
1423 	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1424 		m->dc_gp_len = 2;
1425 		m->dc_gp_ptr =
1426 		(u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1427 	} else {
1428 		m->dc_gp_len = 2;
1429 		m->dc_gp_ptr =
1430 		(u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1431 	}
1432 
1433 	m->dc_next = sc->dc_mi;
1434 	sc->dc_mi = m;
1435 
1436 	sc->dc_pmode = DC_PMODE_SIA;
1437 }
1438 
1439 void
1440 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1441 {
1442 	struct dc_mediainfo *m;
1443 
1444 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1445 	if (m == NULL)
1446 		return;
1447 	if (l->dc_sym_code == DC_SYM_CODE_100BT)
1448 		m->dc_media = IFM_100_TX;
1449 
1450 	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1451 		m->dc_media = IFM_100_TX|IFM_FDX;
1452 
1453 	m->dc_gp_len = 2;
1454 	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1455 
1456 	m->dc_next = sc->dc_mi;
1457 	sc->dc_mi = m;
1458 
1459 	sc->dc_pmode = DC_PMODE_SYM;
1460 }
1461 
1462 void
1463 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1464 {
1465 	u_int8_t *p;
1466 	struct dc_mediainfo *m;
1467 
1468 	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1469 	if (m == NULL)
1470 		return;
1471 	/* We abuse IFM_AUTO to represent MII. */
1472 	m->dc_media = IFM_AUTO;
1473 	m->dc_gp_len = l->dc_gpr_len;
1474 
1475 	p = (u_int8_t *)l;
1476 	p += sizeof(struct dc_eblock_mii);
1477 	m->dc_gp_ptr = p;
1478 	p += 2 * l->dc_gpr_len;
1479 	m->dc_reset_len = *p;
1480 	p++;
1481 	m->dc_reset_ptr = p;
1482 
1483 	m->dc_next = sc->dc_mi;
1484 	sc->dc_mi = m;
1485 }
1486 
1487 void
1488 dc_read_srom(struct dc_softc *sc, int bits)
1489 {
1490 	int size;
1491 
1492 	size = 2 << bits;
1493 	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
1494 	if (sc->dc_srom == NULL)
1495 		return;
1496 	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
1497 }
1498 
/*
 * Walk the 21143 SROM info leaf and build the media info list.
 * The leaf is scanned twice: first to detect a MII media block,
 * then to decode blocks — SIA and SYM blocks are only used when
 * no MII block exists, since MII takes precedence.
 */
void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	/* Byte 27 of the SROM holds the offset of the info leaf. */
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	/* Media blocks start one byte before the end of the leaf header. */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
		    have_mii++;

		/* Advance past this block: length (low 7 bits) + header. */
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again. Only use SIA and SYM media
	 * blocks if no MII media block is available.
	 */
	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch(hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			if (! have_mii)
			    dc_decode_leaf_sia(sc,
				(struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			if (! have_mii)
			    dc_decode_leaf_sym(sc,
				(struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}
}
1555 
1556 /*
1557  * Attach the interface. Allocate softc structures, do ifmedia
1558  * setup and ethernet/BPF attach.
1559  */
1560 void
1561 dc_attach(struct dc_softc *sc)
1562 {
1563 	struct ifnet *ifp;
1564 	int mac_offset, tmp, i;
1565 	u_int32_t reg;
1566 
1567 	/*
1568 	 * Get station address from the EEPROM.
1569 	 */
1570 	if (sc->sc_hasmac)
1571 		goto hasmac;
1572 
1573 	switch(sc->dc_type) {
1574 	case DC_TYPE_98713:
1575 	case DC_TYPE_98713A:
1576 	case DC_TYPE_987x5:
1577 	case DC_TYPE_PNICII:
1578 		dc_read_eeprom(sc, (caddr_t)&mac_offset,
1579 		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
1580 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1581 		    (mac_offset / 2), 3, 0);
1582 		break;
1583 	case DC_TYPE_PNIC:
1584 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1585 		break;
1586 	case DC_TYPE_DM9102:
1587 	case DC_TYPE_21143:
1588 	case DC_TYPE_21145:
1589 	case DC_TYPE_ASIX:
1590 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1591 		    DC_EE_NODEADDR, 3, 0);
1592 		break;
1593 	case DC_TYPE_AL981:
1594 	case DC_TYPE_AN983:
1595 		reg = CSR_READ_4(sc, DC_AL_PAR0);
1596 		sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff);
1597 		sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff;
1598 		sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff;
1599 		sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff;
1600 		reg = CSR_READ_4(sc, DC_AL_PAR1);
1601 		sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff);
1602 		sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff;
1603 		break;
1604 	case DC_TYPE_CONEXANT:
1605 		bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR,
1606 		    &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1607 		break;
1608 	case DC_TYPE_XIRCOM:
1609 		/* Some newer units have the MAC at offset 8 */
1610 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 8, 3, 0);
1611 
1612 		if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1613 		    sc->sc_arpcom.ac_enaddr[1] == 0x10 &&
1614 		    sc->sc_arpcom.ac_enaddr[2] == 0xa4)
1615 			break;
1616 		if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1617 		    sc->sc_arpcom.ac_enaddr[1] == 0x80 &&
1618 		    sc->sc_arpcom.ac_enaddr[2] == 0xc7)
1619 			break;
1620 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 3, 3, 0);
1621 		break;
1622 	default:
1623 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1624 		    DC_EE_NODEADDR, 3, 0);
1625 		break;
1626 	}
1627 hasmac:
1628 
1629 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
1630 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1631 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
1632 		printf(": can't alloc list mem\n");
1633 		goto fail;
1634 	}
1635 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1636 	    sizeof(struct dc_list_data), &sc->sc_listkva,
1637 	    BUS_DMA_NOWAIT) != 0) {
1638 		printf(": can't map list mem\n");
1639 		goto fail;
1640 	}
1641 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
1642 	    sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
1643 	    &sc->sc_listmap) != 0) {
1644 		printf(": can't alloc list map\n");
1645 		goto fail;
1646 	}
1647 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1648 	    sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1649 		printf(": can't load list map\n");
1650 		goto fail;
1651 	}
1652 	sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1653 
1654 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1655 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1656 		    0, BUS_DMA_NOWAIT,
1657 		    &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
1658 			printf(": can't create rx map\n");
1659 			return;
1660 		}
1661 	}
1662 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1663 	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
1664 		printf(": can't create rx spare map\n");
1665 		return;
1666 	}
1667 
1668 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1669 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1670 		    DC_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
1671 		    &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
1672 			printf(": can't create tx map\n");
1673 			return;
1674 		}
1675 	}
1676 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, DC_TX_LIST_CNT - 5,
1677 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1678 		printf(": can't create tx spare map\n");
1679 		return;
1680 	}
1681 
1682 	/*
1683 	 * A 21143 or clone chip was detected. Inform the world.
1684 	 */
1685 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1686 
1687 	ifp = &sc->sc_arpcom.ac_if;
1688 	ifp->if_softc = sc;
1689 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1690 	ifp->if_ioctl = dc_ioctl;
1691 	ifp->if_start = dc_start;
1692 	ifp->if_watchdog = dc_watchdog;
1693 	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
1694 	IFQ_SET_READY(&ifp->if_snd);
1695 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1696 
1697 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1698 
1699 	/* Do MII setup. If this is a 21143, check for a PHY on the
1700 	 * MII bus after applying any necessary fixups to twiddle the
1701 	 * GPIO bits. If we don't end up finding a PHY, restore the
1702 	 * old selection (SIA only or SIA/SYM) and attach the dcphy
1703 	 * driver instead.
1704 	 */
1705 	if (DC_IS_INTEL(sc)) {
1706 		dc_apply_fixup(sc, IFM_AUTO);
1707 		tmp = sc->dc_pmode;
1708 		sc->dc_pmode = DC_PMODE_MII;
1709 	}
1710 
1711 	/*
1712 	 * Setup General Purpose port mode and data so the tulip can talk
1713 	 * to the MII.  This needs to be done before mii_attach so that
1714 	 * we can actually see them.
1715 	 */
1716 	if (DC_IS_XIRCOM(sc)) {
1717 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
1718 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1719 		DELAY(10);
1720 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
1721 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1722 		DELAY(10);
1723 	}
1724 
1725 	sc->sc_mii.mii_ifp = ifp;
1726 	sc->sc_mii.mii_readreg = dc_miibus_readreg;
1727 	sc->sc_mii.mii_writereg = dc_miibus_writereg;
1728 	sc->sc_mii.mii_statchg = dc_miibus_statchg;
1729 	ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1730 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1731 	    MII_OFFSET_ANY, 0);
1732 
1733 	if (DC_IS_INTEL(sc)) {
1734 		if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1735 			sc->dc_pmode = tmp;
1736 			if (sc->dc_pmode != DC_PMODE_SIA)
1737 				sc->dc_pmode = DC_PMODE_SYM;
1738 			sc->dc_flags |= DC_21143_NWAY;
1739 			if (sc->dc_flags & DC_MOMENCO_BOTCH)
1740 				sc->dc_pmode = DC_PMODE_MII;
1741 			mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1742 			    MII_PHY_ANY, MII_OFFSET_ANY, 0);
1743 		} else {
1744 			/* we have a PHY, so we must clear this bit */
1745 			sc->dc_flags &= ~DC_TULIP_LEDS;
1746 		}
1747 	}
1748 
1749 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1750 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1751 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1752 		printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1753 	} else if (sc->dc_type == DC_TYPE_21145) {
1754 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
1755 	} else
1756 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1757 
1758 	if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
1759 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);
1760 
1761 	if (DC_IS_ADMTEK(sc)) {
1762 		/*
1763 		 * Set automatic TX underrun recovery for the ADMtek chips
1764 		 */
1765 		DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
1766 	}
1767 
1768 	/*
1769 	 * Call MI attach routines.
1770 	 */
1771 	if_attach(ifp);
1772 	ether_ifattach(ifp);
1773 
1774 fail:
1775 	return;
1776 }
1777 
1778 /*
1779  * Initialize the transmit descriptors.
1780  */
1781 int
1782 dc_list_tx_init(struct dc_softc *sc)
1783 {
1784 	struct dc_chain_data *cd;
1785 	struct dc_list_data *ld;
1786 	int i;
1787 	bus_addr_t next;
1788 
1789 	cd = &sc->dc_cdata;
1790 	ld = sc->dc_ldata;
1791 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1792 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1793 		if (i == (DC_TX_LIST_CNT - 1))
1794 			next +=
1795 			    offsetof(struct dc_list_data, dc_tx_list[0]);
1796 		else
1797 			next +=
1798 			    offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1799 		cd->dc_tx_chain[i].sd_mbuf = NULL;
1800 		ld->dc_tx_list[i].dc_data = htole32(0);
1801 		ld->dc_tx_list[i].dc_ctl = htole32(0);
1802 		ld->dc_tx_list[i].dc_next = htole32(next);
1803 	}
1804 
1805 	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1806 
1807 	return (0);
1808 }
1809 
1810 
1811 /*
1812  * Initialize the RX descriptors and allocate mbufs for them. Note that
1813  * we arrange the descriptors in a closed ring, so that the last descriptor
1814  * points back to the first.
1815  */
1816 int
1817 dc_list_rx_init(struct dc_softc *sc)
1818 {
1819 	struct dc_chain_data *cd;
1820 	struct dc_list_data *ld;
1821 	int i;
1822 	bus_addr_t next;
1823 
1824 	cd = &sc->dc_cdata;
1825 	ld = sc->dc_ldata;
1826 
1827 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1828 		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
1829 			return (ENOBUFS);
1830 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1831 		if (i == (DC_RX_LIST_CNT - 1))
1832 			next +=
1833 			    offsetof(struct dc_list_data, dc_rx_list[0]);
1834 		else
1835 			next +=
1836 			    offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1837 		ld->dc_rx_list[i].dc_next = htole32(next);
1838 	}
1839 
1840 	cd->dc_rx_prod = 0;
1841 
1842 	return (0);
1843 }
1844 
1845 /*
1846  * Initialize an RX descriptor and attach an MBUF cluster.
1847  */
/*
 * Set up RX descriptor 'i' with a data buffer.  If 'm' is NULL a new
 * cluster mbuf is allocated and loaded via the spare DMA map (which
 * is then swapped with the slot's map so the spare stays loadable);
 * otherwise the caller's mbuf is recycled in place.  Returns 0 on
 * success or ENOBUFS if an mbuf/cluster/DMA load fails.
 */
int
dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct dc_desc *c;
	bus_dmamap_t map;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		/* Load into the spare map so the slot's map stays valid
		 * until we know the load succeeded. */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,
		    m_new, BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		/* Swap the freshly-loaded spare map into the slot. */
		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Reserve 8 bytes at the front (matches the dc_data offset below). */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero(mtod(m_new, char *), m_new->m_len);

	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Hand the buffer to the chip and mark the descriptor owned. */
	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
	c->dc_data = htole32(
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN);
	c->dc_status = htole32(DC_RXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_rx_list[i]),
	    sizeof(struct dc_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1915 
1916 /*
1917  * Grrrrr.
1918  * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
1920  * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
1921  * will happen on slow machines. The bug is that sometimes instead of
1922  * uploading one complete frame during reception, it uploads what looks
1923  * like the entire contents of its FIFO memory. The frame we want is at
1924  * the end of the whole mess, but we never know exactly how much data has
1925  * been uploaded, so salvaging the frame is hard.
1926  *
1927  * There is only one way to do it reliably, and it's disgusting.
1928  * Here's what we know:
1929  *
1930  * - We know there will always be somewhere between one and three extra
1931  *   descriptors uploaded.
1932  *
1933  * - We know the desired received frame will always be at the end of the
1934  *   total data upload.
1935  *
1936  * - We know the size of the desired received frame because it will be
1937  *   provided in the length field of the status word in the last descriptor.
1938  *
1939  * Here's what we do:
1940  *
1941  * - When we allocate buffers for the receive ring, we bzero() them.
1942  *   This means that we know that the buffer contents should be all
1943  *   zeros, except for data uploaded by the chip.
1944  *
1945  * - We also force the PNIC chip to upload frames that include the
1946  *   ethernet CRC at the end.
1947  *
1948  * - We gather all of the bogus frame data into a single buffer.
1949  *
1950  * - We then position a pointer at the end of this buffer and scan
1951  *   backwards until we encounter the first non-zero byte of data.
1952  *   This is the end of the received frame. We know we will encounter
1953  *   some data at the end of the frame because the CRC will always be
1954  *   there, so even if the sender transmits a packet of all zeros,
1955  *   we won't be fooled.
1956  *
1957  * - We know the size of the actual received frame, so we subtract
1958  *   that value from the current pointer location. This brings us
1959  *   to the start of the actual received packet.
1960  *
1961  * - We copy this into an mbuf and pass it on, along with the actual
1962  *   frame length.
1963  *
1964  * The performance hit is tremendous, but it beats dropping frames all
1965  * the time.
1966  */
1967 
1968 #define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
/*
 * Salvage a frame mangled by the PNIC RX bug (see the long comment
 * above): gather the bogus multi-descriptor upload into one buffer,
 * scan backwards for the real frame, and fake up a good descriptor.
 */
void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc		*cur_rx;
	struct dc_desc		*c = NULL;
	struct mbuf		*m = NULL;
	unsigned char		*ptr;
	int			i, total_len;
	u_int32_t		rxstat = 0;

	/* Start where the first fragment of the mess was saved. */
	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, ETHER_MAX_DIX_LEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		bcopy(mtod(m, char *), ptr, ETHER_MAX_DIX_LEN);
		ptr += ETHER_MAX_DIX_LEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Recycle the intermediate buffers back to the ring. */
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/* Scan backwards until we hit a non-zero byte. */
	while(*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((unsigned long)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, m);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2023 
2024 /*
2025  * This routine searches the RX ring for dirty descriptors in the
2026  * event that the rxeof routine falls out of sync with the chip's
2027  * current descriptor pointer. This may happen sometimes as a result
2028  * of a "no RX buffer available" condition that happens when the chip
2029  * consumes all of the RX buffers before the driver has a chance to
2030  * process the RX ring. This routine may need to be called more than
2031  * once to bring the driver back in sync with the chip, however we
2032  * should still be getting RX DONE interrupts to drive the search
2033  * for new packets in the RX ring, so we should catch up eventually.
2034  */
2035 int
2036 dc_rx_resync(struct dc_softc *sc)
2037 {
2038 	u_int32_t stat;
2039 	int i, pos, offset;
2040 
2041 	pos = sc->dc_cdata.dc_rx_prod;
2042 
2043 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2044 
2045 		offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
2046 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2047 		    offset, sizeof(struct dc_desc),
2048 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2049 
2050 		stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
2051 		if (!(stat & htole32(DC_RXSTAT_OWN)))
2052 			break;
2053 		DC_INC(pos, DC_RX_LIST_CNT);
2054 	}
2055 
2056 	/* If the ring really is empty, then just return. */
2057 	if (i == DC_RX_LIST_CNT)
2058 		return (0);
2059 
2060 	/* We've fallen behind the chip: catch it. */
2061 	sc->dc_cdata.dc_rx_prod = pos;
2062 
2063 	return (EAGAIN);
2064 }
2065 
2066 /*
2067  * A frame has been uploaded: pass the resulting mbuf chain up to
2068  * the higher level protocols.
2069  */
void
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	int i, offset, total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	/* Consume descriptors until we reach one the chip still owns. */
	for(;;) {
		struct mbuf	*m0 = NULL;

		/* Make the CPU's view of this descriptor current. */
		offset = offsetof(struct dc_list_data, dc_rx_list[i]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(cur_rx->dc_status);
		if (rxstat & DC_RXSTAT_OWN)
			break;

		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		total_len = DC_RXBYTES(rxstat);

		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/*
		 * PNIC bug workaround: the chip can split one frame
		 * across several descriptors.  Remember where the first
		 * fragment started, skip ahead until the last fragment
		 * arrives, then let dc_pnic_rx_bug_war() patch things
		 * up and re-read the status and length.
		 */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				rxstat = letoh32(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.  However, don't report long
		 * frames as errors since they could be VLANs.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
				       DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
				       DC_RXSTAT_RUNT   | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_newbuf(sc, i, m);
				if (rxstat & DC_RXSTAT_CRCERR) {
					/* CRC error: just drop this frame. */
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/* Anything worse: reinit the chip. */
					dc_init(sc);
					return;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		/* Copy the frame out so the DMA buffer can be recycled. */
		m->m_pkthdr.rcvif = ifp;
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp);
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
		if (m0 == NULL) {
			/* m_devget failed; count it and keep going. */
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		ifp->if_ipackets++;
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	/* Remember where to resume on the next call. */
	sc->dc_cdata.dc_rx_prod = i;
}
2167 
2168 /*
2169  * A frame was downloaded to the chip. It's safe for us to clean up
2170  * the list buffers.
2171  */
2172 
void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx, offset;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->dc_cdata.dc_tx_cons;
	while(idx != sc->dc_cdata.dc_tx_prod) {
		u_int32_t		txstat;

		/* Sync so we see the chip's latest view of the descriptor. */
		offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = letoh32(cur_tx->dc_status);

		/* Chip still owns this descriptor: stop reclaiming. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/*
		 * Non-final fragments and setup frames carry no useful
		 * completion status; just reclaim the slot.
		 */
		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
			}
			sc->dc_cdata.dc_tx_cnt--;
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				/* Fatal TX error: reinitialize the chip. */
				dc_init(sc);
				return;
			}
		}

		/* Collision count lives in bits 3..n of the status word. */
		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}
	sc->dc_cdata.dc_tx_cons = idx;

	/* With >5 free slots we can accept new work again. */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > 5)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->dc_cdata.dc_tx_cnt == 0)
		ifp->if_timer = 0;
}
2287 
/*
 * Periodic timer: poll the PHY/link state (subject to per-chip quirks)
 * and kick the transmit queue once a link comes up.
 */
void
dc_tick(void *xsc)
{
	struct dc_softc *sc = (struct dc_softc *)xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;
	u_int32_t r;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143 NWAY: read the link status bits directly
			 * and force a renegotiation if the reported speed
			 * no longer matches the active media.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc,
			    DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
	 	    dc_start(ifp);
	}

	/* Poll faster (100ms) while NWAY negotiation has no link yet. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		timeout_add_msec(&sc->dc_tick_tmo, 100);
	else
		timeout_add_sec(&sc->dc_tick_tmo, 1);

	splx(s);
}
2366 
2367 /* A transmit underrun has occurred.  Back off the transmit threshold,
2368  * or switch to store and forward mode if we have to.
2369  */
2370 void
2371 dc_tx_underrun(struct dc_softc *sc)
2372 {
2373 	u_int32_t	isr;
2374 	int		i;
2375 
2376 	if (DC_IS_DAVICOM(sc))
2377 		dc_init(sc);
2378 
2379 	if (DC_IS_INTEL(sc)) {
2380 		/*
2381 		 * The real 21143 requires that the transmitter be idle
2382 		 * in order to change the transmit threshold or store
2383 		 * and forward state.
2384 		 */
2385 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2386 
2387 		for (i = 0; i < DC_TIMEOUT; i++) {
2388 			isr = CSR_READ_4(sc, DC_ISR);
2389 			if (isr & DC_ISR_TX_IDLE)
2390 				break;
2391 			DELAY(10);
2392 		}
2393 		if (i == DC_TIMEOUT) {
2394 			printf("%s: failed to force tx to idle state\n",
2395 			    sc->sc_dev.dv_xname);
2396 			dc_init(sc);
2397 		}
2398 	}
2399 
2400 	sc->dc_txthresh += DC_TXTHRESH_INC;
2401 	if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
2402 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2403 	} else {
2404 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2405 		DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2406 	}
2407 
2408 	if (DC_IS_INTEL(sc))
2409 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2410 
2411 	return;
2412 }
2413 
/*
 * Interrupt handler: ack and dispatch all pending interrupt causes.
 * Returns nonzero if the interrupt was ours.
 */
int
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status, ints;
	int claimed = 0;

	sc = arg;

	ifp = &sc->sc_arpcom.ac_if;

	ints = CSR_READ_4(sc, DC_ISR);
	/* Not one of the interrupts we handle: not ours. */
	if ((ints & DC_INTRS) == 0)
		return (claimed);
	/* All-ones read: device is gone (e.g. detached/powered off). */
	if (ints == 0xffffffff)
		return (0);

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc, 0);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	/* Loop until all pending causes have been acked and serviced. */
	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
	    status != 0xFFFFFFFF &&
	    (ifp->if_flags & IFF_RUNNING)) {

		claimed = 1;
		/* Write the status bits back to ack them. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			int		curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			/* No packets delivered: resync with the chip. */
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* Still work queued: restart the transmitter. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			int		curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & DC_ISR_BUS_ERR)
			dc_init(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	/* Try to drain anything queued while we were servicing. */
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		dc_start(ifp);

	return (claimed);
}
2496 
2497 /*
2498  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2499  * pointers to the fragment pointers.
2500  */
int
dc_encap(struct dc_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct dc_desc *f = NULL;
	int frag, cur, cnt = 0, i;
	bus_dmamap_t map;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	map = sc->sc_tx_sparemap;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT) != 0)
		return (ENOBUFS);

	cur = frag = *txidx;

	for (i = 0; i < map->dm_nsegs; i++) {
		/*
		 * ADMtek workaround: don't let a packet wrap past the
		 * end of the ring unless it starts at slot 0.
		 */
		if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
			if (*txidx != sc->dc_cdata.dc_tx_prod &&
			    frag == (DC_TX_LIST_CNT - 1)) {
				bus_dmamap_unload(sc->sc_dmat, map);
				return (ENOBUFS);
			}
		}
		/* Always keep a few descriptors free. */
		if ((DC_TX_LIST_CNT -
		    (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) {
			bus_dmamap_unload(sc->sc_dmat, map);
			return (ENOBUFS);
		}

		f = &sc->dc_ldata->dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
		if (cnt == 0) {
			/*
			 * Leave the first descriptor un-owned for now;
			 * the OWN bit is set last, below, so the chip
			 * can't start on a half-built chain.
			 */
			f->dc_status = htole32(0);
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
		cnt++;
	}

	sc->dc_cdata.dc_tx_cnt += cnt;
	sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m_head;
	/* Swap the loaded map with the last slot's map for later reuse. */
	sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
	sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	/* Request a TX-done interrupt according to the chip's quirks. */
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Hand the whole chain to the chip only now that it's complete. */
	sc->dc_ldata->dc_tx_list[*txidx].dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list[*txidx]),
	    sizeof(struct dc_desc) * cnt,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	*txidx = frag;

	return (0);
}
2576 
2577 /*
2578  * Coalesce an mbuf chain into a single mbuf cluster buffer.
2579  * Needed for some really badly behaved chips that just can't
2580  * do scatter/gather correctly.
2581  */
2582 int
2583 dc_coal(struct dc_softc *sc, struct mbuf **m_head)
2584 {
2585 	struct mbuf		*m_new, *m;
2586 
2587 	m = *m_head;
2588 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2589 	if (m_new == NULL)
2590 		return (ENOBUFS);
2591 	if (m->m_pkthdr.len > MHLEN) {
2592 		MCLGET(m_new, M_DONTWAIT);
2593 		if (!(m_new->m_flags & M_EXT)) {
2594 			m_freem(m_new);
2595 			return (ENOBUFS);
2596 		}
2597 	}
2598 	m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t));
2599 	m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len;
2600 	m_freem(m);
2601 	*m_head = m_new;
2602 
2603 	return (0);
2604 }
2605 
2606 /*
2607  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2608  * to the mbuf data regions directly in the transmit lists. We also save a
2609  * copy of the pointers since the transmit list fragment pointers are
2610  * physical addresses.
2611  */
2612 
void
dc_start(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mbuf *m_head = NULL;
	int idx;

	sc = ifp->if_softc;

	/* Without link, hold packets back until a few have queued up. */
	if (!sc->dc_link && IFQ_LEN(&ifp->if_snd) < 10)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->dc_cdata.dc_tx_prod;

	/* Fill descriptors while free slots remain and packets are queued. */
	while(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sc->dc_flags & DC_TX_COALESCE &&
		    (m_head->m_next != NULL ||
			sc->dc_flags & DC_TX_ALIGN)) {
			/* note: dc_coal breaks the poll-and-dequeue rule.
			 * if dc_coal fails, we lose the packet.
			 */
			IFQ_DEQUEUE(&ifp->if_snd, m_head);
			if (dc_coal(sc, &m_head)) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		if (dc_encap(sc, m_head, &idx)) {
			/* Ring full: stall the queue until txeof frees it. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		if (sc->dc_flags & DC_TX_COALESCE) {
			/* if mbuf is coalesced, it is already dequeued */
		} else
			IFQ_DEQUEUE(&ifp->if_snd, m_head);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
		/* Chips that can only handle one packet at a time. */
		if (sc->dc_flags & DC_TX_ONE) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}
	/* Nothing was queued. */
	if (idx == sc->dc_cdata.dc_tx_prod)
		return;

	/* Transmit */
	sc->dc_cdata.dc_tx_prod = idx;
	if (!(sc->dc_flags & DC_TX_POLL))
		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2685 
/*
 * Stop, reset and (re)initialize the hardware: program bus/DMA settings
 * and transmit thresholds, load the descriptor rings, program the RX
 * filter, and bring the transmitter and receiver online.
 */
void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	int s;

	s = splnet();

	mii = &sc->sc_mii;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc, 0);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/* Choose store-and-forward vs. threshold-based transmit. */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/* Xircom GPIO pin setup for its transceiver. */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		dc_stop(sc, 0);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Sync down both lists initialized.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_rx_list[0]));
	CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Start the link-polling timer; HomePNA has no link negotiation. */
	timeout_set(&sc->dc_tick_tmo, dc_tick, sc);

	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		if (sc->dc_flags & DC_21143_NWAY)
			timeout_add_msec(&sc->dc_tick_tmo, 100);
		else
			timeout_add_sec(&sc->dc_tick_tmo, 1);
	}

#ifdef SRM_MEDIA
	if(sc->dc_srm_media) {
		/* Apply the media setting handed down by the SRM console. */
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
}
2871 
2872 /*
2873  * Set media options.
2874  */
2875 int
2876 dc_ifmedia_upd(struct ifnet *ifp)
2877 {
2878 	struct dc_softc *sc;
2879 	struct mii_data *mii;
2880 	struct ifmedia *ifm;
2881 
2882 	sc = ifp->if_softc;
2883 	mii = &sc->sc_mii;
2884 	mii_mediachg(mii);
2885 
2886 	ifm = &mii->mii_media;
2887 
2888 	if (DC_IS_DAVICOM(sc) &&
2889 	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2890 		dc_setcfg(sc, ifm->ifm_media);
2891 	else
2892 		sc->dc_link = 0;
2893 
2894 	return (0);
2895 }
2896 
2897 /*
2898  * Report current media status.
2899  */
2900 void
2901 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2902 {
2903 	struct dc_softc *sc;
2904 	struct mii_data *mii;
2905 	struct ifmedia *ifm;
2906 
2907 	sc = ifp->if_softc;
2908 	mii = &sc->sc_mii;
2909 	mii_pollstat(mii);
2910 	ifm = &mii->mii_media;
2911 	if (DC_IS_DAVICOM(sc)) {
2912 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
2913 			ifmr->ifm_active = ifm->ifm_media;
2914 			ifmr->ifm_status = 0;
2915 			return;
2916 		}
2917 	}
2918 	ifmr->ifm_active = mii->mii_media_active;
2919 	ifmr->ifm_status = mii->mii_media_status;
2920 }
2921 
/*
 * Handle socket ioctls: interface address/flags changes and media
 * selection.  Called at splnet via the network stack.
 */
int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct dc_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Bring the interface up and, for INET, prime ARP. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			dc_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: just reprogram the RX filter. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else {
				sc->dc_txthresh = 0;
				dc_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dc_stop(sc, 0);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
#ifdef SRM_MEDIA
		/* Explicit media selection overrides the SRM setting. */
		if (sc->dc_srm_media)
			sc->dc_srm_media = 0;
#endif
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	/* ENETRESET means "reload the RX filter", not a real error. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dc_setfilt(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
2976 
2977 void
2978 dc_watchdog(struct ifnet *ifp)
2979 {
2980 	struct dc_softc *sc;
2981 
2982 	sc = ifp->if_softc;
2983 
2984 	ifp->if_oerrors++;
2985 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2986 
2987 	dc_init(sc);
2988 
2989 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
2990 		dc_start(ifp);
2991 }
2992 
2993 /*
2994  * Stop the adapter and free any mbufs allocated to the
2995  * RX and TX lists.
2996  */
void
dc_stop(struct dc_softc *sc, int softonly)
{
	struct ifnet *ifp;
	u_int32_t isr;
	int i;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->dc_tick_tmo);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (!softonly) {
		/* Stop the DMA engines and wait for them to go idle. */
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if ((isr & DC_ISR_TX_IDLE ||
			    (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/* Some chips are known never to reach idle; only
			 * complain about the ones that should. */
			if (!((isr & DC_ISR_TX_IDLE) ||
			    (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
			    !DC_IS_ASIX(sc) && !DC_IS_DAVICOM(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}

		/* Mask interrupts and clear the ring base addresses. */
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
		CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
		CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
		sc->dc_link = 0;
	}

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->dc_ldata->dc_rx_list, sizeof(sc->dc_ldata->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
			/* Setup frames point at the setup buffer, not an
			 * mbuf; don't try to free those. */
			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
			    htole32(DC_TXCTL_SETUP)) {
				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
				continue;
			}
			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->dc_ldata->dc_tx_list, sizeof(sc->dc_ldata->dc_tx_list));

	/* Push the cleared rings back out to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
3086 
3087 int
3088 dc_activate(struct device *self, int act)
3089 {
3090 	struct dc_softc *sc = (struct dc_softc *)self;
3091 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3092 	int rv = 0;
3093 
3094 	switch (act) {
3095 	case DVACT_SUSPEND:
3096 		if (ifp->if_flags & IFF_RUNNING)
3097 			dc_stop(sc, 0);
3098 		rv = config_activate_children(self, act);
3099 		break;
3100 	case DVACT_RESUME:
3101 		if (ifp->if_flags & IFF_UP)
3102 			dc_init(sc);
3103 		break;
3104 	default:
3105 		rv = config_activate_children(self, act);
3106 		break;
3107 	}
3108 	return (rv);
3109 }
3110 
/*
 * Detach the device: quiesce the software state, then release the MII
 * PHYs, SROM copy, DMA maps, descriptor memory, and the interface.
 */
int
dc_detach(struct dc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	/* Soft stop only: don't touch registers of possibly-gone hardware. */
	dc_stop(sc, 1);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	if (sc->dc_srom)
		free(sc->dc_srom, M_DEVBUF, 0);

	for (i = 0; i < DC_RX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map);
	if (sc->sc_rx_sparemap)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_sparemap);
	for (i = 0; i < DC_TX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_tx_chain[i].sd_map);
	if (sc->sc_tx_sparemap)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_sparemap);

	/* XXX(review): no bus_dmamap_sync before unload here — confirm
	 * whether one is required for the descriptor list map. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_listmap);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_listkva, sc->sc_listnseg);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg);

	ether_ifdetach(ifp);
	if_detach(ifp);
	return (0);
}
3144 
3145 struct cfdriver dc_cd = {
3146 	0, "dc", DV_IFNET
3147 };
3148