xref: /openbsd/sys/dev/ic/dc.c (revision db3296cf)
1 /*	$OpenBSD: dc.c,v 1.61 2003/06/16 03:26:00 mickey Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35  */
36 
37 /*
38  * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39  * series chips and several workalikes including the following:
40  *
41  * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42  * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43  * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44  * ASIX Electronics AX88140A (www.asix.com.tw)
45  * ASIX Electronics AX88141 (www.asix.com.tw)
46  * ADMtek AL981 (www.admtek.com.tw)
47  * ADMtek AN983 (www.admtek.com.tw)
48  * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49  * Accton EN1217, EN2242 (www.accton.com)
50  * Xircom X3201 (www.xircom.com)
51  *
52  * Datasheets for the 21143 are available at developer.intel.com.
53  * Datasheets for the clone parts can be found at their respective sites.
54  * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55  * The PNIC II is essentially a Macronix 98715A chip; the only difference
56  * worth noting is that its multicast hash table is only 128 bits wide
57  * instead of 512.
58  *
59  * Written by Bill Paul <wpaul@ee.columbia.edu>
60  * Electrical Engineering Department
61  * Columbia University, New York City
62  */
63 
64 /*
65  * The Intel 21143 is the successor to the DEC 21140. It is basically
66  * the same as the 21140 but with a few new features. The 21143 supports
67  * four kinds of media attachments:
68  *
69  * o MII port, for 10Mbps and 100Mbps support and NWAY
70  *   autonegotiation provided by an external PHY.
71  * o SYM port, for symbol mode 100Mbps support.
72  * o 10baseT port.
73  * o AUI/BNC port.
74  *
75  * The 100Mbps SYM port and 10baseT port can be used together in
76  * combination with the internal NWAY support to create a 10/100
77  * autosensing configuration.
78  *
79  * Note that not all tulip workalikes are handled in this driver: we only
80  * deal with those which are relatively well behaved. The Winbond is
81  * handled separately due to its different register offsets and the
82  * special handling needed for its various bugs. The PNIC is handled
83  * here, but I'm not thrilled about it.
84  *
85  * All of the workalike chips use some form of MII transceiver support
86  * with the exception of the Macronix chips, which also have a SYM port.
87  * The ASIX AX88140A is also documented to have a SYM port, but all
88  * the cards I've seen use an MII transceiver, probably because the
89  * AX88140A doesn't support internal NWAY.
90  */
91 
92 #include "bpfilter.h"
93 #include "vlan.h"
94 
95 #include <sys/param.h>
96 #include <sys/systm.h>
97 #include <sys/mbuf.h>
98 #include <sys/protosw.h>
99 #include <sys/socket.h>
100 #include <sys/ioctl.h>
101 #include <sys/errno.h>
102 #include <sys/malloc.h>
103 #include <sys/kernel.h>
104 #include <sys/device.h>
105 #include <sys/timeout.h>
106 
107 #include <net/if.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 
111 #ifdef INET
112 #include <netinet/in.h>
113 #include <netinet/in_systm.h>
114 #include <netinet/in_var.h>
115 #include <netinet/ip.h>
116 #include <netinet/if_ether.h>
117 #endif
118 
119 #include <net/if_media.h>
120 
121 #if NBPFILTER > 0
122 #include <net/bpf.h>
123 #endif
124 
125 #include <dev/mii/mii.h>
126 #include <dev/mii/miivar.h>
127 
128 #include <machine/bus.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #include <dev/ic/dcreg.h>
132 
133 int dc_intr(void *);
134 void dc_shutdown(void *);
135 struct dc_type *dc_devtype(void *);
136 int dc_newbuf(struct dc_softc *, int, struct mbuf *);
137 int dc_encap(struct dc_softc *, struct mbuf *, u_int32_t *);
138 int dc_coal(struct dc_softc *, struct mbuf **);
139 
140 void dc_pnic_rx_bug_war(struct dc_softc *, int);
141 int dc_rx_resync(struct dc_softc *);
142 void dc_rxeof(struct dc_softc *);
143 void dc_txeof(struct dc_softc *);
144 void dc_tick(void *);
145 void dc_start(struct ifnet *);
146 int dc_ioctl(struct ifnet *, u_long, caddr_t);
147 void dc_init(void *);
148 void dc_stop(struct dc_softc *);
149 void dc_watchdog(struct ifnet *);
150 int dc_ifmedia_upd(struct ifnet *);
151 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
152 
153 void dc_delay(struct dc_softc *);
154 void dc_eeprom_width(struct dc_softc *);
155 void dc_eeprom_idle(struct dc_softc *);
156 void dc_eeprom_putbyte(struct dc_softc *, int);
157 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
158 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
159 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
160 
161 void dc_mii_writebit(struct dc_softc *, int);
162 int dc_mii_readbit(struct dc_softc *);
163 void dc_mii_sync(struct dc_softc *);
164 void dc_mii_send(struct dc_softc *, u_int32_t, int);
165 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
166 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
167 int dc_miibus_readreg(struct device *, int, int);
168 void dc_miibus_writereg(struct device *, int, int, int);
169 void dc_miibus_statchg(struct device *);
170 
171 void dc_setcfg(struct dc_softc *, int);
172 u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
173 u_int32_t dc_crc_be(caddr_t);
174 void dc_setfilt_21143(struct dc_softc *);
175 void dc_setfilt_asix(struct dc_softc *);
176 void dc_setfilt_admtek(struct dc_softc *);
177 void dc_setfilt_xircom(struct dc_softc *);
178 
179 void dc_setfilt(struct dc_softc *);
180 
181 void dc_reset(struct dc_softc *);
182 int dc_list_rx_init(struct dc_softc *);
183 int dc_list_tx_init(struct dc_softc *);
184 
185 void dc_read_srom(struct dc_softc *, int);
186 void dc_parse_21143_srom(struct dc_softc *);
187 void dc_decode_leaf_sia(struct dc_softc *,
188 				     struct dc_eblock_sia *);
189 void dc_decode_leaf_mii(struct dc_softc *,
190 				     struct dc_eblock_mii *);
191 void dc_decode_leaf_sym(struct dc_softc *,
192 				     struct dc_eblock_sym *);
193 void dc_apply_fixup(struct dc_softc *, int);
194 
195 #define DC_SETBIT(sc, reg, x)				\
196 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
197 
198 #define DC_CLRBIT(sc, reg, x)				\
199 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
200 
201 #define SIO_SET(x)	DC_SETBIT(sc, DC_SIO, (x))
202 #define SIO_CLR(x)	DC_CLRBIT(sc, DC_SIO, (x))
203 
204 void
205 dc_delay(sc)
206 	struct dc_softc *sc;
207 {
208 	int idx;
209 
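	/*
	 * Each CSR read goes out over the PCI bus and takes on the
	 * order of 30ns, so roughly ten reads of the busctl register
	 * give the ~300ns of settling time the serial EEPROM/MII
	 * interface needs between transitions.
	 */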
210 	for (idx = (300 / 33) + 1; idx > 0; idx--)
211 		CSR_READ_4(sc, DC_BUSCTL);
212 }
213 
214 void
215 dc_eeprom_width(sc)
216 	struct dc_softc *sc;
217 {
218 	int i;
219 
220 	/* Force EEPROM to idle state. */
221 	dc_eeprom_idle(sc);
222 
223 	/* Enter EEPROM access mode. */
224 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
225 	dc_delay(sc);
226 	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
227 	dc_delay(sc);
228 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
229 	dc_delay(sc);
230 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
231 	dc_delay(sc);
232 
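	/*
	 * Clock out the 93Cxx start bit and READ opcode (110b), then
	 * feed in address bits one at a time.  The EEPROM drives its
	 * data-out pin low once it has seen a complete address, so the
	 * number of bits clocked before that happens tells us how wide
	 * the ROM's address field is.
	 */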
233 	for (i = 3; i--;) {
234 		if (6 & (1 << i))
235 			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
236 		else
237 			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
238 		dc_delay(sc);
239 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
240 		dc_delay(sc);
241 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
242 		dc_delay(sc);
243 	}
244 
245 	for (i = 1; i <= 12; i++) {
246 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
247 		dc_delay(sc);
248 		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
249 			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
250 			dc_delay(sc);
251 			break;
252 		}
253 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
254 		dc_delay(sc);
255 	}
256 
257 	/* Turn off EEPROM access mode. */
258 	dc_eeprom_idle(sc);
259 
260 	if (i < 4 || i > 12)
261 		sc->dc_romwidth = 6;
262 	else
263 		sc->dc_romwidth = i;
264 
265 	/* Enter EEPROM access mode. */
266 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
267 	dc_delay(sc);
268 	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
269 	dc_delay(sc);
270 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
271 	dc_delay(sc);
272 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
273 	dc_delay(sc);
274 
275 	/* Turn off EEPROM access mode. */
276 	dc_eeprom_idle(sc);
277 }
278 
279 void
280 dc_eeprom_idle(sc)
281 	struct dc_softc *sc;
282 {
283 	int i;
284 
285 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
286 	dc_delay(sc);
287 	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
288 	dc_delay(sc);
289 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
290 	dc_delay(sc);
291 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
292 	dc_delay(sc);
293 
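	/*
	 * Clock the EEPROM 25 times with chip select asserted to flush
	 * out any partially shifted command, then deselect it.
	 */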
294 	for (i = 0; i < 25; i++) {
295 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
296 		dc_delay(sc);
297 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
298 		dc_delay(sc);
299 	}
300 
301 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
302 	dc_delay(sc);
303 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
304 	dc_delay(sc);
305 	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
306 }
307 
308 /*
309  * Send a read command and address to the EEPROM.
310  */
311 void
312 dc_eeprom_putbyte(sc, addr)
313 	struct dc_softc *sc;
314 	int addr;
315 {
316 	int d, i;
317 
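	/*
	 * DC_EECMD_READ >> 6 yields the 93Cxx start bit plus READ
	 * opcode (110b); the address bits follow below, MSB first,
	 * sized to the ROM width detected in dc_eeprom_width().
	 */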
318 	d = DC_EECMD_READ >> 6;
319 
320 	for (i = 3; i--; ) {
321 		if (d & (1 << i))
322 			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
323 		else
324 			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
325 		dc_delay(sc);
326 		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
327 		dc_delay(sc);
328 		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
329 		dc_delay(sc);
330 	}
331 
332 	/*
333 	 * Feed in each bit and strobe the clock.
334 	 */
335 	for (i = sc->dc_romwidth; i--;) {
336 		if (addr & (1 << i)) {
337 			SIO_SET(DC_SIO_EE_DATAIN);
338 		} else {
339 			SIO_CLR(DC_SIO_EE_DATAIN);
340 		}
341 		dc_delay(sc);
342 		SIO_SET(DC_SIO_EE_CLK);
343 		dc_delay(sc);
344 		SIO_CLR(DC_SIO_EE_CLK);
345 		dc_delay(sc);
346 	}
347 }
348 
349 /*
350  * Read a word of data stored in the EEPROM at address 'addr.'
351  * The PNIC 82c168/82c169 has its own non-standard way to read
352  * the EEPROM.
353  */
354 void
355 dc_eeprom_getword_pnic(sc, addr, dest)
356 	struct dc_softc *sc;
357 	int addr;
358 	u_int16_t *dest;
359 {
360 	int i;
361 	u_int32_t r;
362 
363 	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);
364 
365 	for (i = 0; i < DC_TIMEOUT; i++) {
366 		DELAY(1);
367 		r = CSR_READ_4(sc, DC_SIO);
368 		if (!(r & DC_PN_SIOCTL_BUSY)) {
369 			*dest = (u_int16_t)(r & 0xFFFF);
370 			return;
371 		}
372 	}
373 }
374 
375 /*
376  * Read a word of data stored in the EEPROM at address 'addr.'
377  */
378 void
379 dc_eeprom_getword(sc, addr, dest)
380 	struct dc_softc *sc;
381 	int addr;
382 	u_int16_t *dest;
383 {
384 	int i;
385 	u_int16_t word = 0;
386 
387 	/* Force EEPROM to idle state. */
388 	dc_eeprom_idle(sc);
389 
390 	/* Enter EEPROM access mode. */
391 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
392 	dc_delay(sc);
393 	DC_SETBIT(sc, DC_SIO,  DC_SIO_ROMCTL_READ);
394 	dc_delay(sc);
395 	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
396 	dc_delay(sc);
397 	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
398 	dc_delay(sc);
399 
400 	/*
401 	 * Send address of word we want to read.
402 	 */
403 	dc_eeprom_putbyte(sc, addr);
404 
405 	/*
406 	 * Start reading bits from EEPROM.
407 	 */
408 	for (i = 0x8000; i; i >>= 1) {
409 		SIO_SET(DC_SIO_EE_CLK);
410 		dc_delay(sc);
411 		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
412 			word |= i;
413 		dc_delay(sc);
414 		SIO_CLR(DC_SIO_EE_CLK);
415 		dc_delay(sc);
416 	}
417 
418 	/* Turn off EEPROM access mode. */
419 	dc_eeprom_idle(sc);
420 
421 	*dest = word;
422 }
423 
424 /*
425  * Read a sequence of words from the EEPROM.
426  */
427 void dc_read_eeprom(sc, dest, off, cnt, swap)
428 	struct dc_softc *sc;
429 	caddr_t dest;
430 	int off, cnt, swap;
431 {
432 	int i;
433 	u_int16_t word = 0, *ptr;
434 
435 	for (i = 0; i < cnt; i++) {
436 		if (DC_IS_PNIC(sc))
437 			dc_eeprom_getword_pnic(sc, off + i, &word);
438 		else
439 			dc_eeprom_getword(sc, off + i, &word);
440 		ptr = (u_int16_t *)(dest + (i * 2));
441 		if (swap)
442 			*ptr = betoh16(word);
443 		else
444 			*ptr = letoh16(word);
445 	}
446 }
447 
448 /*
449  * The following two routines are taken from the Macronix 98713
450  * Application Notes pp.19-21.
451  */
452 /*
453  * Write a bit to the MII bus.
454  */
455 void
456 dc_mii_writebit(sc, bit)
457 	struct dc_softc *sc;
458 	int bit;
459 {
460 	if (bit)
461 		CSR_WRITE_4(sc, DC_SIO,
462 		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
463 	else
464 		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
465 
466 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
467 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
468 }
469 
470 /*
471  * Read a bit from the MII bus.
472  */
473 int
474 dc_mii_readbit(sc)
475 	struct dc_softc *sc;
476 {
477 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
478 	CSR_READ_4(sc, DC_SIO);
479 	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
480 	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
481 	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
482 		return (1);
483 	return (0);
484 }
485 
486 /*
487  * Sync the PHYs by setting data bit and strobing the clock 32 times.
488  */
489 void
490 dc_mii_sync(sc)
491 	struct dc_softc *sc;
492 {
493 	int i;
494 
495 	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
496 
497 	for (i = 0; i < 32; i++)
498 		dc_mii_writebit(sc, 1);
499 }
500 
501 /*
502  * Clock a series of bits through the MII.
503  */
504 void
505 dc_mii_send(sc, bits, cnt)
506 	struct dc_softc *sc;
507 	u_int32_t bits;
508 	int cnt;
509 {
510 	int i;
511 
512 	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
513 		dc_mii_writebit(sc, bits & i);
514 }
515 
516 /*
517  * Read a PHY register through the MII.
518  */
519 int
520 dc_mii_readreg(sc, frame)
521 	struct dc_softc *sc;
522 	struct dc_mii_frame *frame;
523 {
524 	int i, ack, s;
525 
526 	s = splimp();
527 
528 	/*
529 	 * Set up frame for RX.
530 	 */
531 	frame->mii_stdelim = DC_MII_STARTDELIM;
532 	frame->mii_opcode = DC_MII_READOP;
533 	frame->mii_turnaround = 0;
534 	frame->mii_data = 0;
535 
536 	/*
537 	 * Sync the PHYs.
538 	 */
539 	dc_mii_sync(sc);
540 
541 	/*
542 	 * Send command/address info.
543 	 */
544 	dc_mii_send(sc, frame->mii_stdelim, 2);
545 	dc_mii_send(sc, frame->mii_opcode, 2);
546 	dc_mii_send(sc, frame->mii_phyaddr, 5);
547 	dc_mii_send(sc, frame->mii_regaddr, 5);
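	/*
	 * This follows the standard IEEE 802.3 clause 22 management
	 * frame format.  For a read, the PHY drives the turnaround and
	 * data bits, so from here on we only clock bits in.
	 */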
548 
549 #ifdef notdef
550 	/* Idle bit */
551 	dc_mii_writebit(sc, 1);
552 	dc_mii_writebit(sc, 0);
553 #endif
554 
555 	/* Check for ack */
556 	ack = dc_mii_readbit(sc);
557 
558 	/*
559 	 * Now try reading data bits. If the ack failed, we still
560 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
561 	 */
562 	if (ack) {
563 		for(i = 0; i < 16; i++) {
564 			dc_mii_readbit(sc);
565 		}
566 		goto fail;
567 	}
568 
569 	for (i = 0x8000; i; i >>= 1) {
570 		if (dc_mii_readbit(sc))
571 			frame->mii_data |= i;
572 	}
573 
574 fail:
575 
576 	dc_mii_writebit(sc, 0);
577 	dc_mii_writebit(sc, 0);
578 
579 	splx(s);
580 
581 	if (ack)
582 		return (1);
583 	return (0);
584 }
585 
586 /*
587  * Write to a PHY register through the MII.
588  */
589 int
590 dc_mii_writereg(sc, frame)
591 	struct dc_softc *sc;
592 	struct dc_mii_frame *frame;
593 {
594 	int s;
595 
596 	s = splimp();
597 	/*
598 	 * Set up frame for TX.
599 	 */
600 
601 	frame->mii_stdelim = DC_MII_STARTDELIM;
602 	frame->mii_opcode = DC_MII_WRITEOP;
603 	frame->mii_turnaround = DC_MII_TURNAROUND;
604 
605 	/*
606 	 * Sync the PHYs.
607 	 */
608 	dc_mii_sync(sc);
609 
610 	dc_mii_send(sc, frame->mii_stdelim, 2);
611 	dc_mii_send(sc, frame->mii_opcode, 2);
612 	dc_mii_send(sc, frame->mii_phyaddr, 5);
613 	dc_mii_send(sc, frame->mii_regaddr, 5);
614 	dc_mii_send(sc, frame->mii_turnaround, 2);
615 	dc_mii_send(sc, frame->mii_data, 16);
616 
617 	/* Idle bit. */
618 	dc_mii_writebit(sc, 0);
619 	dc_mii_writebit(sc, 0);
620 
621 	splx(s);
622 	return (0);
623 }
624 
625 int
626 dc_miibus_readreg(self, phy, reg)
627 	struct device *self;
628 	int phy, reg;
629 {
630 	struct dc_mii_frame frame;
631 	struct dc_softc *sc = (struct dc_softc *)self;
632 	int i, rval, phy_reg;
633 
634 	/*
635 	 * Note: both the AL981 and AN983 have internal PHYs,
636 	 * however the AL981 provides direct access to the PHY
637 	 * registers while the AN983 uses a serial MII interface.
638 	 * The AN983's MII interface is also buggy in that you
639 	 * can read from any MII address (0 to 31), but only address 1
640 	 * behaves normally. To deal with both cases, we pretend
641 	 * that the PHY is at MII address 1.
642 	 */
643 	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
644 		return (0);
645 
646 	/*
647 	 * Note: the ukphy probes of the RS7112 report a PHY at
648 	 * MII address 0 (possibly HomePNA?) and 1 (Ethernet),
649 	 * so we only respond to the correct one.
650 	 */
651 	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
652 		return (0);
653 
654 	if (sc->dc_pmode != DC_PMODE_MII) {
655 		if (phy == (MII_NPHY - 1)) {
656 			switch(reg) {
657 			case MII_BMSR:
658 				/*
659 				 * Fake something to make the probe
660 				 * code think there's a PHY here.
661 				 */
662 				return (BMSR_MEDIAMASK);
663 				break;
664 			case MII_PHYIDR1:
665 				if (DC_IS_PNIC(sc))
666 					return (PCI_VENDOR_LITEON);
667 				return (PCI_VENDOR_DEC);
668 				break;
669 			case MII_PHYIDR2:
670 				if (DC_IS_PNIC(sc))
671 					return (PCI_PRODUCT_LITEON_PNIC);
672 				return (PCI_PRODUCT_DEC_21142);
673 				break;
674 			default:
675 				return (0);
676 				break;
677 			}
678 		} else
679 			return (0);
680 	}
681 
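	/*
	 * The PNIC exposes MII management through a single register:
	 * the PHY and register numbers are packed into the opcode word
	 * and the result is polled back out of the same register.
	 */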
682 	if (DC_IS_PNIC(sc)) {
683 		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
684 		    (phy << 23) | (reg << 18));
685 		for (i = 0; i < DC_TIMEOUT; i++) {
686 			DELAY(1);
687 			rval = CSR_READ_4(sc, DC_PN_MII);
688 			if (!(rval & DC_PN_MII_BUSY)) {
689 				rval &= 0xFFFF;
690 				return (rval == 0xFFFF ? 0 : rval);
691 			}
692 		}
693 		return (0);
694 	}
695 
696 	if (DC_IS_COMET(sc)) {
697 		switch(reg) {
698 		case MII_BMCR:
699 			phy_reg = DC_AL_BMCR;
700 			break;
701 		case MII_BMSR:
702 			phy_reg = DC_AL_BMSR;
703 			break;
704 		case MII_PHYIDR1:
705 			phy_reg = DC_AL_VENID;
706 			break;
707 		case MII_PHYIDR2:
708 			phy_reg = DC_AL_DEVID;
709 			break;
710 		case MII_ANAR:
711 			phy_reg = DC_AL_ANAR;
712 			break;
713 		case MII_ANLPAR:
714 			phy_reg = DC_AL_LPAR;
715 			break;
716 		case MII_ANER:
717 			phy_reg = DC_AL_ANER;
718 			break;
719 		default:
720 			printf("%s: phy_read: bad phy register %x\n",
721 			    sc->sc_dev.dv_xname, reg);
722 			return (0);
723 			break;
724 		}
725 
726 		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;
727 
728 		if (rval == 0xFFFF)
729 			return (0);
730 		return (rval);
731 	}
732 
733 	bzero(&frame, sizeof(frame));
734 
735 	frame.mii_phyaddr = phy;
736 	frame.mii_regaddr = reg;
737 	if (sc->dc_type == DC_TYPE_98713) {
738 		phy_reg = CSR_READ_4(sc, DC_NETCFG);
739 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
740 	}
741 	dc_mii_readreg(sc, &frame);
742 	if (sc->dc_type == DC_TYPE_98713)
743 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
744 
745 	return (frame.mii_data);
746 }
747 
748 void
749 dc_miibus_writereg(self, phy, reg, data)
750 	struct device *self;
751 	int phy, reg, data;
752 {
753 	struct dc_softc *sc = (struct dc_softc *)self;
754 	struct dc_mii_frame frame;
755 	int i, phy_reg;
756 
757 	bzero((char *)&frame, sizeof(frame));
758 
759 	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
760 		return;
761 	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
762 		return;
763 
764 	if (DC_IS_PNIC(sc)) {
765 		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
766 		    (phy << 23) | (reg << 10) | data);
767 		for (i = 0; i < DC_TIMEOUT; i++) {
768 			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
769 				break;
770 		}
771 		return;
772 	}
773 
774 	if (DC_IS_COMET(sc)) {
775 		switch(reg) {
776 		case MII_BMCR:
777 			phy_reg = DC_AL_BMCR;
778 			break;
779 		case MII_BMSR:
780 			phy_reg = DC_AL_BMSR;
781 			break;
782 		case MII_PHYIDR1:
783 			phy_reg = DC_AL_VENID;
784 			break;
785 		case MII_PHYIDR2:
786 			phy_reg = DC_AL_DEVID;
787 			break;
788 		case MII_ANAR:
789 			phy_reg = DC_AL_ANAR;
790 			break;
791 		case MII_ANLPAR:
792 			phy_reg = DC_AL_LPAR;
793 			break;
794 		case MII_ANER:
795 			phy_reg = DC_AL_ANER;
796 			break;
797 		default:
798 			printf("%s: phy_write: bad phy register %x\n",
799 			    sc->sc_dev.dv_xname, reg);
800 			return;
801 			break;
802 		}
803 
804 		CSR_WRITE_4(sc, phy_reg, data);
805 		return;
806 	}
807 
808 	frame.mii_phyaddr = phy;
809 	frame.mii_regaddr = reg;
810 	frame.mii_data = data;
811 
812 	if (sc->dc_type == DC_TYPE_98713) {
813 		phy_reg = CSR_READ_4(sc, DC_NETCFG);
814 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
815 	}
816 	dc_mii_writereg(sc, &frame);
817 	if (sc->dc_type == DC_TYPE_98713)
818 		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
819 }
820 
821 void
822 dc_miibus_statchg(self)
823 	struct device *self;
824 {
825 	struct dc_softc *sc = (struct dc_softc *)self;
826 	struct mii_data *mii;
827 	struct ifmedia *ifm;
828 
829 	if (DC_IS_ADMTEK(sc))
830 		return;
831 
832 	mii = &sc->sc_mii;
833 	ifm = &mii->mii_media;
834 	if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
835 		dc_setcfg(sc, ifm->ifm_media);
836 		sc->dc_if_media = ifm->ifm_media;
837 	} else {
838 		dc_setcfg(sc, mii->mii_media_active);
839 		sc->dc_if_media = mii->mii_media_active;
840 	}
841 }
842 
843 #define DC_POLY		0xEDB88320
844 #define DC_BITS_512	9
845 #define DC_BITS_128	7
846 #define DC_BITS_64	6
847 
848 u_int32_t
849 dc_crc_le(sc, addr)
850 	struct dc_softc *sc;
851 	caddr_t addr;
852 {
853 	u_int32_t idx, bit, data, crc;
854 
855 	/* Compute CRC for the address value. */
856 	crc = 0xFFFFFFFF; /* initial value */
857 
858 	for (idx = 0; idx < 6; idx++) {
859 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
860 			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0);
861 	}
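	/*
	 * The chips index their multicast hash table with the
	 * low-order bits of this little-endian CRC32; how many bits
	 * are used depends on the table size, handled below.
	 */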
862 
863 	/*
864 	 * The hash table on the PNIC II and the MX98715AEC-C/D/E
865 	 * chips is only 128 bits wide.
866 	 */
867 	if (sc->dc_flags & DC_128BIT_HASH)
868 		return (crc & ((1 << DC_BITS_128) - 1));
869 
870 	/* The hash table on the MX98715BEC is only 64 bits wide. */
871 	if (sc->dc_flags & DC_64BIT_HASH)
872 		return (crc & ((1 << DC_BITS_64) - 1));
873 
874 	/* Xircom's hash filtering table is different (read: weird) */
875 	/* Xircom uses the LEAST significant bits */
876 	if (DC_IS_XIRCOM(sc)) {
877 		if ((crc & 0x180) == 0x180)
878 			return ((crc & 0x0F) + (crc & 0x70)*3 + (14 << 4));
879 		else
880 			return ((crc & 0x1F) + ((crc >> 1) & 0xF0)*3 + (12 << 4));
881 	}
882 
883 	return (crc & ((1 << DC_BITS_512) - 1));
884 }
885 
886 /*
887  * Calculate CRC of a multicast group address, return the upper 6 bits.
888  */
889 u_int32_t
890 dc_crc_be(addr)
891 	caddr_t addr;
892 {
893 	u_int32_t crc, carry;
894 	int i, j;
895 	u_int8_t c;
896 
897 	/* Compute CRC for the address value. */
898 	crc = 0xFFFFFFFF; /* initial value */
899 
900 	for (i = 0; i < 6; i++) {
901 		c = *(addr + i);
902 		for (j = 0; j < 8; j++) {
903 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
904 			crc <<= 1;
905 			c >>= 1;
906 			if (carry)
907 				crc = (crc ^ 0x04c11db6) | carry;
908 		}
909 	}
910 
911 	/* return the filter bit position */
912 	return ((crc >> 26) & 0x0000003F);
913 }
914 
915 /*
916  * 21143-style RX filter setup routine. Filter programming is done by
917  * downloading a special setup frame into the TX engine. 21143, Macronix,
918  * PNIC, PNIC II and Davicom chips are programmed this way.
919  *
920  * We always program the chip using 'hash perfect' mode, i.e. one perfect
921  * address (our node address) and a 512-bit hash filter for multicast
922  * frames. We also sneak the broadcast address into the hash filter since
923  * we need that too.
924  */
925 void
926 dc_setfilt_21143(sc)
927 	struct dc_softc *sc;
928 {
929 	struct dc_desc *sframe;
930 	u_int32_t h, *sp;
931 	struct arpcom *ac = &sc->sc_arpcom;
932 	struct ether_multi *enm;
933 	struct ether_multistep step;
934 	struct ifnet *ifp;
935 	int i;
936 
937 	ifp = &sc->sc_arpcom.ac_if;
938 
939 	i = sc->dc_cdata.dc_tx_prod;
940 	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
941 	sc->dc_cdata.dc_tx_cnt++;
942 	sframe = &sc->dc_ldata->dc_tx_list[i];
943 	sp = &sc->dc_ldata->dc_sbuf[0];
944 	bzero((char *)sp, DC_SFRAME_LEN);
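	/*
	 * In hash-perfect mode the setup frame carries a 512-bit
	 * multicast hash table in the low 16 bits of its first 32
	 * longwords, with our station address in longwords 39-41.
	 */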
945 
946 	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
947 	    offsetof(struct dc_list_data, dc_sbuf));
948 	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
949 	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
950 
951 	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
952 	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
953 
954 	/* If we want promiscuous mode, set the allframes bit. */
955 	if (ifp->if_flags & IFF_PROMISC)
956 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
957 	else
958 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
959 
960 	if (ifp->if_flags & IFF_ALLMULTI)
961 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
962 	else
963 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
964 
965 	ETHER_FIRST_MULTI(step, ac, enm);
966 	while (enm != NULL) {
967 		h = dc_crc_le(sc, enm->enm_addrlo);
968 		sp[h >> 4] |= htole32(1 << (h & 0xF));
969 		ETHER_NEXT_MULTI(step, enm);
970 	}
971 
972 	if (ifp->if_flags & IFF_BROADCAST) {
973 		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
974 		sp[h >> 4] |= htole32(1 << (h & 0xF));
975 	}
976 
977 	/* Set our MAC address */
978 	sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
979 	sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
980 	sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);
981 
982 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
983 	    offsetof(struct dc_list_data, dc_sbuf[0]),
984 	    sizeof(struct dc_list_data) -
985 	    offsetof(struct dc_list_data, dc_sbuf[0]),
986 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
987 
988 	sframe->dc_status = htole32(DC_TXSTAT_OWN);
989 
990 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
991 	    offsetof(struct dc_list_data, dc_tx_list[i]),
992 	    sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
993 
994 	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
995 
996 	/*
997 	 * The PNIC takes an exceedingly long time to process its
998 	 * setup frame; wait 10ms after posting the setup frame
999 	 * before proceeding, just so it has time to swallow its
1000 	 * medicine.
1001 	 */
1002 	DELAY(10000);
1003 
1004 	ifp->if_timer = 5;
1005 }
1006 
1007 void
1008 dc_setfilt_admtek(sc)
1009 	struct dc_softc *sc;
1010 {
1011 	struct ifnet *ifp;
1012 	struct arpcom *ac = &sc->sc_arpcom;
1013 	struct ether_multi *enm;
1014 	struct ether_multistep step;
1015 	int h = 0;
1016 	u_int32_t hashes[2] = { 0, 0 };
1017 
1018 	ifp = &sc->sc_arpcom.ac_if;
1019 
1020 	/* Init our MAC address */
1021 	CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
1022 	CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));
1023 
1024 	/* If we want promiscuous mode, set the allframes bit. */
1025 	if (ifp->if_flags & IFF_PROMISC)
1026 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1027 	else
1028 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1029 
1030 	if (ifp->if_flags & IFF_ALLMULTI)
1031 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1032 	else
1033 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1034 
1035 	/* first, zot all the existing hash bits */
1036 	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
1037 	CSR_WRITE_4(sc, DC_AL_MAR1, 0);
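	/*
	 * The multicast hash on these parts is 64 bits wide, split
	 * across the two MAR registers; the CRC routines below return
	 * the bit index to set.
	 */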
1038 
1039 	/*
1040 	 * If we're already in promisc or allmulti mode, we
1041 	 * don't have to bother programming the multicast filter.
1042 	 */
1043 	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
1044 		return;
1045 
1046 	/* now program new ones */
1047 	ETHER_FIRST_MULTI(step, ac, enm);
1048 	while (enm != NULL) {
1049 		if (DC_IS_CENTAUR(sc))
1050 			h = dc_crc_le(sc, enm->enm_addrlo);
1051 		else
1052 			h = dc_crc_be(enm->enm_addrlo);
1053 		if (h < 32)
1054 			hashes[0] |= (1 << h);
1055 		else
1056 			hashes[1] |= (1 << (h - 32));
1057 		ETHER_NEXT_MULTI(step, enm);
1058 	}
1059 
1060 	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
1061 	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
1062 }
1063 
1064 void
1065 dc_setfilt_asix(sc)
1066 	struct dc_softc *sc;
1067 {
1068 	struct ifnet *ifp;
1069 	struct arpcom *ac = &sc->sc_arpcom;
1070 	struct ether_multi *enm;
1071 	struct ether_multistep step;
1072 	int h = 0;
1073 	u_int32_t hashes[2] = { 0, 0 };
1074 
1075 	ifp = &sc->sc_arpcom.ac_if;
1076 
1077 	/* Init our MAC address */
1078 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
1079 	CSR_WRITE_4(sc, DC_AX_FILTDATA,
1080 	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
1081 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
1082 	CSR_WRITE_4(sc, DC_AX_FILTDATA,
1083 	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));
1084 
1085 	/* If we want promiscuous mode, set the allframes bit. */
1086 	if (ifp->if_flags & IFF_PROMISC)
1087 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1088 	else
1089 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1090 
1091 	if (ifp->if_flags & IFF_ALLMULTI)
1092 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1093 	else
1094 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1095 
1096 	/*
1097 	 * The ASIX chip has a special bit to enable reception
1098 	 * of broadcast frames.
1099 	 */
1100 	if (ifp->if_flags & IFF_BROADCAST)
1101 		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
1102 	else
1103 		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
1104 
1105 	/* first, zot all the existing hash bits */
1106 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
1107 	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
1108 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
1109 	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
1110 
1111 	/*
1112 	 * If we're already in promisc or allmulti mode, we
1113 	 * don't have to bother programming the multicast filter.
1114 	 */
1115 	if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI))
1116 		return;
1117 
1118 	/* now program new ones */
1119 	ETHER_FIRST_MULTI(step, ac, enm);
1120 	while (enm != NULL) {
1121 		h = dc_crc_be(enm->enm_addrlo);
1122 		if (h < 32)
1123 			hashes[0] |= (1 << h);
1124 		else
1125 			hashes[1] |= (1 << (h - 32));
1126 		ETHER_NEXT_MULTI(step, enm);
1127 	}
1128 
1129 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
1130 	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
1131 	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
1132 	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
1133 }
1134 
1135 void
1136 dc_setfilt_xircom(sc)
1137 	struct dc_softc *sc;
1138 {
1139 	struct dc_desc *sframe;
1140 	struct arpcom *ac = &sc->sc_arpcom;
1141 	struct ether_multi *enm;
1142 	struct ether_multistep step;
1143 	u_int32_t h, *sp;
1144 	struct ifnet *ifp;
1145 	int i;
1146 
1147 	ifp = &sc->sc_arpcom.ac_if;
1148 	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));
1149 
1150 	i = sc->dc_cdata.dc_tx_prod;
1151 	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
1152 	sc->dc_cdata.dc_tx_cnt++;
1153 	sframe = &sc->dc_ldata->dc_tx_list[i];
1154 	sp = &sc->dc_ldata->dc_sbuf[0];
1155 	bzero((char *)sp, DC_SFRAME_LEN);
1156 
1157 	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
1158 	    offsetof(struct dc_list_data, dc_sbuf));
1159 	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
1160 	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);
1161 
1162 	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
1163 	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];
1164 
1165 	/* If we want promiscuous mode, set the allframes bit. */
1166 	if (ifp->if_flags & IFF_PROMISC)
1167 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1168 	else
1169 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
1170 
1171 	if (ifp->if_flags & IFF_ALLMULTI)
1172 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1173 	else
1174 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
1175 
1176 	/* now program new ones */
1177 	ETHER_FIRST_MULTI(step, ac, enm);
1178 	while (enm != NULL) {
1179 		h = dc_crc_le(sc, enm->enm_addrlo);
1180 		sp[h >> 4] |= htole32(1 << (h & 0xF));
1181 		ETHER_NEXT_MULTI(step, enm);
1182 	}
1183 
1184 	if (ifp->if_flags & IFF_BROADCAST) {
1185 		h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
1186 		sp[h >> 4] |= htole32(1 << (h & 0xF));
1187 	}
1188 
1189 	/* Set our MAC address */
1190 	sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
1191 	sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
1192 	sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);
1193 
1194 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
1195 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
1196 	ifp->if_flags |= IFF_RUNNING;
1197 	sframe->dc_status = htole32(DC_TXSTAT_OWN);
1198 	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
1199 
1200 	/*
1201 	 * wait some time...
1202 	 */
1203 	DELAY(1000);
1204 
1205 	ifp->if_timer = 5;
1206 }
1207 
1208 void
1209 dc_setfilt(sc)
1210 	struct dc_softc *sc;
1211 {
1212 	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
1213 	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
1214 		dc_setfilt_21143(sc);
1215 
1216 	if (DC_IS_ASIX(sc))
1217 		dc_setfilt_asix(sc);
1218 
1219 	if (DC_IS_ADMTEK(sc))
1220 		dc_setfilt_admtek(sc);
1221 
1222 	if (DC_IS_XIRCOM(sc))
1223 		dc_setfilt_xircom(sc);
1224 }
1225 
1226 /*
1227  * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
1228  * netconfig register, we first have to put the transmit and/or receive
1229  * logic in the idle state.
1230  */
1231 void
1232 dc_setcfg(sc, media)
1233 	struct dc_softc *sc;
1234 	int media;
1235 {
1236 	int i, restart = 0;
1237 	u_int32_t isr;
1238 
1239 	if (IFM_SUBTYPE(media) == IFM_NONE)
1240 		return;
1241 
1242 	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
1243 		restart = 1;
1244 		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));
1245 
1246 		for (i = 0; i < DC_TIMEOUT; i++) {
1247 			DELAY(10);
1248 			isr = CSR_READ_4(sc, DC_ISR);
1249 			if (isr & DC_ISR_TX_IDLE ||
1250 			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
1251 				break;
1252 		}
1253 
1254 		if (i == DC_TIMEOUT)
1255 			printf("%s: failed to force tx and "
1256 			    "rx to idle state\n", sc->sc_dev.dv_xname);
1257 
1258 	}
1259 
1260 	if (IFM_SUBTYPE(media) == IFM_100_TX) {
1261 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
1262 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
1263 		if (sc->dc_pmode == DC_PMODE_MII) {
1264 			int watchdogreg;
1265 
1266 			if (DC_IS_INTEL(sc)) {
1267 				/* there's a write enable bit here that reads as 1 */
1268 				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
1269 				watchdogreg &= ~DC_WDOG_CTLWREN;
1270 				watchdogreg |= DC_WDOG_JABBERDIS;
1271 				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
1272 			} else {
1273 				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
1274 			}
1275 			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
1276 			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
1277 			if (sc->dc_type == DC_TYPE_98713)
1278 				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
1279 				    DC_NETCFG_SCRAMBLER));
1280 			if (!DC_IS_DAVICOM(sc))
1281 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1282 			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
1283 			if (DC_IS_INTEL(sc))
1284 				dc_apply_fixup(sc, IFM_AUTO);
1285 		} else {
1286 			if (DC_IS_PNIC(sc)) {
1287 				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
1288 				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
1289 				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
1290 			}
1291 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1292 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
1293 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
1294 			if (DC_IS_INTEL(sc))
1295 				dc_apply_fixup(sc,
1296 				    (media & IFM_GMASK) == IFM_FDX ?
1297 				    IFM_100_TX|IFM_FDX : IFM_100_TX);
1298 		}
1299 	}
1300 
1301 	if (IFM_SUBTYPE(media) == IFM_10_T) {
1302 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
1303 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
1304 		if (sc->dc_pmode == DC_PMODE_MII) {
1305 			int watchdogreg;
1306 
1307 			if (DC_IS_INTEL(sc)) {
1308 				/* there's a write enable bit here that reads as 1 */
1309 				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
1310 				watchdogreg &= ~DC_WDOG_CTLWREN;
1311 				watchdogreg |= DC_WDOG_JABBERDIS;
1312 				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
1313 			} else {
1314 				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
1315 			}
1316 			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
1317 			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
1318 			if (sc->dc_type == DC_TYPE_98713)
1319 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
1320 			if (!DC_IS_DAVICOM(sc))
1321 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1322 			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
1323 			if (DC_IS_INTEL(sc))
1324 				dc_apply_fixup(sc, IFM_AUTO);
1325 		} else {
1326 			if (DC_IS_PNIC(sc)) {
1327 				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
1328 				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
1329 				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
1330 			}
1331 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1332 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
1333 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
1334 			if (DC_IS_INTEL(sc)) {
1335 				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
1336 				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
1337 				if ((media & IFM_GMASK) == IFM_FDX)
1338 					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
1339 				else
1340 					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
1341 				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
1342 				DC_CLRBIT(sc, DC_10BTCTRL,
1343 				    DC_TCTL_AUTONEGENBL);
1344 				dc_apply_fixup(sc,
1345 				    (media & IFM_GMASK) == IFM_FDX ?
1346 				    IFM_10_T|IFM_FDX : IFM_10_T);
1347 				DELAY(20000);
1348 			}
1349 		}
1350 	}
1351 
1352 	/*
1353 	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
1354 	 * PHY and we want HomePNA mode, set the portsel bit to turn
1355 	 * on the external MII port.
1356 	 */
1357 	if (DC_IS_DAVICOM(sc)) {
1358 		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
1359 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1360 			sc->dc_link = 1;
1361 		} else {
1362 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
1363 		}
1364 	}
1365 
1366 	if ((media & IFM_GMASK) == IFM_FDX) {
1367 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
1368 		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
1369 			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
1370 	} else {
1371 		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
1372 		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
1373 			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
1374 	}
1375 
1376 	if (restart)
1377 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
1378 }
1379 
1380 void
1381 dc_reset(sc)
1382 	struct dc_softc *sc;
1383 {
1384 	int i;
1385 
1386 	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
1387 
1388 	for (i = 0; i < DC_TIMEOUT; i++) {
1389 		DELAY(10);
1390 		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
1391 			break;
1392 	}
1393 
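	/*
	 * Some chips never clear the reset bit on their own; deassert
	 * it by hand and zero the loop counter so we don't print a
	 * bogus timeout warning below.
	 */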
1394 	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
1395 	    DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
1396 		DELAY(10000);
1397 		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
1398 		i = 0;
1399 	}
1400 
1401 	if (i == DC_TIMEOUT)
1402 		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);
1403 
1404 	/* Wait a little while for the chip to get its brains in order. */
1405 	DELAY(1000);
1406 
1407 	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
1408 	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
1409 	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);
1410 
1411 	/*
1412 	 * Bring the SIA out of reset. In some cases, it looks
1413 	 * like failing to unreset the SIA soon enough gets it
1414 	 * into a state where it will never come out of reset
1415 	 * until we reset the whole chip again.
1416 	 */
1417 	if (DC_IS_INTEL(sc)) {
1418 		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
1419 		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
1420 		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
1421 	}
1422 
1423 	if (sc->dc_type == DC_TYPE_21145)
1424 		dc_setcfg(sc, IFM_10_T);
1425 }
1426 
1427 void
1428 dc_apply_fixup(sc, media)
1429 	struct dc_softc *sc;
1430 	int media;
1431 {
1432 	struct dc_mediainfo *m;
1433 	u_int8_t *p;
1434 	int i;
1435 	u_int32_t reg;
1436 
1437 	m = sc->dc_mi;
1438 
1439 	while (m != NULL) {
1440 		if (m->dc_media == media)
1441 			break;
1442 		m = m->dc_next;
1443 	}
1444 
1445 	if (m == NULL)
1446 		return;
1447 
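	/*
	 * The SROM media block supplies sequences of 16-bit GPIO
	 * control/data words; they are applied by writing them into
	 * the upper half of the watchdog/GPIO register.
	 */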
1448 	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1449 		reg = (p[0] | (p[1] << 8)) << 16;
1450 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1451 	}
1452 
1453 	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1454 		reg = (p[0] | (p[1] << 8)) << 16;
1455 		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1456 	}
1457 }
1458 
1459 void
1460 dc_decode_leaf_sia(sc, l)
1461 	struct dc_softc *sc;
1462 	struct dc_eblock_sia *l;
1463 {
1464 	struct dc_mediainfo *m;
1465 
1466 	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
1467 	if (m == NULL)
1468 		return;
1469 	bzero(m, sizeof(struct dc_mediainfo));
1470 	if (l->dc_sia_code == DC_SIA_CODE_10BT)
1471 		m->dc_media = IFM_10_T;
1472 
1473 	if (l->dc_sia_code == DC_SIA_CODE_10BT_FDX)
1474 		m->dc_media = IFM_10_T|IFM_FDX;
1475 
1476 	if (l->dc_sia_code == DC_SIA_CODE_10B2)
1477 		m->dc_media = IFM_10_2;
1478 
1479 	if (l->dc_sia_code == DC_SIA_CODE_10B5)
1480 		m->dc_media = IFM_10_5;
1481 
1482 	m->dc_gp_len = 2;
1483 	m->dc_gp_ptr = (u_int8_t *)&l->dc_sia_gpio_ctl;
1484 
1485 	m->dc_next = sc->dc_mi;
1486 	sc->dc_mi = m;
1487 
1488 	sc->dc_pmode = DC_PMODE_SIA;
1489 }
1490 
1491 void
1492 dc_decode_leaf_sym(sc, l)
1493 	struct dc_softc *sc;
1494 	struct dc_eblock_sym *l;
1495 {
1496 	struct dc_mediainfo *m;
1497 
1498 	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
1499 	if (m == NULL)
1500 		return;
1501 	bzero(m, sizeof(struct dc_mediainfo));
1502 	if (l->dc_sym_code == DC_SYM_CODE_100BT)
1503 		m->dc_media = IFM_100_TX;
1504 
1505 	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1506 		m->dc_media = IFM_100_TX|IFM_FDX;
1507 
1508 	m->dc_gp_len = 2;
1509 	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1510 
1511 	m->dc_next = sc->dc_mi;
1512 	sc->dc_mi = m;
1513 
1514 	sc->dc_pmode = DC_PMODE_SYM;
1515 }
1516 
1517 void
1518 dc_decode_leaf_mii(sc, l)
1519 	struct dc_softc *sc;
1520 	struct dc_eblock_mii *l;
1521 {
1522 	u_int8_t *p;
1523 	struct dc_mediainfo *m;
1524 
1525 	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
1526 	if (m == NULL)
1527 		return;
1528 	bzero(m, sizeof(struct dc_mediainfo));
1529 	/* We abuse IFM_AUTO to represent MII. */
1530 	m->dc_media = IFM_AUTO;
1531 	m->dc_gp_len = l->dc_gpr_len;
1532 
1533 	p = (u_int8_t *)l;
1534 	p += sizeof(struct dc_eblock_mii);
1535 	m->dc_gp_ptr = p;
1536 	p += 2 * l->dc_gpr_len;
1537 	m->dc_reset_len = *p;
1538 	p++;
1539 	m->dc_reset_ptr = p;
1540 
1541 	m->dc_next = sc->dc_mi;
1542 	sc->dc_mi = m;
1543 }
1544 
1545 void
1546 dc_read_srom(sc, bits)
1547 	struct dc_softc *sc;
1548 	int bits;
1549 {
1550 	int size;
1551 
1552 	size = 2 << bits;
1553 	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
1554 	if (sc->dc_srom == NULL)
1555 		return;
1556 	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
1557 }
1558 
1559 void
1560 dc_parse_21143_srom(sc)
1561 	struct dc_softc *sc;
1562 {
1563 	struct dc_leaf_hdr *lhdr;
1564 	struct dc_eblock_hdr *hdr;
1565 	int i, loff;
1566 	char *ptr;
1567 
1568 	loff = sc->dc_srom[27];
1569 	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);
1570 
1571 	ptr = (char *)lhdr;
1572 	ptr += sizeof(struct dc_leaf_hdr) - 1;
1573 	for (i = 0; i < lhdr->dc_mcnt; i++) {
1574 		hdr = (struct dc_eblock_hdr *)ptr;
1575 		switch(hdr->dc_type) {
1576 		case DC_EBLOCK_MII:
1577 			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
1578 			break;
1579 		case DC_EBLOCK_SIA:
1580 			dc_decode_leaf_sia(sc, (struct dc_eblock_sia *)hdr);
1581 			break;
1582 		case DC_EBLOCK_SYM:
1583 			dc_decode_leaf_sym(sc, (struct dc_eblock_sym *)hdr);
1584 			break;
1585 		default:
1586 			/* Don't care. Yet. */
1587 			break;
1588 		}
1589 		ptr += (hdr->dc_len & 0x7F);
1590 		ptr++;
1591 	}
1592 }
1593 
1594 /*
1595  * Attach the interface. Allocate softc structures, do ifmedia
1596  * setup and ethernet/BPF attach.
1597  */
1598 void
1599 dc_attach(sc)
1600 	struct dc_softc *sc;
1601 {
1602 	struct ifnet *ifp;
1603 	int mac_offset, tmp, i;
1604 
1605 	/*
1606 	 * Get station address from the EEPROM.
1607 	 */
1608 	if (sc->sc_hasmac)
1609 		goto hasmac;
1610 
1611 	switch(sc->dc_type) {
1612 	case DC_TYPE_98713:
1613 	case DC_TYPE_98713A:
1614 	case DC_TYPE_987x5:
1615 	case DC_TYPE_PNICII:
1616 		dc_read_eeprom(sc, (caddr_t)&mac_offset,
1617 		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
1618 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1619 		    (mac_offset / 2), 3, 0);
1620 		break;
1621 	case DC_TYPE_PNIC:
1622 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1623 		break;
1624 	case DC_TYPE_DM9102:
1625 	case DC_TYPE_21143:
1626 	case DC_TYPE_21145:
1627 	case DC_TYPE_ASIX:
1628 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1629 		    DC_EE_NODEADDR, 3, 0);
1630 		break;
1631 	case DC_TYPE_AL981:
1632 	case DC_TYPE_AN983:
1633 		bcopy(&sc->dc_srom[DC_AL_EE_NODEADDR], &sc->sc_arpcom.ac_enaddr,
1634 		    ETHER_ADDR_LEN);
1635 		break;
1636 	case DC_TYPE_XIRCOM:
1637 		break;
1638 	case DC_TYPE_CONEXANT:
1639 		bcopy(&sc->dc_srom[DC_CONEXANT_EE_NODEADDR],
1640 		    &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1641 		break;
1642 	default:
1643 		dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1644 		    DC_EE_NODEADDR, 3, 0);
1645 		break;
1646 	}
1647 hasmac:
1648 
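	/*
	 * Allocate one contiguous DMA region for the RX/TX descriptor
	 * rings and the setup-frame buffer (struct dc_list_data), map
	 * it into the kernel and load it into sc_listmap.
	 */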
1649 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
1650 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1651 	    BUS_DMA_NOWAIT) != 0) {
1652 		printf(": can't alloc list mem\n");
1653 		goto fail;
1654 	}
1655 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1656 	    sizeof(struct dc_list_data), &sc->sc_listkva,
1657 	    BUS_DMA_NOWAIT) != 0) {
1658 		printf(": can't map list mem\n");
1659 		goto fail;
1660 	}
1661 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
1662 	    sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
1663 	    &sc->sc_listmap) != 0) {
1664 		printf(": can't alloc list map\n");
1665 		goto fail;
1666 	}
1667 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1668 	    sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1669 		printf(": can't load list map\n");
1670 		goto fail;
1671 	}
1672 	sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1673 	bzero(sc->dc_ldata, sizeof(struct dc_list_data));
1674 
1675 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1676 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1677 		    0, BUS_DMA_NOWAIT,
1678 		    &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
1679 			printf(": can't create rx map\n");
1680 			return;
1681 		}
1682 	}
1683 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1684 	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
1685 		printf(": can't create rx spare map\n");
1686 		return;
1687 	}
1688 
1689 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1690 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1691 		    DC_TX_LIST_CNT - 5, MCLBYTES, 0, BUS_DMA_NOWAIT,
1692 		    &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
1693 			printf(": can't create tx map\n");
1694 			return;
1695 		}
1696 	}
1697 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, DC_TX_LIST_CNT - 5,
1698 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1699 		printf(": can't create tx spare map\n");
1700 		return;
1701 	}
1702 
1703 	/*
1704 	 * A 21143 or clone chip was detected. Inform the world.
1705 	 */
1706 	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1707 
1708 	ifp = &sc->sc_arpcom.ac_if;
1709 	ifp->if_softc = sc;
1710 	ifp->if_mtu = ETHERMTU;
1711 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1712 	ifp->if_ioctl = dc_ioctl;
1713 	ifp->if_output = ether_output;
1714 	ifp->if_start = dc_start;
1715 	ifp->if_watchdog = dc_watchdog;
1716 	ifp->if_baudrate = 10000000;
1717 	IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1);
1718 	IFQ_SET_READY(&ifp->if_snd);
1719 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1720 
1721 #if NVLAN > 0
1722 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1723 #endif
1724 
1725 	/* Do MII setup. If this is a 21143, check for a PHY on the
1726 	 * MII bus after applying any necessary fixups to twiddle the
1727 	 * GPIO bits. If we don't end up finding a PHY, restore the
1728 	 * old selection (SIA only or SIA/SYM) and attach the dcphy
1729 	 * driver instead.
1730 	 */
1731 	if (DC_IS_INTEL(sc)) {
1732 		dc_apply_fixup(sc, IFM_AUTO);
1733 		tmp = sc->dc_pmode;
1734 		sc->dc_pmode = DC_PMODE_MII;
1735 	}
1736 
1737 	sc->sc_mii.mii_ifp = ifp;
1738 	sc->sc_mii.mii_readreg = dc_miibus_readreg;
1739 	sc->sc_mii.mii_writereg = dc_miibus_writereg;
1740 	sc->sc_mii.mii_statchg = dc_miibus_statchg;
1741 	ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1742 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1743 	    MII_OFFSET_ANY, 0);
1744 
1745 	if (DC_IS_INTEL(sc)) {
1746 		if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1747 			sc->dc_pmode = tmp;
1748 			if (sc->dc_pmode != DC_PMODE_SIA)
1749 				sc->dc_pmode = DC_PMODE_SYM;
1750 			sc->dc_flags |= DC_21143_NWAY;
1751 			if (sc->dc_flags & DC_MOMENCO_BOTCH)
1752 				sc->dc_pmode = DC_PMODE_MII;
1753 			mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1754 			    MII_PHY_ANY, MII_OFFSET_ANY, 0);
1755 		} else {
1756 			/* we have a PHY, so we must clear this bit */
1757 			sc->dc_flags &= ~DC_TULIP_LEDS;
1758 		}
1759 	}
1760 
1761 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1762 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1763 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1764 		printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1765 	} else if (sc->dc_type == DC_TYPE_21145) {
1766 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
1767 	} else
1768 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1769 
1770 	if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
1771 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);
1772 
1773 	if (DC_IS_XIRCOM(sc)) {
1774 		/*
1775 		 * setup General Purpose Port mode and data so the tulip
1776 		 * can talk to the MII.
1777 		 */
1778 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
1779 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1780 		DELAY(10);
1781 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
1782 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1783 		DELAY(10);
1784 	}
1785 
1786 	/*
1787 	 * Call MI attach routines.
1788 	 */
1789 	if_attach(ifp);
1790 	ether_ifattach(ifp);
1791 
1792 	sc->sc_dhook = shutdownhook_establish(dc_shutdown, sc);
1793 
1794 fail:
1795 	return;
1796 }
1797 
1798 int
1799 dc_detach(sc)
1800 	struct dc_softc *sc;
1801 {
1802 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1803 
1804 	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
1805 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
1806 
1807 	if (sc->dc_srom)
1808 		free(sc->dc_srom, M_DEVBUF);
1809 
1810 	timeout_del(&sc->dc_tick_tmo);
1811 
1812 	ether_ifdetach(ifp);
1813 	if_detach(ifp);
1814 
1815 	shutdownhook_disestablish(sc->sc_dhook);
1816 
1817 	return (0);
1818 }
1819 
1820 /*
1821  * Initialize the transmit descriptors.
1822  */
1823 int
1824 dc_list_tx_init(sc)
1825 	struct dc_softc *sc;
1826 {
1827 	struct dc_chain_data *cd;
1828 	struct dc_list_data *ld;
1829 	int i;
1830 	bus_addr_t next;
1831 
1832 	cd = &sc->dc_cdata;
1833 	ld = sc->dc_ldata;
1834 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
1835 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1836 		if (i == (DC_TX_LIST_CNT - 1))
1837 			next +=
1838 			    offsetof(struct dc_list_data, dc_tx_list[0]);
1839 		else
1840 			next +=
1841 			    offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1842 		cd->dc_tx_chain[i].sd_mbuf = NULL;
1843 		ld->dc_tx_list[i].dc_data = htole32(0);
1844 		ld->dc_tx_list[i].dc_ctl = htole32(0);
1845 		ld->dc_tx_list[i].dc_next = htole32(next);
1846 	}
1847 
1848 	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1849 
1850 	return (0);
1851 }
1852 
1853 
1854 /*
1855  * Initialize the RX descriptors and allocate mbufs for them. Note that
1856  * we arrange the descriptors in a closed ring, so that the last descriptor
1857  * points back to the first.
1858  */
1859 int
1860 dc_list_rx_init(sc)
1861 	struct dc_softc *sc;
1862 {
1863 	struct dc_chain_data *cd;
1864 	struct dc_list_data *ld;
1865 	int i;
1866 	bus_addr_t next;
1867 
1868 	cd = &sc->dc_cdata;
1869 	ld = sc->dc_ldata;
1870 
1871 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
1872 		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
1873 			return (ENOBUFS);
1874 		next = sc->sc_listmap->dm_segs[0].ds_addr;
1875 		if (i == (DC_RX_LIST_CNT - 1))
1876 			next +=
1877 			    offsetof(struct dc_list_data, dc_rx_list[0]);
1878 		else
1879 			next +=
1880 			    offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1881 		ld->dc_rx_list[i].dc_next = htole32(next);
1882 	}
1883 
1884 	cd->dc_rx_prod = 0;
1885 
1886 	return (0);
1887 }
1888 
1889 /*
1890  * Initialize an RX descriptor and attach an MBUF cluster.
1891  */
1892 int
1893 dc_newbuf(sc, i, m)
1894 	struct dc_softc *sc;
1895 	int i;
1896 	struct mbuf *m;
1897 {
1898 	struct mbuf *m_new = NULL;
1899 	struct dc_desc *c;
1900 	bus_dmamap_t map;
1901 
1902 	c = &sc->dc_ldata->dc_rx_list[i];
1903 
1904 	if (m == NULL) {
1905 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1906 		if (m_new == NULL) {
1907 			printf("%s: no memory for rx list "
1908 			    "-- packet dropped!\n", sc->sc_dev.dv_xname);
1909 			return (ENOBUFS);
1910 		}
1911 
1912 		MCLGET(m_new, M_DONTWAIT);
1913 		if (!(m_new->m_flags & M_EXT)) {
1914 			printf("%s: no memory for rx list "
1915 			    "-- packet dropped!\n", sc->sc_dev.dv_xname);
1916 			m_freem(m_new);
1917 			return (ENOBUFS);
1918 		}
1919 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1920 		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
1921 		    mtod(m_new, caddr_t), MCLBYTES, NULL,
1922 		    BUS_DMA_NOWAIT) != 0) {
1923 			printf("%s: rx load failed\n", sc->sc_dev.dv_xname);
1924 			m_freem(m_new);
1925 			return (ENOBUFS);
1926 		}
1927 		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
1928 		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
1929 		sc->sc_rx_sparemap = map;
1930 	} else {
1931 		m_new = m;
1932 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1933 		m_new->m_data = m_new->m_ext.ext_buf;
1934 	}
1935 
1936 	m_adj(m_new, sizeof(u_int64_t));
1937 
1938 	/*
1939 	 * If this is a PNIC chip, zero the buffer. This is part
1940 	 * of the workaround for the receive bug in the 82c168 and
1941 	 * 82c169 chips.
1942 	 */
1943 	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
1944 		bzero((char *)mtod(m_new, char *), m_new->m_len);
1945 
1946 	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
1947 	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
1948 	    BUS_DMASYNC_PREREAD);
1949 
1950 	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
1951 	c->dc_data = htole32(
1952 	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
1953 	    sizeof(u_int64_t));
1954 	c->dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
1955 	c->dc_status = htole32(DC_RXSTAT_OWN);
1956 
1957 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1958 	    offsetof(struct dc_list_data, dc_rx_list[i]),
1959 	    sizeof(struct dc_desc),
1960 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1961 
1962 	return (0);
1963 }
1964 
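/*
 * Editorial note: dc_newbuf() loads the fresh cluster into the spare
 * DMA map first and swaps the spare into the ring slot only once the
 * load has succeeded, so a failed load leaves the slot's existing
 * mapping and mbuf untouched. A sketch of the same swap-on-success
 * pattern, using purely hypothetical types and names:
 */
#if 0	/* illustration only; not compiled */
struct ex_slot {
	void	*es_buf;	/* currently attached buffer */
	int	 es_map;	/* stand-in for a bus_dmamap_t */
};

/* "load" stands in for bus_dmamap_load(); nonzero means failure. */
static int
ex_refill(struct ex_slot *slot, int *sparemap, void *newbuf,
    int (*load)(int, void *))
{
	int tmp;

	if ((*load)(*sparemap, newbuf) != 0)
		return (-1);		/* old buffer is still mapped */

	tmp = slot->es_map;		/* swap spare and slot maps */
	slot->es_map = *sparemap;
	*sparemap = tmp;
	slot->es_buf = newbuf;
	return (0);
}
#endif
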
1965 /*
1966  * Grrrrr.
1967  * The PNIC chip has a terrible bug in it that manifests itself during
1968  * periods of heavy activity. The exact mode of failure is difficult to
1969  * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
1970  * will happen on slow machines. The bug is that sometimes instead of
1971  * uploading one complete frame during reception, it uploads what looks
1972  * like the entire contents of its FIFO memory. The frame we want is at
1973  * the end of the whole mess, but we never know exactly how much data has
1974  * been uploaded, so salvaging the frame is hard.
1975  *
1976  * There is only one way to do it reliably, and it's disgusting.
1977  * Here's what we know:
1978  *
1979  * - We know there will always be somewhere between one and three extra
1980  *   descriptors uploaded.
1981  *
1982  * - We know the desired received frame will always be at the end of the
1983  *   total data upload.
1984  *
1985  * - We know the size of the desired received frame because it will be
1986  *   provided in the length field of the status word in the last descriptor.
1987  *
1988  * Here's what we do:
1989  *
1990  * - When we allocate buffers for the receive ring, we bzero() them.
1991  *   This means that we know that the buffer contents should be all
1992  *   zeros, except for data uploaded by the chip.
1993  *
1994  * - We also force the PNIC chip to upload frames that include the
1995  *   ethernet CRC at the end.
1996  *
1997  * - We gather all of the bogus frame data into a single buffer.
1998  *
1999  * - We then position a pointer at the end of this buffer and scan
2000  *   backwards until we encounter the first non-zero byte of data.
2001  *   This is the end of the received frame. We know we will encounter
2002  *   some data at the end of the frame because the CRC will always be
2003  *   there, so even if the sender transmits a packet of all zeros,
2004  *   we won't be fooled.
2005  *
2006  * - We know the size of the actual received frame, so we subtract
2007  *   that value from the current pointer location. This brings us
2008  *   to the start of the actual received packet.
2009  *
2010  * - We copy this into an mbuf and pass it on, along with the actual
2011  *   frame length.
2012  *
2013  * The performance hit is tremendous, but it beats dropping frames all
2014  * the time.
2015  */
2016 
2017 #define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
2018 void
2019 dc_pnic_rx_bug_war(sc, idx)
2020 	struct dc_softc *sc;
2021 	int idx;
2022 {
2023 	struct dc_desc		*cur_rx;
2024 	struct dc_desc		*c = NULL;
2025 	struct mbuf		*m = NULL;
2026 	unsigned char		*ptr;
2027 	int			i, total_len;
2028 	u_int32_t		rxstat = 0;
2029 
2030 	i = sc->dc_pnic_rx_bug_save;
2031 	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
2032 	ptr = sc->dc_pnic_rx_buf;
2033 	bzero(ptr, DC_RXLEN * 5);
2034 
2035 	/* Copy all the bytes from the bogus buffers. */
2036 	while (1) {
2037 		c = &sc->dc_ldata->dc_rx_list[i];
2038 		rxstat = letoh32(c->dc_status);
2039 		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
2040 		bcopy(mtod(m, char *), ptr, DC_RXLEN);
2041 		ptr += DC_RXLEN;
2042 		/* If this is the last buffer, break out. */
2043 		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
2044 			break;
2045 		dc_newbuf(sc, i, m);
2046 		DC_INC(i, DC_RX_LIST_CNT);
2047 	}
2048 
2049 	/* Find the length of the actual receive frame. */
2050 	total_len = DC_RXBYTES(rxstat);
2051 
2052 	/* Scan backwards until we hit a non-zero byte. */
2053 	while(*ptr == 0x00)
2054 		ptr--;
2055 
2056 	/* Round off. */
2057 	if ((unsigned long)(ptr) & 0x3)
2058 		ptr -= 1;
2059 
2060 	/* Now find the start of the frame. */
2061 	ptr -= total_len;
2062 	if (ptr < sc->dc_pnic_rx_buf)
2063 		ptr = sc->dc_pnic_rx_buf;
2064 
2065 	/*
2066 	 * Now copy the salvaged frame to the last mbuf and fake up
2067 	 * the status word to make it look like a successful
2068 	 * frame reception.
2069 	 */
2070 	dc_newbuf(sc, i, m);
2071 	bcopy(ptr, mtod(m, char *), total_len);
2072 	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
2073 }
2074 
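/*
 * Editorial note: the salvage logic above relies on the buffer having
 * been zeroed beforehand, so the last non-zero byte (guaranteed by the
 * trailing CRC) marks the end of the real frame, and the frame start
 * is found by stepping back over the frame length. A standalone sketch
 * of that backward scan, with hypothetical names ("ex_salvage", "buf"):
 */
#if 0	/* illustration only; not compiled */
#include <stddef.h>

static unsigned char *
ex_salvage(unsigned char *buf, size_t buflen, size_t framelen)
{
	unsigned char *p = buf + buflen - 1;

	/* Scan backwards until we hit a non-zero byte. */
	while (p > buf && *p == 0x00)
		p--;

	/* Step back over the frame; clamp at the start of the buffer. */
	if ((size_t)(p - buf) < framelen)
		return (buf);
	return (p - framelen);
}
#endif
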
2075 /*
2076  * This routine searches the RX ring for dirty descriptors in the
2077  * event that the rxeof routine falls out of sync with the chip's
2078  * current descriptor pointer. This can happen as a result of a
2079  * "no RX buffer available" condition that arises when the chip
2080  * consumes all of the RX buffers before the driver has a chance to
2081  * process the RX ring. This routine may need to be called more than
2082  * once to bring the driver back in sync with the chip; however, we
2083  * should still be getting RX DONE interrupts to drive the search
2084  * for new packets in the RX ring, so we should catch up eventually.
2085  */
2086 int
2087 dc_rx_resync(sc)
2088 	struct dc_softc *sc;
2089 {
2090 	int i, pos;
2091 	struct dc_desc *cur_rx;
2092 
2093 	pos = sc->dc_cdata.dc_rx_prod;
2094 
2095 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
2096 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2097 		    offsetof(struct dc_list_data, dc_rx_list[pos]),
2098 		    sizeof(struct dc_desc),
2099 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2100 
2101 		cur_rx = &sc->dc_ldata->dc_rx_list[pos];
2102 		if (!(cur_rx->dc_status & htole32(DC_RXSTAT_OWN)))
2103 			break;
2104 		DC_INC(pos, DC_RX_LIST_CNT);
2105 	}
2106 
2107 	/* If the ring really is empty, then just return. */
2108 	if (i == DC_RX_LIST_CNT)
2109 		return (0);
2110 
2111 	/* We've fallen behind the chip: catch it. */
2112 	sc->dc_cdata.dc_rx_prod = pos;
2113 
2114 	return (EAGAIN);
2115 }
2116 
2117 /*
2118  * A frame has been uploaded: pass the resulting mbuf chain up to
2119  * the higher level protocols.
2120  */
2121 void
2122 dc_rxeof(sc)
2123 	struct dc_softc *sc;
2124 {
2125 	struct mbuf *m;
2126 	struct ifnet *ifp;
2127 	struct dc_desc *cur_rx;
2128 	int i, total_len = 0;
2129 	u_int32_t rxstat;
2130 
2131 	ifp = &sc->sc_arpcom.ac_if;
2132 	i = sc->dc_cdata.dc_rx_prod;
2133 
2134 	while(!(sc->dc_ldata->dc_rx_list[i].dc_status &
2135 	    htole32(DC_RXSTAT_OWN))) {
2136 		struct mbuf		*m0 = NULL;
2137 
2138 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2139 		    offsetof(struct dc_list_data, dc_rx_list[i]),
2140 		    sizeof(struct dc_desc),
2141 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2142 
2143 		cur_rx = &sc->dc_ldata->dc_rx_list[i];
2144 		rxstat = letoh32(cur_rx->dc_status);
2145 		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
2146 		total_len = DC_RXBYTES(rxstat);
2147 
2148 		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
2149 			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
2150 				if (rxstat & DC_RXSTAT_FIRSTFRAG)
2151 					sc->dc_pnic_rx_bug_save = i;
2152 				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
2153 					DC_INC(i, DC_RX_LIST_CNT);
2154 					continue;
2155 				}
2156 				dc_pnic_rx_bug_war(sc, i);
2157 				rxstat = letoh32(cur_rx->dc_status);
2158 				total_len = DC_RXBYTES(rxstat);
2159 			}
2160 		}
2161 
2162 		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
2163 
2164 		/*
2165 		 * If an error occurs, update stats, clear the
2166 		 * status word and leave the mbuf cluster in place:
2167 		 * it should simply get re-used next time this descriptor
2168 		 * comes up in the ring.
2169 		 */
2170 		if (rxstat & DC_RXSTAT_RXERR
2171 #if NVLAN > 0
2172 		/*
2173 		 * If VLANs are enabled, allow frames up to 4 bytes
2174 		 * longer than the MTU. This should really check if
2175 		 * the giant packet has a vlan tag
2176 		 */
2177 		 && ((rxstat & (DC_RXSTAT_GIANT|DC_RXSTAT_LASTFRAG)) == 0
2178 		 && total_len <= ifp->if_mtu + 4)
2179 #endif
2180 		    ) {
2181 			ifp->if_ierrors++;
2182 			if (rxstat & DC_RXSTAT_COLLSEEN)
2183 				ifp->if_collisions++;
2184 			dc_newbuf(sc, i, m);
2185 			if (rxstat & DC_RXSTAT_CRCERR) {
2186 				DC_INC(i, DC_RX_LIST_CNT);
2187 				continue;
2188 			} else {
2189 				dc_init(sc);
2190 				return;
2191 			}
2192 		}
2193 
2194 		/* No errors; receive the packet. */
2195 		total_len -= ETHER_CRC_LEN;
2196 
2197 		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
2198 		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
2199 		    BUS_DMASYNC_POSTREAD);
2200 
2201 		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
2202 		    total_len + ETHER_ALIGN, 0, ifp, NULL);
2203 		dc_newbuf(sc, i, m);
2204 		DC_INC(i, DC_RX_LIST_CNT);
2205 		if (m0 == NULL) {
2206 			ifp->if_ierrors++;
2207 			continue;
2208 		}
2209 		m_adj(m0, ETHER_ALIGN);
2210 		m = m0;
2211 
2212 		ifp->if_ipackets++;
2213 
2214 #if NBPFILTER > 0
2215 		if (ifp->if_bpf)
2216 			bpf_mtap(ifp->if_bpf, m);
2217 #endif
2218 		ether_input_mbuf(ifp, m);
2219 	}
2220 
2221 	sc->dc_cdata.dc_rx_prod = i;
2222 }
2223 
2224 /*
2225  * A frame was downloaded to the chip. It's safe for us to clean up
2226  * the list buffers.
2227  */
2228 
2229 void
2230 dc_txeof(sc)
2231 	struct dc_softc *sc;
2232 {
2233 	struct dc_desc *cur_tx = NULL;
2234 	struct ifnet *ifp;
2235 	int idx;
2236 
2237 	ifp = &sc->sc_arpcom.ac_if;
2238 
2239 	/* Clear the timeout timer. */
2240 	ifp->if_timer = 0;
2241 
2242 	/*
2243 	 * Go through our tx list and free mbufs for those
2244 	 * frames that have been transmitted.
2245 	 */
2246 	idx = sc->dc_cdata.dc_tx_cons;
2247 	while(idx != sc->dc_cdata.dc_tx_prod) {
2248 		u_int32_t		txstat;
2249 
2250 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2251 		    offsetof(struct dc_list_data, dc_tx_list[idx]),
2252 		    sizeof(struct dc_desc),
2253 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2254 
2255 		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
2256 		txstat = letoh32(cur_tx->dc_status);
2257 
2258 		if (txstat & DC_TXSTAT_OWN)
2259 			break;
2260 
2261 		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
2262 		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
2263 			sc->dc_cdata.dc_tx_cnt--;
2264 			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
2265 				/*
2266 				 * Yes, the PNIC is so brain damaged
2267 				 * that it will sometimes generate a TX
2268 				 * underrun error while DMAing the RX
2269 				 * filter setup frame. If we detect this,
2270 				 * we have to send the setup frame again,
2271 				 * or else the filter won't be programmed
2272 				 * correctly.
2273 				 */
2274 				if (DC_IS_PNIC(sc)) {
2275 					if (txstat & DC_TXSTAT_ERRSUM)
2276 						dc_setfilt(sc);
2277 				}
2278 				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
2279 			}
2280 			DC_INC(idx, DC_TX_LIST_CNT);
2281 			continue;
2282 		}
2283 
2284 		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
2285 			/*
2286 			 * XXX: Why does my Xircom taunt me so?
2287 			 * For some reason it likes setting the CARRLOST flag
2288 			 * even when the carrier is there. wtf?!
2289 			 * Who knows, but Conexant chips have the
2290 			 * same problem. Maybe they took lessons
2291 			 * from Xircom.
2292 			 */
2293 			if (/*sc->dc_type == DC_TYPE_21143 &&*/
2294 			    sc->dc_pmode == DC_PMODE_MII &&
2295 			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
2296 			    DC_TXSTAT_NOCARRIER)))
2297 				txstat &= ~DC_TXSTAT_ERRSUM;
2298 		} else {
2299 			if (/*sc->dc_type == DC_TYPE_21143 &&*/
2300 			    sc->dc_pmode == DC_PMODE_MII &&
2301 		    	    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
2302 		    	    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
2303 				txstat &= ~DC_TXSTAT_ERRSUM;
2304 		}
2305 
2306 		if (txstat & DC_TXSTAT_ERRSUM) {
2307 			ifp->if_oerrors++;
2308 			if (txstat & DC_TXSTAT_EXCESSCOLL)
2309 				ifp->if_collisions++;
2310 			if (txstat & DC_TXSTAT_LATECOLL)
2311 				ifp->if_collisions++;
2312 			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
2313 				dc_init(sc);
2314 				return;
2315 			}
2316 		}
2317 
2318 		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;
2319 
2320 		ifp->if_opackets++;
2321 		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
2322 			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;
2323 
2324 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2325 			    BUS_DMASYNC_POSTWRITE);
2326 			bus_dmamap_unload(sc->sc_dmat, map);
2327 		}
2328 		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
2329 			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
2330 			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
2331 		}
2332 		sc->dc_cdata.dc_tx_cnt--;
2333 		DC_INC(idx, DC_TX_LIST_CNT);
2334 	}
2335 
2336 	sc->dc_cdata.dc_tx_cons = idx;
2337 	if (cur_tx != NULL)
2338 		ifp->if_flags &= ~IFF_OACTIVE;
2339 }
2340 
2341 void
2342 dc_tick(xsc)
2343 	void *xsc;
2344 {
2345 	struct dc_softc *sc = (struct dc_softc *)xsc;
2346 	struct mii_data *mii;
2347 	struct ifnet *ifp;
2348 	int s;
2349 	u_int32_t r;
2350 
2351 	s = splimp();
2352 
2353 	ifp = &sc->sc_arpcom.ac_if;
2354 	mii = &sc->sc_mii;
2355 
2356 	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
2357 		if (sc->dc_flags & DC_21143_NWAY) {
2358 			r = CSR_READ_4(sc, DC_10BTSTAT);
2359 			if (IFM_SUBTYPE(mii->mii_media_active) ==
2360 			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
2361 				sc->dc_link = 0;
2362 				mii_mediachg(mii);
2363 			}
2364 			if (IFM_SUBTYPE(mii->mii_media_active) ==
2365 			    IFM_10_T && (r & DC_TSTAT_LS10)) {
2366 				sc->dc_link = 0;
2367 				mii_mediachg(mii);
2368 			}
2369 			if (sc->dc_link == 0)
2370 				mii_tick(mii);
2371 		} else {
2372 			r = CSR_READ_4(sc, DC_ISR);
2373 			if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT &&
2374 			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc))
2375 				mii_tick(mii);
2376 			if (!(mii->mii_media_status & IFM_ACTIVE))
2377 				sc->dc_link = 0;
2378 		}
2379 	} else
2380 		mii_tick(mii);
2381 
2382 	/*
2383 	 * When the init routine completes, we expect to be able to send
2384 	 * packets right away, and in fact the network code will send a
2385 	 * gratuitous ARP the moment the init routine marks the interface
2386 	 * as running. However, even though the MAC may have been initialized,
2387 	 * there may be a delay of a few seconds before the PHY completes
2388 	 * autonegotiation and the link is brought up. Any transmissions
2389 	 * made during that delay will be lost. Dealing with this is tricky:
2390 	 * we can't just pause in the init routine while waiting for the
2391 	 * PHY to come ready since that would bring the whole system to
2392 	 * a screeching halt for several seconds.
2393 	 *
2394 	 * What we do here is prevent the TX start routine from sending
2395 	 * any packets until a link has been established. After the
2396 	 * interface has been initialized, the tick routine will poll
2397 	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
2398 	 * that time, packets will stay in the send queue, and once the
2399 	 * link comes up, they will be flushed out to the wire.
2400 	 */
2401 	if (!sc->dc_link) {
2402 		mii_pollstat(mii);
2403 		if (mii->mii_media_status & IFM_ACTIVE &&
2404 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2405 			sc->dc_link++;
2406 			if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
2407 				dc_start(ifp);
2408 		}
2409 	}
2410 
2411 	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
2412 		timeout_add(&sc->dc_tick_tmo, hz / 10);
2413 	else
2414 		timeout_add(&sc->dc_tick_tmo, hz);
2415 
2416 	splx(s);
2417 }
2418 
2419 int
2420 dc_intr(arg)
2421 	void *arg;
2422 {
2423 	struct dc_softc *sc;
2424 	struct ifnet *ifp;
2425 	u_int32_t status;
2426 	int claimed = 0;
2427 
2428 	sc = arg;
2429 	ifp = &sc->sc_arpcom.ac_if;
2430 
2431 	/* Suppress unwanted interrupts */
2432 	if (!(ifp->if_flags & IFF_UP)) {
2433 		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
2434 			dc_stop(sc);
2435 		return (claimed);
2436 	}
2437 
2438 	/* Disable interrupts. */
2439 	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
2440 
2441 	while(((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
2442 	    status != 0xFFFFFFFF) {
2443 
2444 		claimed = 1;
2445 
2446 		CSR_WRITE_4(sc, DC_ISR, status);
2447 		if ((status & DC_INTRS) == 0) {
2448 			claimed = 0;
2449 			break;
2450 		}
2451 
2452 		if (status & DC_ISR_RX_OK) {
2453 			int		curpkts;
2454 			curpkts = ifp->if_ipackets;
2455 			dc_rxeof(sc);
2456 			if (curpkts == ifp->if_ipackets) {
2457 				while(dc_rx_resync(sc))
2458 					dc_rxeof(sc);
2459 			}
2460 		}
2461 
2462 		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
2463 			dc_txeof(sc);
2464 
2465 		if (status & DC_ISR_TX_IDLE) {
2466 			dc_txeof(sc);
2467 			if (sc->dc_cdata.dc_tx_cnt) {
2468 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2469 				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
2470 			}
2471 		}
2472 
2473 		if (status & DC_ISR_TX_UNDERRUN) {
2474 			u_int32_t		cfg;
2475 
2476 			if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc))
2477 				dc_init(sc);
2478 			cfg = CSR_READ_4(sc, DC_NETCFG);
2479 			cfg &= ~DC_NETCFG_TX_THRESH;
2480 			if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) {
2481 				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2482 			} else if (sc->dc_flags & DC_TX_STORENFWD) {
2483 			} else {
2484 				sc->dc_txthresh += 0x4000;
2485 				CSR_WRITE_4(sc, DC_NETCFG, cfg);
2486 				DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2487 				DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2488 			}
2489 		}
2490 
2491 		if ((status & DC_ISR_RX_WATDOGTIMEO)
2492 		    || (status & DC_ISR_RX_NOBUF)) {
2493 			int		curpkts;
2494 			curpkts = ifp->if_ipackets;
2495 			dc_rxeof(sc);
2496 			if (curpkts == ifp->if_ipackets) {
2497 				while(dc_rx_resync(sc))
2498 					dc_rxeof(sc);
2499 			}
2500 		}
2501 
2502 		if (status & DC_ISR_BUS_ERR) {
2503 			dc_reset(sc);
2504 			dc_init(sc);
2505 		}
2506 	}
2507 
2508 	/* Re-enable interrupts. */
2509 	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
2510 
2511 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
2512 		dc_start(ifp);
2513 
2514 	return (claimed);
2515 }
2516 
2517 /*
2518  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2519  * pointers to the fragment pointers.
2520  */
2521 int
2522 dc_encap(sc, m_head, txidx)
2523 	struct dc_softc *sc;
2524 	struct mbuf *m_head;
2525 	u_int32_t *txidx;
2526 {
2527 	struct dc_desc *f = NULL;
2528 	int frag, cur, cnt = 0, i;
2529 	bus_dmamap_t map;
2530 
2531 	/*
2532 	 * Start packing the mbufs in this chain into
2533 	 * the fragment pointers. Stop when we run out
2534 	 * of fragments or hit the end of the mbuf chain.
2535 	 */
2536 	map = sc->sc_tx_sparemap;
2537 
2538 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map,
2539 	    m_head, BUS_DMA_NOWAIT) != 0)
2540 		return (ENOBUFS);
2541 
2542 	cur = frag = *txidx;
2543 
2544 	for (i = 0; i < map->dm_nsegs; i++) {
2545 		if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
2546 			if (*txidx != sc->dc_cdata.dc_tx_prod &&
2547 			    frag == (DC_TX_LIST_CNT - 1)) {
2548 				bus_dmamap_unload(sc->sc_dmat, map);
2549 				return (ENOBUFS);
2550 			}
2551 		}
2552 		if ((DC_TX_LIST_CNT -
2553 		    (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) {
2554 			bus_dmamap_unload(sc->sc_dmat, map);
2555 			return (ENOBUFS);
2556 		}
2557 
2558 		f = &sc->dc_ldata->dc_tx_list[frag];
2559 		f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
2560 		if (cnt == 0) {
2561 			f->dc_status = htole32(0);
2562 			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
2563 		} else
2564 			f->dc_status = htole32(DC_TXSTAT_OWN);
2565 		f->dc_data = htole32(map->dm_segs[i].ds_addr);
2566 		cur = frag;
2567 		DC_INC(frag, DC_TX_LIST_CNT);
2568 		cnt++;
2569 	}
2570 
2571 	sc->dc_cdata.dc_tx_cnt += cnt;
2572 	sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m_head;
2573 	sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
2574 	sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
2575 	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
2576 	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
2577 		sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |=
2578 		    htole32(DC_TXCTL_FINT);
2579 	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
2580 		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2581 		    htole32(DC_TXCTL_FINT);
2582 	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
2583 		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2584 		    htole32(DC_TXCTL_FINT);
2585 #ifdef ALTQ
2586 	else if ((sc->dc_flags & DC_TX_USE_TX_INTR) &&
2587 		 TBR_IS_ENABLED(&sc->sc_arpcom.ac_if.if_snd))
2588 		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
2589 		    htole32(DC_TXCTL_FINT);
2590 #endif
2591 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2592 	    BUS_DMASYNC_PREWRITE);
2593 
2594 	sc->dc_ldata->dc_tx_list[*txidx].dc_status = htole32(DC_TXSTAT_OWN);
2595 
2596 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2597 	    offsetof(struct dc_list_data, dc_tx_list[0]),
2598 	    sizeof(struct dc_desc) * DC_TX_LIST_CNT,
2599 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2600 
2601 	*txidx = frag;
2602 
2603 	return (0);
2604 }
2605 
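/*
 * Editorial note: dc_encap() above marks every descriptor in the chain
 * owned-by-chip as it fills it in, except the first; OWN is set on the
 * first descriptor only after the whole chain (and its DMA sync) is in
 * place, so the chip can never begin DMA on a partially built chain.
 * A condensed sketch of that publish-last ordering with hypothetical
 * names ("ex_txd", "ex_fill_chain"):
 */
#if 0	/* illustration only; not compiled */
#include <stdint.h>

#define EX_OWN		0x80000000U
#define EX_RING_CNT	8

struct ex_txd {
	uint32_t	et_status;
	uint32_t	et_addr;
};

static void
ex_fill_chain(struct ex_txd *ring, int first, const uint32_t *addrs,
    int nsegs)
{
	int frag = first, i;

	for (i = 0; i < nsegs; i++) {
		ring[frag].et_addr = addrs[i];
		/* Every entry but the first is handed over immediately. */
		ring[frag].et_status = (i == 0) ? 0 : EX_OWN;
		frag = (frag + 1) % EX_RING_CNT;
	}
	/* Publish: only now may the chip start on this chain. */
	ring[first].et_status |= EX_OWN;
}
#endif
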
2606 /*
2607  * Coalesce an mbuf chain into a single mbuf cluster buffer.
2608  * Needed for some really badly behaved chips that just can't
2609  * do scatter/gather correctly.
2610  */
2611 int
2612 dc_coal(sc, m_head)
2613 	struct dc_softc *sc;
2614 	struct mbuf **m_head;
2615 {
2616 	struct mbuf		*m_new, *m;
2617 
2618 	m = *m_head;
2619 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
2620 	if (m_new == NULL) {
2621 		printf("%s: no memory for tx list\n", sc->sc_dev.dv_xname);
2622 		return (ENOBUFS);
2623 	}
2624 	if (m->m_pkthdr.len > MHLEN) {
2625 		MCLGET(m_new, M_DONTWAIT);
2626 		if (!(m_new->m_flags & M_EXT)) {
2627 			m_freem(m_new);
2628 			printf("%s: no memory for tx list\n",
2629 			    sc->sc_dev.dv_xname);
2630 			return (ENOBUFS);
2631 		}
2632 	}
2633 	m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t));
2634 	m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len;
2635 	m_freem(m);
2636 	*m_head = m_new;
2637 
2638 	return (0);
2639 }
2640 
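/*
 * Editorial note: dc_coal() above copies an entire mbuf chain into a
 * single cluster for chips that cannot handle scatter/gather. The same
 * idea, reduced to copying a chain of scattered segments into one
 * contiguous buffer, is sketched below with hypothetical names
 * ("ex_seg", "ex_coalesce"):
 */
#if 0	/* illustration only; not compiled */
#include <stddef.h>
#include <string.h>

struct ex_seg {
	const void		*sg_data;
	size_t			 sg_len;
	const struct ex_seg	*sg_next;
};

/* Returns bytes copied, or 0 if the chain does not fit in dst. */
static size_t
ex_coalesce(const struct ex_seg *s, void *dst, size_t dstlen)
{
	size_t off = 0;

	for (; s != NULL; s = s->sg_next) {
		if (off + s->sg_len > dstlen)
			return (0);
		memcpy((char *)dst + off, s->sg_data, s->sg_len);
		off += s->sg_len;
	}
	return (off);
}
#endif
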
2641 /*
2642  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2643  * to the mbuf data regions directly in the transmit lists. We also save a
2644  * copy of the pointers since the transmit list fragment pointers are
2645  * physical addresses.
2646  */
2647 
2648 void
2649 dc_start(ifp)
2650 	struct ifnet *ifp;
2651 {
2652 	struct dc_softc *sc;
2653 	struct mbuf *m_head = NULL;
2654 	int idx;
2655 
2656 	sc = ifp->if_softc;
2657 
2658 	if (!sc->dc_link)
2659 		return;
2660 
2661 	if (ifp->if_flags & IFF_OACTIVE)
2662 		return;
2663 
2664 	idx = sc->dc_cdata.dc_tx_prod;
2665 
2666 	while(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf == NULL) {
2667 		IFQ_POLL(&ifp->if_snd, m_head);
2668 		if (m_head == NULL)
2669 			break;
2670 
2671 		if (sc->dc_flags & DC_TX_COALESCE) {
2672 #ifdef ALTQ
2673 			/* note: dc_coal breaks the poll-and-dequeue rule.
2674 			 * if dc_coal fails, we lose the packet.
2675 			 */
2676 #endif
2677 			IFQ_DEQUEUE(&ifp->if_snd, m_head);
2678 			if (dc_coal(sc, &m_head)) {
2679 				ifp->if_flags |= IFF_OACTIVE;
2680 				break;
2681 			}
2682 		}
2683 
2684 		if (dc_encap(sc, m_head, &idx)) {
2685 			ifp->if_flags |= IFF_OACTIVE;
2686 			break;
2687 		}
2688 
2689 		/* now we are committed to transmit the packet */
2690 		if (sc->dc_flags & DC_TX_COALESCE) {
2691 			/* if mbuf is coalesced, it is already dequeued */
2692 		} else
2693 			IFQ_DEQUEUE(&ifp->if_snd, m_head);
2694 
2695 		/*
2696 		 * If there's a BPF listener, bounce a copy of this frame
2697 		 * to him.
2698 		 */
2699 #if NBPFILTER > 0
2700 		if (ifp->if_bpf)
2701 			bpf_mtap(ifp->if_bpf, m_head);
2702 #endif
2703 		if (sc->dc_flags & DC_TX_ONE) {
2704 			ifp->if_flags |= IFF_OACTIVE;
2705 			break;
2706 		}
2707 	}
2708 	if (idx == sc->dc_cdata.dc_tx_prod)
2709 		return;
2710 
2711 	/* Transmit */
2712 	sc->dc_cdata.dc_tx_prod = idx;
2713 	if (!(sc->dc_flags & DC_TX_POLL))
2714 		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
2715 
2716 	/*
2717 	 * Set a timeout in case the chip goes out to lunch.
2718 	 */
2719 	ifp->if_timer = 5;
2720 }
2721 
2722 void
2723 dc_init(xsc)
2724 	void *xsc;
2725 {
2726 	struct dc_softc *sc = xsc;
2727 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2728 	struct mii_data *mii;
2729 	int s;
2730 
2731 	s = splimp();
2732 
2733 	mii = &sc->sc_mii;
2734 
2735 	/*
2736 	 * Cancel pending I/O and free all RX/TX buffers.
2737 	 */
2738 	dc_stop(sc);
2739 	dc_reset(sc);
2740 
2741 	/*
2742 	 * Set cache alignment and burst length.
2743 	 */
2744 	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
2745 		CSR_WRITE_4(sc, DC_BUSCTL, 0);
2746 	else
2747 		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
2748 	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
2749 		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
2750 	} else {
2751 		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
2752 	}
2753 	if (sc->dc_flags & DC_TX_POLL)
2754 		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
2755 	switch(sc->dc_cachesize) {
2756 	case 32:
2757 		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
2758 		break;
2759 	case 16:
2760 		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
2761 		break;
2762 	case 8:
2763 		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
2764 		break;
2765 	case 0:
2766 	default:
2767 		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
2768 		break;
2769 	}
2770 
2771 	if (sc->dc_flags & DC_TX_STORENFWD)
2772 		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2773 	else {
2774 		if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) {
2775 			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2776 		} else {
2777 			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2778 			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2779 		}
2780 	}
2781 
2782 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
2783 	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);
2784 
2785 	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
2786 		/*
2787 		 * The app notes for the 98713 and 98715A say that
2788 		 * in order to have the chips operate properly, a magic
2789 		 * number must be written to CSR16. Macronix does not
2790 		 * document the meaning of these bits so there's no way
2791 		 * to know exactly what they do. The 98713 has a magic
2792 		 * number all its own; the rest all use a different one.
2793 		 */
2794 		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
2795 		if (sc->dc_type == DC_TYPE_98713)
2796 			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
2797 		else
2798 			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
2799 	}
2800 
2801 	if (DC_IS_XIRCOM(sc)) {
2802 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
2803 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2804 		DELAY(10);
2805 		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
2806 		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
2807 		DELAY(10);
2808 	}
2809 
2810 	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2811 	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_72BYTES);
2812 
2813 	/* Init circular RX list. */
2814 	if (dc_list_rx_init(sc) == ENOBUFS) {
2815 		printf("%s: initialization failed: no "
2816 		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
2817 		dc_stop(sc);
2818 		splx(s);
2819 		return;
2820 	}
2821 
2822 	/*
2823 	 * Init tx descriptors.
2824 	 */
2825 	dc_list_tx_init(sc);
2826 
2827 	/*
2828 	 * Load the address of the RX list.
2829 	 */
2830 	CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
2831 	    offsetof(struct dc_list_data, dc_rx_list[0]));
2832 	CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
2833 	    offsetof(struct dc_list_data, dc_tx_list[0]));
2834 
2835 	/*
2836 	 * Enable interrupts.
2837 	 */
2838 	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
2839 	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);
2840 
2841 	/* Enable transmitter. */
2842 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2843 
2844 	/*
2845 	 * If this is an Intel 21143 and we're not using the
2846 	 * MII port, program the LED control pins so we get
2847 	 * link and activity indications.
2848 	 */
2849 	if (sc->dc_flags & DC_TULIP_LEDS) {
2850 		CSR_WRITE_4(sc, DC_WATCHDOG,
2851 		    DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
2852 		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
2853 	}
2854 
2855 	/*
2856 	 * Load the RX/multicast filter. We do this sort of late
2857 	 * because the filter programming scheme on the 21143 and
2858 	 * some clones requires DMAing a setup frame via the TX
2859 	 * engine, and we need the transmitter enabled for that.
2860 	 */
2861 	dc_setfilt(sc);
2862 
2863 	/* Enable receiver. */
2864 	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
2865 	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);
2866 
2867 	mii_mediachg(mii);
2868 	dc_setcfg(sc, sc->dc_if_media);
2869 
2870 	ifp->if_flags |= IFF_RUNNING;
2871 	ifp->if_flags &= ~IFF_OACTIVE;
2872 
2873 	splx(s);
2874 
2875 	timeout_set(&sc->dc_tick_tmo, dc_tick, sc);
2876 
2877 	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
2878 		sc->dc_link = 1;
2879 	else {
2880 		if (sc->dc_flags & DC_21143_NWAY)
2881 			timeout_add(&sc->dc_tick_tmo, hz / 10);
2882 		else
2883 			timeout_add(&sc->dc_tick_tmo, hz);
2884 	}
2885 
2886 #ifdef SRM_MEDIA
2887 	if (sc->dc_srm_media) {
2888 		struct ifreq ifr;
2889 
2890 		ifr.ifr_media = sc->dc_srm_media;
2891 		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
2892 		sc->dc_srm_media = 0;
2893 	}
2894 #endif
2895 }
2896 
2897 /*
2898  * Set media options.
2899  */
2900 int
2901 dc_ifmedia_upd(ifp)
2902 	struct ifnet *ifp;
2903 {
2904 	struct dc_softc *sc;
2905 	struct mii_data *mii;
2906 	struct ifmedia *ifm;
2907 
2908 	sc = ifp->if_softc;
2909 	mii = &sc->sc_mii;
2910 	mii_mediachg(mii);
2911 
2912 	ifm = &mii->mii_media;
2913 
2914 	if (DC_IS_DAVICOM(sc) &&
2915 	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2916 		dc_setcfg(sc, ifm->ifm_media);
2917 	else
2918 		sc->dc_link = 0;
2919 
2920 	return (0);
2921 }
2922 
2923 /*
2924  * Report current media status.
2925  */
2926 void
2927 dc_ifmedia_sts(ifp, ifmr)
2928 	struct ifnet *ifp;
2929 	struct ifmediareq *ifmr;
2930 {
2931 	struct dc_softc *sc;
2932 	struct mii_data *mii;
2933 	struct ifmedia *ifm;
2934 
2935 	sc = ifp->if_softc;
2936 	mii = &sc->sc_mii;
2937 	mii_pollstat(mii);
2938 	ifm = &mii->mii_media;
2939 	if (DC_IS_DAVICOM(sc)) {
2940 		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
2941 			ifmr->ifm_active = ifm->ifm_media;
2942 			ifmr->ifm_status = 0;
2943 			return;
2944 		}
2945 	}
2946 	ifmr->ifm_active = mii->mii_media_active;
2947 	ifmr->ifm_status = mii->mii_media_status;
2948 }
2949 
2950 int
2951 dc_ioctl(ifp, command, data)
2952 	struct ifnet *ifp;
2953 	u_long command;
2954 	caddr_t data;
2955 {
2956 	struct dc_softc		*sc = ifp->if_softc;
2957 	struct ifreq		*ifr = (struct ifreq *) data;
2958 	struct ifaddr		*ifa = (struct ifaddr *)data;
2959 	struct mii_data		*mii;
2960 	int			s, error = 0;
2961 
2962 	s = splimp();
2963 
2964 	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
2965 		splx(s);
2966 		return (error);
2967 	}
2968 
2969 	switch(command) {
2970 	case SIOCSIFADDR:
2971 		ifp->if_flags |= IFF_UP;
2972 		switch (ifa->ifa_addr->sa_family) {
2973 		case AF_INET:
2974 			dc_init(sc);
2975 			arp_ifinit(&sc->sc_arpcom, ifa);
2976 			break;
2977 		default:
2978 			dc_init(sc);
2979 			break;
2980 		}
2981 		break;
2982 	case SIOCSIFFLAGS:
2983 		if (ifp->if_flags & IFF_UP) {
2984 			if (ifp->if_flags & IFF_RUNNING &&
2985 			    ifp->if_flags & IFF_PROMISC &&
2986 			    !(sc->dc_if_flags & IFF_PROMISC)) {
2987 				dc_setfilt(sc);
2988 			} else if (ifp->if_flags & IFF_RUNNING &&
2989 			    !(ifp->if_flags & IFF_PROMISC) &&
2990 			    sc->dc_if_flags & IFF_PROMISC) {
2991 				dc_setfilt(sc);
2992 			} else if (!(ifp->if_flags & IFF_RUNNING)) {
2993 				sc->dc_txthresh = 0;
2994 				dc_init(sc);
2995 			}
2996 		} else {
2997 			if (ifp->if_flags & IFF_RUNNING)
2998 				dc_stop(sc);
2999 		}
3000 		sc->dc_if_flags = ifp->if_flags;
3001 		error = 0;
3002 		break;
3003 	case SIOCADDMULTI:
3004 	case SIOCDELMULTI:
3005 		error = (command == SIOCADDMULTI) ?
3006 		    ether_addmulti(ifr, &sc->sc_arpcom) :
3007 		    ether_delmulti(ifr, &sc->sc_arpcom);
3008 
3009 		if (error == ENETRESET) {
3010 			/*
3011 			 * Multicast list has changed; set the hardware
3012 			 * filter accordingly.
3013 			 */
3014 			dc_setfilt(sc);
3015 			error = 0;
3016 		}
3017 		break;
3018 	case SIOCGIFMEDIA:
3019 	case SIOCSIFMEDIA:
3020 		mii = &sc->sc_mii;
3021 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3022 #ifdef SRM_MEDIA
3023 		if (sc->dc_srm_media)
3024 			sc->dc_srm_media = 0;
3025 #endif
3026 		break;
3027 	default:
3028 		error = EINVAL;
3029 		break;
3030 	}
3031 
3032 	splx(s);
3033 
3034 	return (error);
3035 }
3036 
3037 void
3038 dc_watchdog(ifp)
3039 	struct ifnet *ifp;
3040 {
3041 	struct dc_softc *sc;
3042 
3043 	sc = ifp->if_softc;
3044 
3045 	ifp->if_oerrors++;
3046 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
3047 
3048 	dc_stop(sc);
3049 	dc_reset(sc);
3050 	dc_init(sc);
3051 
3052 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
3053 		dc_start(ifp);
3054 }
3055 
3056 /*
3057  * Stop the adapter and free any mbufs allocated to the
3058  * RX and TX lists.
3059  */
3060 void
3061 dc_stop(sc)
3062 	struct dc_softc *sc;
3063 {
3064 	struct ifnet *ifp;
3065 	int i;
3066 
3067 	ifp = &sc->sc_arpcom.ac_if;
3068 	ifp->if_timer = 0;
3069 
3070 	timeout_del(&sc->dc_tick_tmo);
3071 
3072 	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));
3073 	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
3074 	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
3075 	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
3076 	sc->dc_link = 0;
3077 
3078 	/*
3079 	 * Free data in the RX lists.
3080 	 */
3081 	for (i = 0; i < DC_RX_LIST_CNT; i++) {
3082 		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
3083 			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;
3084 
3085 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3086 			    BUS_DMASYNC_POSTREAD);
3087 			bus_dmamap_unload(sc->sc_dmat, map);
3088 		}
3089 		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
3090 			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
3091 			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
3092 		}
3093 	}
3094 	bzero((char *)&sc->dc_ldata->dc_rx_list,
3095 		sizeof(sc->dc_ldata->dc_rx_list));
3096 
3097 	/*
3098 	 * Free the TX list buffers.
3099 	 */
3100 	for (i = 0; i < DC_TX_LIST_CNT; i++) {
3101 		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
3102 			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;
3103 
3104 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3105 			    BUS_DMASYNC_POSTWRITE);
3106 			bus_dmamap_unload(sc->sc_dmat, map);
3107 		}
3108 		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
3109 			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
3110 			    htole32(DC_TXCTL_SETUP)) {
3111 				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
3112 				continue;
3113 			}
3114 			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
3115 			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
3116 		}
3117 	}
3118 
3119 	bzero((char *)&sc->dc_ldata->dc_tx_list,
3120 		sizeof(sc->dc_ldata->dc_tx_list));
3121 
3122 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3123 }
3124 
3125 /*
3126  * Stop all chip I/O so that the kernel's probe routines don't
3127  * get confused by errant DMAs when rebooting.
3128  */
3129 void
3130 dc_shutdown(v)
3131 	void *v;
3132 {
3133 	struct dc_softc *sc = (struct dc_softc *)v;
3134 
3135 	dc_stop(sc);
3136 }
3137 
3138 struct cfdriver dc_cd = {
3139 	NULL, "dc", DV_IFNET
3140 };
3141