xref: /openbsd/sys/dev/ic/xl.c (revision 610f49f8)
1 /*	$OpenBSD: xl.c,v 1.34 2002/02/15 20:45:31 nordin Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
35  */
36 
37 /*
38  * 3Com 3c90x Etherlink XL PCI NIC driver
39  *
40  * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
41  * bus-master chips (3c90x cards and embedded controllers) including
42  * the following:
43  *
44  * 3Com 3c900-TPO	10Mbps/RJ-45
45  * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
46  * 3Com 3c905-TX	10/100Mbps/RJ-45
47  * 3Com 3c905-T4	10/100Mbps/RJ-45
48  * 3Com 3c900B-TPO	10Mbps/RJ-45
49  * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
50  * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
51  * 3Com 3c900B-FL	10Mbps/Fiber-optic
52  * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
53  * 3Com 3c905B-TX	10/100Mbps/RJ-45
54  * 3Com 3c900-FL/FX	10/100Mbps/Fiber-optic
55  * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
56  * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
57  * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
58  * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
59  * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
60  * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
61  * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
62  * 3Com 3C575TX		10/100Mbps LAN CardBus PC Card
63  * 3Com 3CCFE575BT	10/100Mbps LAN CardBus PC Card
64  * 3Com 3CCFE575CT	10/100Mbps LAN CardBus PC Card
65  * 3Com 3C3FE575CT	10/100Mbps LAN CardBus Type III PC Card
66  * 3Com 3CCFEM656	10/100Mbps LAN+56k Modem CardBus PC Card
67  * 3Com 3CCFEM656B	10/100Mbps LAN+56k Modem CardBus PC Card
68  * 3Com 3CCFEM656C	10/100Mbps LAN+56k Global Modem CardBus PC Card
69  * 3Com 3C3FEM656C	10/100Mbps LAN+56k Global Modem CardBus Type III PC Card
70  * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
71  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
72  * Dell on-board 3c920	10/100Mbps/RJ-45
73  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
74  * Dell Latitude laptop docking station embedded 3c905-TX
75  *
76  * Written by Bill Paul <wpaul@ctr.columbia.edu>
77  * Electrical Engineering Department
78  * Columbia University, New York City
79  */
80 
81 /*
 * The 3c90x series chips use a bus-master DMA interface for transferring
83  * packets to and from the controller chip. Some of the "vortex" cards
84  * (3c59x) also supported a bus master mode, however for those chips
85  * you could only DMA packets to/from a contiguous memory buffer. For
86  * transmission this would mean copying the contents of the queued mbuf
 * chain into an mbuf cluster and then DMAing the cluster. This extra
88  * copy would sort of defeat the purpose of the bus master support for
89  * any packet that doesn't fit into a single mbuf.
90  *
91  * By contrast, the 3c90x cards support a fragment-based bus master
92  * mode where mbuf chains can be encapsulated using TX descriptors.
93  * This is similar to other PCI chips such as the Texas Instruments
94  * ThunderLAN and the Intel 82557/82558.
95  *
96  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
97  * bus master chips because they maintain the old PIO interface for
98  * backwards compatibility, but starting with the 3c905B and the
99  * "cyclone" chips, the compatibility interface has been dropped.
100  * Since using bus master DMA is a big win, we use this driver to
101  * support the PCI "boomerang" chips even though they work with the
102  * "vortex" driver in order to obtain better performance.
103  *
104  * This driver is in the /sys/pci directory because it only supports
105  * PCI-based NICs.
106  */
107 
108 #include "bpfilter.h"
109 #include "vlan.h"
110 
111 #include <sys/param.h>
112 #include <sys/systm.h>
113 #include <sys/mbuf.h>
114 #include <sys/protosw.h>
115 #include <sys/socket.h>
116 #include <sys/ioctl.h>
117 #include <sys/errno.h>
118 #include <sys/malloc.h>
119 #include <sys/kernel.h>
120 #include <sys/proc.h>   /* only for declaration of wakeup() used by vm.h */
121 #include <sys/device.h>
122 
123 #include <net/if.h>
124 #include <net/if_dl.h>
125 #include <net/if_types.h>
126 #include <net/if_media.h>
127 
128 #ifdef INET
129 #include <netinet/in.h>
130 #include <netinet/in_systm.h>
131 #include <netinet/in_var.h>
132 #include <netinet/ip.h>
133 #include <netinet/if_ether.h>
134 #endif
135 
136 #include <dev/mii/mii.h>
137 #include <dev/mii/miivar.h>
138 
139 #include <machine/bus.h>
140 
141 #if NBPFILTER > 0
142 #include <net/bpf.h>
143 #endif
144 
145 #include <uvm/uvm_extern.h>              /* for vtophys */
146 
147 #include <dev/ic/xlreg.h>
148 
149 int xl_newbuf		__P((struct xl_softc *, struct xl_chain_onefrag *));
150 void xl_stats_update	__P((void *));
151 int xl_encap		__P((struct xl_softc *, struct xl_chain *,
152     struct mbuf * ));
153 int xl_encap_90xB	__P((struct xl_softc *, struct xl_chain *,
154     struct mbuf * ));
155 void xl_rxeof		__P((struct xl_softc *));
156 int xl_rx_resync	__P((struct xl_softc *));
157 void xl_txeof		__P((struct xl_softc *));
158 void xl_txeof_90xB	__P((struct xl_softc *));
159 void xl_txeoc		__P((struct xl_softc *));
160 int xl_intr		__P((void *));
161 void xl_start		__P((struct ifnet *));
162 void xl_start_90xB	__P((struct ifnet *));
163 int xl_ioctl		__P((struct ifnet *, u_long, caddr_t));
164 void xl_init		__P((void *));
165 void xl_stop		__P((struct xl_softc *));
166 void xl_freetxrx	__P((struct xl_softc *));
167 void xl_watchdog	__P((struct ifnet *));
168 void xl_shutdown	__P((void *));
169 int xl_ifmedia_upd	__P((struct ifnet *));
170 void xl_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
171 
172 int xl_eeprom_wait	__P((struct xl_softc *));
173 int xl_read_eeprom	__P((struct xl_softc *, caddr_t, int, int, int));
174 void xl_mii_sync	__P((struct xl_softc *));
175 void xl_mii_send	__P((struct xl_softc *, u_int32_t, int));
176 int xl_mii_readreg	__P((struct xl_softc *, struct xl_mii_frame *));
177 int xl_mii_writereg	__P((struct xl_softc *, struct xl_mii_frame *));
178 
179 void xl_setcfg		__P((struct xl_softc *));
180 void xl_setmode		__P((struct xl_softc *, int));
181 u_int8_t xl_calchash	__P((caddr_t));
182 void xl_setmulti	__P((struct xl_softc *));
183 void xl_setmulti_hash	__P((struct xl_softc *));
184 void xl_reset		__P((struct xl_softc *, int));
185 int xl_list_rx_init	__P((struct xl_softc *));
186 int xl_list_tx_init	__P((struct xl_softc *));
187 int xl_list_tx_init_90xB	__P((struct xl_softc *));
188 void xl_wait		__P((struct xl_softc *));
189 void xl_mediacheck	__P((struct xl_softc *));
190 void xl_choose_xcvr	__P((struct xl_softc *, int));
191 #ifdef notdef
192 void xl_testpacket	__P((struct xl_softc *));
193 #endif
194 
195 int xl_miibus_readreg	__P((struct device *, int, int));
196 void xl_miibus_writereg	__P((struct device *, int, int, int));
197 void xl_miibus_statchg	__P((struct device *));
198 
199 void xl_power __P((int, void *));
200 
201 void
202 xl_power(why, arg)
203 	int why;
204 	void *arg;
205 {
206 	struct xl_softc *sc = arg;
207 	struct ifnet *ifp;
208 	int s;
209 
210 	s = splimp();
211 	if (why != PWR_RESUME)
212 		xl_stop(sc);
213 	else {
214 		ifp = &sc->arpcom.ac_if;
215 		if (ifp->if_flags & IFF_UP) {
216 			xl_reset(sc, 1);
217 			xl_init(sc);
218 		}
219 	}
220 	splx(s);
221 }
222 
223 /*
224  * Murphy's law says that it's possible the chip can wedge and
225  * the 'command in progress' bit may never clear. Hence, we wait
226  * only a finite amount of time to avoid getting caught in an
227  * infinite loop. Normally this delay routine would be a macro,
228  * but it isn't called during normal operation so we can afford
229  * to make it a function.
230  */
231 void xl_wait(sc)
232 	struct xl_softc		*sc;
233 {
234 	register int		i;
235 
236 	for (i = 0; i < XL_TIMEOUT; i++) {
237 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
238 			break;
239 	}
240 
241 #ifdef DIAGNOSTIC
242 	if (i == XL_TIMEOUT)
243 		printf("xl%d: command never completed!\n", sc->xl_unit);
244 #endif
245 
246 	return;
247 }
248 
249 /*
250  * MII access routines are provided for adapters with external
251  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
252  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
253  * Note: if you don't perform the MDIO operations just right,
254  * it's possible to end up with code that works correctly with
255  * some chips/CPUs/processor speeds/bus speeds/etc but not
256  * with others.
257  */
/*
 * Set/clear bits in the window-4 PHY management register.
 * The macro argument is parenthesized so that compound masks such as
 * MII_CLR(XL_MII_CLK|XL_MII_DATA) expand correctly: without the
 * parentheses, ~x and |x would bind only to the first operand.
 */
#define MII_SET(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
265 
266 /*
267  * Sync the PHYs by setting data bit and strobing the clock 32 times.
268  */
269 void xl_mii_sync(sc)
270 	struct xl_softc		*sc;
271 {
272 	register int		i;
273 
274 	XL_SEL_WIN(4);
275 	MII_SET(XL_MII_DIR|XL_MII_DATA);
276 
277 	for (i = 0; i < 32; i++) {
278 		MII_SET(XL_MII_CLK);
279 		DELAY(1);
280 		MII_CLR(XL_MII_CLK);
281 		DELAY(1);
282 	}
283 
284 	return;
285 }
286 
287 /*
288  * Clock a series of bits through the MII.
289  */
290 void xl_mii_send(sc, bits, cnt)
291 	struct xl_softc		*sc;
292 	u_int32_t		bits;
293 	int			cnt;
294 {
295 	int			i;
296 
297 	XL_SEL_WIN(4);
298 	MII_CLR(XL_MII_CLK);
299 
300 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
301                 if (bits & i) {
302 			MII_SET(XL_MII_DATA);
303                 } else {
304 			MII_CLR(XL_MII_DATA);
305                 }
306 		DELAY(1);
307 		MII_CLR(XL_MII_CLK);
308 		DELAY(1);
309 		MII_SET(XL_MII_CLK);
310 	}
311 }
312 
313 /*
 * Read a PHY register through the MII.
315  */
/*
 * Bit-bang one MII read transaction described by 'frame'.
 * On success, returns 0 with frame->mii_data filled in; returns 1 if
 * the PHY did not acknowledge the request. Runs at splimp() since the
 * bit timing must not be interrupted.
 */
int xl_mii_readreg(sc, frame)
	struct xl_softc		*sc;
	struct xl_mii_frame	*frame;

{
	int			i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Select register window 4 (PHY management lives there).
	 */

	XL_SEL_WIN(4);

	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
	/*
 	 * Turn on data xmit (we drive the data line).
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Send command/address info: start delimiter, read opcode,
	 * PHY address, register address.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	DELAY(1);
	MII_SET(XL_MII_CLK);
	DELAY(1);

	/* Turn off xmit so the PHY can drive the data line. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack: the PHY pulls the data line low to acknowledge. */
	MII_CLR(XL_MII_CLK);
	DELAY(1);
	MII_SET(XL_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			DELAY(1);
			MII_SET(XL_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(XL_MII_CLK);
		DELAY(1);
	}

fail:

	/* One final clock cycle terminates the transaction. */
	MII_CLR(XL_MII_CLK);
	DELAY(1);
	MII_SET(XL_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
410 
411 /*
412  * Write to a PHY register through the MII.
413  */
/*
 * Bit-bang one MII write transaction described by 'frame'
 * (frame->mii_phyaddr, mii_regaddr and mii_data must be set by the
 * caller). Always returns 0. Runs at splimp() since the bit timing
 * must not be interrupted.
 */
int xl_mii_writereg(sc, frame)
	struct xl_softc		*sc;
	struct xl_mii_frame	*frame;

{
	int			s;

	s = splimp();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/*
	 * Select the window 4 (PHY management lives there).
	 */
	XL_SEL_WIN(4);

	/*
 	 * Turn on data output (we drive the data line for the
 	 * whole write transaction).
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/* Start delimiter, opcode, addresses, turnaround, then data. */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	DELAY(1);
	MII_CLR(XL_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(XL_MII_DIR);

	splx(s);

	return(0);
}
464 
465 int
466 xl_miibus_readreg(self, phy, reg)
467 	struct device *self;
468 	int phy, reg;
469 {
470 	struct xl_softc *sc = (struct xl_softc *)self;
471 	struct xl_mii_frame	frame;
472 
473 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
474 		return (0);
475 
476 	bzero((char *)&frame, sizeof(frame));
477 
478 	frame.mii_phyaddr = phy;
479 	frame.mii_regaddr = reg;
480 	xl_mii_readreg(sc, &frame);
481 
482 	return(frame.mii_data);
483 }
484 
485 void
486 xl_miibus_writereg(self, phy, reg, data)
487 	struct device *self;
488 	int phy, reg, data;
489 {
490 	struct xl_softc *sc = (struct xl_softc *)self;
491 	struct xl_mii_frame	frame;
492 
493 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
494 		return;
495 
496 	bzero((char *)&frame, sizeof(frame));
497 
498 	frame.mii_phyaddr = phy;
499 	frame.mii_regaddr = reg;
500 	frame.mii_data = data;
501 
502 	xl_mii_writereg(sc, &frame);
503 }
504 
505 void
506 xl_miibus_statchg(self)
507 	struct device *self;
508 {
509 	struct xl_softc *sc = (struct xl_softc *)self;
510 
511 	xl_setcfg(sc);
512 
513 	XL_SEL_WIN(3);
514 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
515 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
516 	else
517 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
518 		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
519 }
520 
521 /*
522  * The EEPROM is slow: give it time to come ready after issuing
523  * it a command.
524  */
525 int xl_eeprom_wait(sc)
526 	struct xl_softc		*sc;
527 {
528 	int			i;
529 
530 	for (i = 0; i < 100; i++) {
531 		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
532 			DELAY(162);
533 		else
534 			break;
535 	}
536 
537 	if (i == 100) {
538 		printf("xl%d: eeprom failed to come ready\n", sc->xl_unit);
539 		return(1);
540 	}
541 
542 	return(0);
543 }
544 
545 /*
546  * Read a sequence of words from the EEPROM. Note that ethernet address
547  * data is stored in the EEPROM in network byte order.
548  */
/*
 * Read 'cnt' 16-bit words from the EEPROM starting at word offset
 * 'off' into 'dest'. If 'swap' is set, each word is byte-swapped
 * from network order (the EEPROM stores the ethernet address in
 * network byte order). Returns 0 on success, 1 on timeout.
 */
int xl_read_eeprom(sc, dest, off, cnt, swap)
	struct xl_softc		*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			err = 0, i;
	u_int16_t		word = 0, *ptr;
/* Fold a word offset into the split 5-bit command/offset encoding. */
#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
	/* WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return(1);

	/* Some chips keep their interesting data 0x30 words in. */
	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		/* 8-bit-offset ROMs use opcode 2 in the high byte. */
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD, (2<<8) | (off + i ));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return(err ? 1 : 0);
}
590 
591 /*
592  * This routine is taken from the 3Com Etherlink XL manual,
593  * page 10-7. It calculates a CRC of the supplied multicast
594  * group address and returns the lower 8 bits, which are used
595  * as the multicast filter position.
596  * Note: the 3c905B currently only supports a 64-bit hash table,
597  * which means we really only need 6 bits, but the manual indicates
598  * that future chip revisions will have a 256-bit hash table,
599  * hence the routine is set up to calculate 8 bits of position
600  * info in case we need it some day.
601  * Note II, The Sequel: _CURRENT_ versions of the 3c905B have a
602  * 256 bit hash table. This means we have to use all 8 bits regardless.
603  * On older cards, the upper 2 bits will be ignored. Grrrr....
604  */
605 u_int8_t xl_calchash(addr)
606 	caddr_t			addr;
607 {
608 	u_int32_t		crc, carry;
609 	int			i, j;
610 	u_int8_t		c;
611 
612 	/* Compute CRC for the address value. */
613 	crc = 0xFFFFFFFF; /* initial value */
614 
615 	for (i = 0; i < 6; i++) {
616 		c = *(addr + i);
617 		for (j = 0; j < 8; j++) {
618 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
619 			crc <<= 1;
620 			c >>= 1;
621 			if (carry)
622 				crc = (crc ^ 0x04c11db6) | carry;
623 		}
624 	}
625 
626 	/* return the filter bit position */
627 	return(crc & 0x000000FF);
628 }
629 
630 /*
631  * NICs older than the 3c905B have only one multicast option, which
632  * is to enable reception of all multicast frames.
633  */
634 void xl_setmulti(sc)
635 	struct xl_softc		*sc;
636 {
637 	struct ifnet		*ifp;
638 	struct arpcom *ac = &sc->arpcom;
639 	struct ether_multi *enm;
640 	struct ether_multistep step;
641 	u_int8_t		rxfilt;
642 	int			mcnt = 0;
643 
644 	ifp = &sc->arpcom.ac_if;
645 
646 	XL_SEL_WIN(5);
647 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
648 
649 	if (ifp->if_flags & IFF_ALLMULTI) {
650 		rxfilt |= XL_RXFILTER_ALLMULTI;
651 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
652 		return;
653 	}
654 
655 	ETHER_FIRST_MULTI(step, ac, enm);
656 	while (enm != NULL) {
657 		mcnt++;
658 		ETHER_NEXT_MULTI(step, enm);
659 	}
660 
661 	if (mcnt)
662 		rxfilt |= XL_RXFILTER_ALLMULTI;
663 	else
664 		rxfilt &= ~XL_RXFILTER_ALLMULTI;
665 
666 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
667 
668 	return;
669 }
670 
671 /*
672  * 3c905B adapters have a hash filter that we can program.
673  */
/*
 * Program the 3c905B's multicast hash filter: clear all hash bits,
 * then set one bit per joined multicast group. Address ranges cannot
 * be represented in the hash, so encountering one falls back to
 * all-multicast mode (note the goto back into the IFF_ALLMULTI
 * branch above).
 */
void xl_setmulti_hash(sc)
	struct xl_softc		*sc;
{
	struct ifnet		*ifp;
	int			h = 0, i;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t		rxfilt;
	int			mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	if (ifp->if_flags & IFF_ALLMULTI) {
allmulti:
		rxfilt |= XL_RXFILTER_ALLMULTI;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
		return;
	} else
		rxfilt &= ~XL_RXFILTER_ALLMULTI;


	/* first, zot all the existing hash bits */
	for (i = 0; i < XL_HASHFILT_SIZE; i++)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* A low/high mismatch means a range of addresses. */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		h = xl_calchash(enm->enm_addrlo);
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Only enable hash matching if at least one bit was set. */
	if (mcnt)
		rxfilt |= XL_RXFILTER_MULTIHASH;
	else
		rxfilt &= ~XL_RXFILTER_MULTIHASH;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);

	return;
}
725 
726 #ifdef notdef
/*
 * Debug-only helper (compiled out via #ifdef notdef): build a tiny
 * loopback-style test frame addressed to ourselves and queue it for
 * transmission via xl_start().
 */
void xl_testpacket(sc)
	struct xl_softc		*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			error;

	ifp = &sc->arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	/* Source and destination are both our own station address. */
	bcopy(&sc->arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	/* Bogus ethertype 3 plus a 3-byte payload of 00 00 E3. */
	mtod(m, struct ether_header *)->ether_type = htons(3);
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
	xl_start(ifp);

	return;
}
755 #endif
756 
757 void xl_setcfg(sc)
758 	struct xl_softc *sc;
759 {
760 	u_int32_t icfg;
761 
762 	XL_SEL_WIN(3);
763 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
764 	icfg &= ~XL_ICFG_CONNECTOR_MASK;
765 	if (sc->xl_media & XL_MEDIAOPT_MII ||
766 	    sc->xl_media & XL_MEDIAOPT_BT4)
767 		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
768 	if (sc->xl_media & XL_MEDIAOPT_BTX)
769 		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
770 
771 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
772 	CSR_WRITE_4(sc, XL_COMMAND, XL_CMD_COAX_STOP);
773 }
774 
/*
 * Force the transceiver and duplex settings for a manually selected
 * (non-MII) media type: pick the connector in the internal config
 * register, adjust link-beat/jabber-guard/SQE in the window-4 media
 * status register, set MAC duplex, and start or stop the coax
 * transceiver as appropriate.
 */
void xl_setmode(sc, media)
	struct xl_softc *sc;
	int media;
{
	u_int32_t icfg;
	u_int16_t mediastat;

	printf("xl%d: selecting ", sc->xl_unit);

	XL_SEL_WIN(4);
	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);

	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (IFM_SUBTYPE(media) == IFM_10_T) {
			printf("10baseT transceiver, ");
			sc->xl_xcvr = XL_XCVR_10BT;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
			mediastat |= XL_MEDIASTAT_LINKBEAT|
					XL_MEDIASTAT_JABGUARD;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
			printf("100baseFX port, ");
			sc->xl_xcvr = XL_XCVR_100BFX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
			mediastat |= XL_MEDIASTAT_LINKBEAT;
			mediastat &= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		if (IFM_SUBTYPE(media) == IFM_10_5) {
			printf("AUI port, ");
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
					XL_MEDIASTAT_JABGUARD);
			/*
			 * NOTE(review): '|= ~X' sets every bit EXCEPT
			 * SQEENB; the intent was presumably
			 * 'mediastat |= XL_MEDIASTAT_SQEENB' to enable
			 * SQE for AUI. Same text exists upstream --
			 * verify against hardware before changing.
			 */
			mediastat |= ~XL_MEDIASTAT_SQEENB;
		}
		if (IFM_SUBTYPE(media) == IFM_10_FL) {
			printf("10baseFL transceiver, ");
			sc->xl_xcvr = XL_XCVR_AUI;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
					XL_MEDIASTAT_JABGUARD);
			/* NOTE(review): same '|= ~' oddity as above. */
			mediastat |= ~XL_MEDIASTAT_SQEENB;
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (IFM_SUBTYPE(media) == IFM_10_2) {
			printf("BNC port, ");
			sc->xl_xcvr = XL_XCVR_COAX;
			icfg &= ~XL_ICFG_CONNECTOR_MASK;
			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
					XL_MEDIASTAT_JABGUARD|
					XL_MEDIASTAT_SQEENB);
		}
	}

	/* 100baseFX is inherently full duplex. */
	if ((media & IFM_GMASK) == IFM_FDX ||
			IFM_SUBTYPE(media) == IFM_100_FX) {
		printf("full duplex\n");
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	} else {
		printf("half duplex\n");
		XL_SEL_WIN(3);
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
	}

	/* The coax transceiver must be started for BNC, stopped otherwise. */
	if (IFM_SUBTYPE(media) == IFM_10_2)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
	DELAY(800);
	XL_SEL_WIN(7);
}
867 
868 void xl_reset(sc, hard)
869 	struct xl_softc		*sc;
870 	int hard;
871 {
872 	register int		i;
873 
874 	XL_SEL_WIN(0);
875 	if (hard || (sc->xl_flags & XL_FLAG_WEIRDRESET)) {
876 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
877 		    ((sc->xl_flags & XL_FLAG_WEIRDRESET)?0xFF:0));
878 	}
879 	else
880 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | 0x0010);
881 	xl_wait(sc);
882 
883 	for (i = 0; i < XL_TIMEOUT; i++) {
884 		DELAY(10);
885 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
886 			break;
887 	}
888 
889 	DELAY(100000);
890 
891 	/* Reset TX and RX. */
892 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
893 	xl_wait(sc);
894 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
895 	xl_wait(sc);
896 
897 	if (sc->xl_flags & XL_FLAG_WEIRDRESET) {
898 		XL_SEL_WIN(2);
899 		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
900 		    XL_W2_RESET_OPTIONS) | 0x4010);
901 	}
902 
903 	/* Wait a little while for the chip to get its brains in order. */
904 	DELAY(100000);
905         return;
906 }
907 
908 /*
909  * This routine is a kludge to work around possible hardware faults
910  * or manufacturing defects that can cause the media options register
911  * (or reset options register, as it's called for the first generation
912  * 3c90x adapters) to return an incorrect result. I have encountered
913  * one Dell Latitude laptop docking station with an integrated 3c905-TX
914  * which doesn't have any of the 'mediaopt' bits set. This screws up
915  * the attach routine pretty badly because it doesn't know what media
916  * to look for. If we find ourselves in this predicament, this routine
917  * will try to guess the media options values and warn the user of a
918  * possible manufacturing defect with his adapter/system/whatever.
919  */
920 void xl_mediacheck(sc)
921 	struct xl_softc		*sc;
922 {
923 	/*
924 	 * If some of the media options bits are set, assume they are
925 	 * correct. If not, try to figure it out down below.
926 	 * XXX I should check for 10baseFL, but I don't have an adapter
927 	 * to test with.
928 	 */
929 	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
930 		/*
931 	 	 * Check the XCVR value. If it's not in the normal range
932 	 	 * of values, we need to fake it up here.
933 	 	 */
934 		if (sc->xl_xcvr <= XL_XCVR_AUTO)
935 			return;
936 		else {
937 			printf("xl%d: bogus xcvr value "
938 			"in EEPROM (%x)\n", sc->xl_unit, sc->xl_xcvr);
939 			printf("xl%d: choosing new default based "
940 				"on card type\n", sc->xl_unit);
941 		}
942 	} else {
943 		if (sc->xl_type == XL_TYPE_905B &&
944 		    sc->xl_media & XL_MEDIAOPT_10FL)
945 			return;
946 		printf("xl%d: WARNING: no media options bits set in "
947 			"the media options register!!\n", sc->xl_unit);
948 		printf("xl%d: this could be a manufacturing defect in "
949 			"your adapter or system\n", sc->xl_unit);
950 		printf("xl%d: attempting to guess media type; you "
951 			"should probably consult your vendor\n", sc->xl_unit);
952 	}
953 
954 	xl_choose_xcvr(sc, 1);
955 }
956 
/*
 * Guess the media options and default transceiver from the EEPROM's
 * PCI device ID when the media options register can't be trusted.
 * If 'verbose', print what was guessed. Unknown IDs fall back to
 * plain 10baseT.
 */
void xl_choose_xcvr(sc, verbose)
	struct xl_softc *sc;
	int verbose;
{
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch(devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("xl%d: guessing 10BaseT transceiver\n",
			    sc->xl_unit);
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("xl%d: guessing COMBO (AUI/BNC/TP)\n",
			    sc->xl_unit);
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("xl%d: guessing TPC (BNC/TP)\n", sc->xl_unit);
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			printf("xl%d: guessing 10baseFL\n", sc->xl_unit);
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("xl%d: guessing MII\n", sc->xl_unit);
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("xl%d: guessing 100BaseT4/MII\n", sc->xl_unit);
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("xl%d: guessing 10/100 internal\n",
			    sc->xl_unit);
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("xl%d: guessing 10/100 plus BNC/AUI\n",
			    sc->xl_unit);
		break;
	/* CardBus variants all use MII; no guess message is printed. */
	case TC_DEVICEID_3C575_CARDBUS:
	case TC_DEVICEID_3CCFE575BT_CARDBUS:
	case TC_DEVICEID_3CCFE575CT_CARDBUS:
	case TC_DEVICEID_3CCFEM656_CARDBUS:
	case TC_DEVICEID_3CCFEM656B_CARDBUS:
	case TC_DEVICEID_3CCFEM656C_CARDBUS:
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		break;
	default:
		printf("xl%d: unknown device ID: %x -- "
			"defaulting to 10baseT\n", sc->xl_unit, devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}

	return;
}
1052 
1053 /*
1054  * Initialize the transmit descriptors.
1055  */
1056 int xl_list_tx_init(sc)
1057 	struct xl_softc		*sc;
1058 {
1059 	struct xl_chain_data	*cd;
1060 	struct xl_list_data	*ld;
1061 	int			i;
1062 
1063 	cd = &sc->xl_cdata;
1064 	ld = sc->xl_ldata;
1065 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1066 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1067 		if (i == (XL_TX_LIST_CNT - 1))
1068 			cd->xl_tx_chain[i].xl_next = NULL;
1069 		else
1070 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1071 	}
1072 
1073 	cd->xl_tx_free = &cd->xl_tx_chain[0];
1074 	cd->xl_tx_tail = cd->xl_tx_head = NULL;
1075 
1076 	return(0);
1077 }
1078 
1079 /*
 * Initialize the transmit descriptors (3c905B ring variant).
1081  */
1082 int
1083 xl_list_tx_init_90xB(sc)
1084 	struct xl_softc *sc;
1085 {
1086 	struct xl_chain_data *cd;
1087 	struct xl_list_data *ld;
1088 	int i;
1089 
1090 	cd = &sc->xl_cdata;
1091 	ld = sc->xl_ldata;
1092 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1093 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1094 		cd->xl_tx_chain[i].xl_phys = vtophys(&ld->xl_tx_list[i]);
1095 		if (i == (XL_TX_LIST_CNT - 1))
1096 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
1097 		else
1098 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1099 		if (i == 0)
1100 			cd->xl_tx_chain[i].xl_prev =
1101 			    &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
1102 		else
1103 			cd->xl_tx_chain[i].xl_prev =
1104 			    &cd->xl_tx_chain[i - 1];
1105 	}
1106 
1107 	bzero((char *)ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
1108 	ld->xl_tx_list[0].xl_status = XL_TXSTAT_EMPTY;
1109 
1110 	cd->xl_tx_prod = 1;
1111 	cd->xl_tx_cons = 1;
1112 	cd->xl_tx_cnt = 0;
1113 
1114 	return (0);
1115 }
1116 
1117 /*
1118  * Initialize the RX descriptors and allocate mbufs for them. Note that
1119  * we arrange the descriptors in a closed ring, so that the last descriptor
1120  * points back to the first.
1121  */
1122 int xl_list_rx_init(sc)
1123 	struct xl_softc		*sc;
1124 {
1125 	struct xl_chain_data	*cd;
1126 	struct xl_list_data	*ld;
1127 	int			i;
1128 
1129 	cd = &sc->xl_cdata;
1130 	ld = sc->xl_ldata;
1131 
1132 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1133 		cd->xl_rx_chain[i].xl_ptr =
1134 			(struct xl_list_onefrag *)&ld->xl_rx_list[i];
1135 		if (xl_newbuf(sc, &cd->xl_rx_chain[i]) == ENOBUFS)
1136 			return(ENOBUFS);
1137 		if (i == (XL_RX_LIST_CNT - 1)) {
1138 			cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[0];
1139 			ld->xl_rx_list[i].xl_next =
1140 			    vtophys(&ld->xl_rx_list[0]);
1141 		} else {
1142 			cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[i + 1];
1143 			ld->xl_rx_list[i].xl_next =
1144 			    vtophys(&ld->xl_rx_list[i + 1]);
1145 		}
1146 	}
1147 
1148 	cd->xl_rx_head = &cd->xl_rx_chain[0];
1149 
1150 	return(0);
1151 }
1152 
1153 /*
1154  * Initialize an RX descriptor and attach an MBUF cluster.
1155  */
1156 int xl_newbuf(sc, c)
1157 	struct xl_softc		*sc;
1158 	struct xl_chain_onefrag	*c;
1159 {
1160 	struct mbuf		*m_new = NULL;
1161 
1162 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1163 	if (m_new == NULL)
1164 		return(ENOBUFS);
1165 
1166 	MCLGET(m_new, M_DONTWAIT);
1167 	if (!(m_new->m_flags & M_EXT)) {
1168 		m_freem(m_new);
1169 		return(ENOBUFS);
1170 	}
1171 
1172 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1173 
1174 	/* Force longword alignment for packet payload. */
1175 	m_adj(m_new, ETHER_ALIGN);
1176 
1177 	c->xl_mbuf = m_new;
1178 	c->xl_ptr->xl_frag.xl_addr = vtophys(mtod(m_new, caddr_t));
1179 	c->xl_ptr->xl_frag.xl_len = MCLBYTES | XL_LAST_FRAG;
1180 	c->xl_ptr->xl_status = 0;
1181 
1182 	return(0);
1183 }
1184 
1185 int xl_rx_resync(sc)
1186 	struct xl_softc *sc;
1187 {
1188 	struct xl_chain_onefrag *pos;
1189 	int i;
1190 
1191 	pos = sc->xl_cdata.xl_rx_head;
1192 
1193 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1194 		if (pos->xl_ptr->xl_status)
1195 			break;
1196 		pos = pos->xl_next;
1197 	}
1198 
1199 	if (i == XL_RX_LIST_CNT)
1200 		return (0);
1201 
1202 	sc->xl_cdata.xl_rx_head = pos;
1203 
1204 	return (EAGAIN);
1205 }
1206 
1207 /*
1208  * A frame has been uploaded: pass the resulting mbuf chain up to
1209  * the higher level protocols.
1210  */
1211 void xl_rxeof(sc)
1212 	struct xl_softc		*sc;
1213 {
1214         struct mbuf		*m;
1215         struct ifnet		*ifp;
1216 	struct xl_chain_onefrag	*cur_rx;
1217 	int			total_len = 0;
1218 	u_int16_t		rxstat;
1219 
1220 	ifp = &sc->arpcom.ac_if;
1221 
1222 again:
1223 
1224 	while((rxstat = sc->xl_cdata.xl_rx_head->xl_ptr->xl_status)) {
1225 		cur_rx = sc->xl_cdata.xl_rx_head;
1226 		sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
1227 
1228 		/*
1229 		 * If an error occurs, update stats, clear the
1230 		 * status word and leave the mbuf cluster in place:
1231 		 * it should simply get re-used next time this descriptor
1232 	 	 * comes up in the ring.
1233 		 */
1234 		if (rxstat & XL_RXSTAT_UP_ERROR) {
1235 			ifp->if_ierrors++;
1236 			cur_rx->xl_ptr->xl_status = 0;
1237 			continue;
1238 		}
1239 
1240 		/*
1241 		 * If there error bit was not set, the upload complete
1242 		 * bit should be set which means we have a valid packet.
1243 		 * If not, something truly strange has happened.
1244 		 */
1245 		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1246 			printf("xl%d: bad receive status -- "
1247 			    "packet dropped", sc->xl_unit);
1248 			ifp->if_ierrors++;
1249 			cur_rx->xl_ptr->xl_status = 0;
1250 			continue;
1251 		}
1252 
1253 		/* No errors; receive the packet. */
1254 		m = cur_rx->xl_mbuf;
1255 		total_len = cur_rx->xl_ptr->xl_status & XL_RXSTAT_LENMASK;
1256 
1257 		/*
1258 		 * Try to conjure up a new mbuf cluster. If that
1259 		 * fails, it means we have an out of memory condition and
1260 		 * should leave the buffer in place and continue. This will
1261 		 * result in a lost packet, but there's little else we
1262 		 * can do in this situation.
1263 		 */
1264 		if (xl_newbuf(sc, cur_rx) == ENOBUFS) {
1265 			ifp->if_ierrors++;
1266 			cur_rx->xl_ptr->xl_status = 0;
1267 			continue;
1268 		}
1269 
1270 		ifp->if_ipackets++;
1271 		m->m_pkthdr.rcvif = ifp;
1272 		m->m_pkthdr.len = m->m_len = total_len;
1273 #if NBPFILTER > 0
1274 		/*
1275 		 * Handle BPF listeners. Let the BPF user see the packet.
1276 		 */
1277 		if (ifp->if_bpf) {
1278 			bpf_mtap(ifp->if_bpf, m);
1279 		}
1280 #endif
1281 		ether_input_mbuf(ifp, m);
1282 	}
1283 
1284 	/*
1285 	 * Handle the 'end of channel' condition. When the upload
1286 	 * engine hits the end of the RX ring, it will stall. This
1287 	 * is our cue to flush the RX ring, reload the uplist pointer
1288 	 * register and unstall the engine.
1289 	 * XXX This is actually a little goofy. With the ThunderLAN
1290 	 * chip, you get an interrupt when the receiver hits the end
1291 	 * of the receive ring, which tells you exactly when you
1292 	 * you need to reload the ring pointer. Here we have to
1293 	 * fake it. I'm mad at myself for not being clever enough
1294 	 * to avoid the use of a goto here.
1295 	 */
1296 	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1297 		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1298 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1299 		xl_wait(sc);
1300 		CSR_WRITE_4(sc, XL_UPLIST_PTR,
1301 			vtophys(&sc->xl_ldata->xl_rx_list[0]));
1302 		sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
1303 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1304 		goto again;
1305 	}
1306 
1307 	return;
1308 }
1309 
1310 /*
1311  * A frame was downloaded to the chip. It's safe for us to clean up
1312  * the list buffers.
1313  */
void xl_txeof(sc)
	struct xl_softc		*sc;
{
	struct xl_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while(sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		/* A non-zero downlist pointer means the chip is still busy. */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		/* Unlink the finished descriptor and release its mbuf. */
		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		m_freem(cur_tx->xl_mbuf);
		cur_tx->xl_mbuf = NULL;
		ifp->if_opackets++;

		/* Return the descriptor to the free list. */
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		/* Queue fully drained: allow new transmissions. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/*
		 * Frames still pending: if the download engine stalled
		 * or lost its list pointer, restart it at the current
		 * queue head.
		 */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
				vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}

	return;
}
1363 
1364 void
1365 xl_txeof_90xB(sc)
1366 	struct xl_softc *sc;
1367 {
1368 	struct xl_chain *cur_tx = NULL;
1369 	struct ifnet *ifp;
1370 	int idx;
1371 
1372 	ifp = &sc->arpcom.ac_if;
1373 
1374 	idx = sc->xl_cdata.xl_tx_cons;
1375 	while(idx != sc->xl_cdata.xl_tx_prod) {
1376 
1377 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1378 
1379 		if (!(cur_tx->xl_ptr->xl_status & XL_TXSTAT_DL_COMPLETE))
1380 			break;
1381 
1382 		if (cur_tx->xl_mbuf != NULL) {
1383 			m_freem(cur_tx->xl_mbuf);
1384 			cur_tx->xl_mbuf = NULL;
1385 		}
1386 
1387 		ifp->if_opackets++;
1388 
1389 		sc->xl_cdata.xl_tx_cnt--;
1390 		XL_INC(idx, XL_TX_LIST_CNT);
1391 		ifp->if_timer = 0;
1392 	}
1393 
1394 	sc->xl_cdata.xl_tx_cons = idx;
1395 
1396 	if (cur_tx != NULL)
1397 		ifp->if_flags &= ~IFF_OACTIVE;
1398 }
1399 
1400 /*
1401  * TX 'end of channel' interrupt handler. Actually, we should
1402  * only get a 'TX complete' interrupt if there's a transmit error,
1403  * so this is really TX error handler.
1404  */
void xl_txeoc(sc)
	struct xl_softc		*sc;
{
	u_int8_t		txstat;

	/* Each non-zero read of TX_STATUS reports one queued TX event. */
	while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
			txstat & XL_TXSTATUS_JABBER ||
			txstat & XL_TXSTATUS_RECLAIM) {
			/*
			 * NOTE(review): status 0x90 is deliberately not
			 * logged -- presumably a benign/frequent reclaim
			 * status; confirm against the status bit layout.
			 */
			if (txstat != 0x90) {
				printf("xl%d: transmission error: %x\n",
				    sc->xl_unit, txstat);
			}
			/* Reset the transmitter, then reload the TX list. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			if (sc->xl_type == XL_TYPE_905B) {
				int i;
				struct xl_chain *c;
				/* Restart at the current consumer slot. */
				i = sc->xl_cdata.xl_tx_cons;
				c = &sc->xl_cdata.xl_tx_chain[i];
				CSR_WRITE_4(sc, XL_DOWNLIST_PTR, c->xl_phys);
				/* Re-arm the download polling interval. */
				CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
			} else {
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    vtophys(sc->xl_cdata.xl_tx_head->xl_ptr));
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			/*
			 * On underrun, bump the TX start threshold so the
			 * chip buffers more of a frame before transmitting.
			 */
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
#ifdef notdef
				printf("xl%d: tx underrun, increasing tx start"
				    " threshold to %d\n", sc->xl_unit,
				    sc->xl_tx_thresh);
#endif
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			/* Non-fatal event: just restart the transmitter. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}

	return;
}
1467 
/*
 * Interrupt handler: ack and dispatch every pending event, then
 * kick the transmitter if packets are waiting. Returns non-zero
 * if the interrupt was ours.
 */
int xl_intr(arg)
	void			*arg;
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Loop until no interesting status bits remain set. */
	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS) {

		claimed = 1;

		/* Ack only the bits we are about to service. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		/* Bus-specific interrupt acknowledge hook, if any. */
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (status & XL_STAT_UP_COMPLETE) {
			int curpkts;

			/*
			 * If the RX handler made no progress, the ring
			 * may be out of sync with the chip; resync and
			 * drain until it stabilizes.
			 */
			curpkts = ifp->if_ipackets;
			xl_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while (xl_rx_resync(sc))
					xl_rxeof(sc);
			}
		}

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		/* TX complete only fires on transmit errors; see xl_txeoc(). */
		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Adapter failure: full reset and reinitialization. */
		if (status & XL_STAT_ADFAIL) {
			xl_reset(sc, 0);
			xl_init(sc);
		}

		/* Stats counters near overflow: harvest them now. */
		if (status & XL_STAT_STATSOFLOW) {
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	/* Kick the transmitter if packets are queued. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		(*ifp->if_start)(ifp);

	return (claimed);
}
1529 
1530 void xl_stats_update(xsc)
1531 	void			*xsc;
1532 {
1533 	struct xl_softc		*sc;
1534 	struct ifnet		*ifp;
1535 	struct xl_stats		xl_stats;
1536 	u_int8_t		*p;
1537 	int			i;
1538 	struct mii_data		*mii = NULL;
1539 
1540 	bzero((char *)&xl_stats, sizeof(struct xl_stats));
1541 
1542 	sc = xsc;
1543 	ifp = &sc->arpcom.ac_if;
1544 	if (sc->xl_hasmii)
1545 		mii = &sc->sc_mii;
1546 
1547 	p = (u_int8_t *)&xl_stats;
1548 
1549 	/* Read all the stats registers. */
1550 	XL_SEL_WIN(6);
1551 
1552 	for (i = 0; i < 16; i++)
1553 		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
1554 
1555 	ifp->if_ierrors += xl_stats.xl_rx_overrun;
1556 
1557 	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
1558 				xl_stats.xl_tx_single_collision +
1559 				xl_stats.xl_tx_late_collision;
1560 
1561 	/*
1562 	 * Boomerang and cyclone chips have an extra stats counter
1563 	 * in window 4 (BadSSD). We have to read this too in order
1564 	 * to clear out all the stats registers and avoid a statsoflow
1565 	 * interrupt.
1566 	 */
1567 	XL_SEL_WIN(4);
1568 	CSR_READ_1(sc, XL_W4_BADSSD);
1569 
1570 	if (mii != NULL)
1571 		mii_tick(mii);
1572 
1573 	XL_SEL_WIN(7);
1574 
1575 	if (!sc->xl_stats_no_timeout)
1576 		timeout_add(&sc->xl_stsup_tmo, hz);
1577 
1578 	return;
1579 }
1580 
1581 /*
1582  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1583  * pointers to the fragment pointers.
1584  */
int xl_encap(sc, c, m_head)
	struct xl_softc		*sc;
	struct xl_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct xl_frag		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
 	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
 	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	/* Zero-length mbufs are skipped; they contribute no fragment. */
	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == XL_MAXFRAGS)
				break;
			total_len+= m->m_len;
			c->xl_ptr->xl_frag[frag].xl_addr =
					vtophys(mtod(m, vm_offset_t));
			c->xl_ptr->xl_frag[frag].xl_len = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		/* Coalesce the whole chain into the single new mbuf. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->xl_ptr->xl_frag[0];
		f->xl_addr = vtophys(mtod(m_new, caddr_t));
		f->xl_len = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * NOTE(review): if m_head contained only zero-length mbufs,
	 * frag would still be 0 here and xl_frag[frag - 1] would index
	 * before the array -- presumably callers never pass an empty
	 * chain; verify at the call sites.
	 */
	c->xl_mbuf = m_head;
	c->xl_ptr->xl_frag[frag - 1].xl_len |=  XL_LAST_FRAG;
	c->xl_ptr->xl_status = total_len;
	c->xl_ptr->xl_next = 0;

	return(0);
}
1654 
1655 /*
1656  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1657  * to the mbuf data regions directly in the transmit lists. We also save a
1658  * copy of the pointers since the transmit list fragment pointers are
1659  * physical addresses.
1660  */
void xl_start(ifp)
	struct ifnet		*ifp;
{
	struct xl_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		/* Try to reclaim completed descriptors before giving up. */
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	/* Dequeue packets and build a chained list of TX descriptors. */
	while(sc->xl_cdata.xl_tx_free != NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;

		cur_tx->xl_next = NULL;

		/* Pack the data into the descriptor. */
		xl_encap(sc, cur_tx, m_head);

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next = vtophys(cur_tx->xl_ptr);
		}
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= XL_TXSTAT_DL_INTR;

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/*
		 * Append the new chain after the pending tail and
		 * cancel the old tail's interrupt request, so only
		 * the new last descriptor interrupts.
		 */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
					vtophys(start_tx->xl_ptr);
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
					~XL_TXSTAT_DL_INTR;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	/* If the download engine has no list pointer, load ours. */
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, vtophys(start_tx->xl_ptr));

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
 	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	xl_rxeof(sc);

	return;
}
1780 
1781 int xl_encap_90xB(sc, c, m_head)
1782 	struct xl_softc *sc;
1783 	struct xl_chain *c;
1784 	struct mbuf *m_head;
1785 {
1786 	int frag = 0;
1787 	struct xl_frag *f = NULL;
1788 	struct mbuf *m;
1789 	struct xl_list *d;
1790 
1791 	/*
1792 	 * Start packing the mbufs in this chain into
1793 	 * the fragment pointers. Stop when we run out
1794 	 * of fragments or hit the end of the mbuf chain.
1795 	 */
1796 	d = c->xl_ptr;
1797 	d->xl_status = 0;
1798 	d->xl_next = 0;
1799 
1800 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1801 		if (m->m_len != 0) {
1802 			if (frag == XL_MAXFRAGS)
1803 				break;
1804 			f = &d->xl_frag[frag];
1805 			f->xl_addr = vtophys(mtod(m, vm_offset_t));
1806 			f->xl_len = m->m_len;
1807 			frag++;
1808 		}
1809 	}
1810 
1811 	c->xl_mbuf = m_head;
1812 	c->xl_ptr->xl_frag[frag - 1].xl_len |= XL_LAST_FRAG;
1813 	c->xl_ptr->xl_status = XL_TXSTAT_RND_DEFEAT;
1814 
1815 	return(0);
1816 }
1817 
/*
 * 3c905B transmit start: fill ring slots between the producer index
 * and the first busy descriptor, then link the new chain into the
 * hardware ring so the chip's download poller picks it up.
 */
void
xl_start_90xB(ifp)
	struct ifnet *ifp;
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	int idx;

	sc = ifp->if_softc;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a small reserve of free descriptors. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		xl_encap_90xB(sc, cur_tx, m_head);

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = cur_tx->xl_phys;
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf);
#endif

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= XL_TXSTAT_DL_INTR;

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/* Link the new chain after the previous ring entry. */
	start_tx->xl_prev->xl_ptr->xl_next = start_tx->xl_phys;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
1893 
/*
 * Stop the interface, reprogram the hardware from scratch (MAC
 * address, RX/TX rings, thresholds, RX filter, media) and bring it
 * back up. The sequence of register writes below is order-sensitive.
 */
void xl_init(xsc)
	void			*xsc;
{
	struct xl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s, i;
	u_int16_t		rxfilt = 0;
	struct mii_data		*mii = NULL;

	s = splimp();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	if (mii == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);


	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
				sc->arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	if (xl_list_rx_init(sc) == ENOBUFS) {
		printf("xl%d: initialization failed: no "
			"memory for rx buffers\n", sc->xl_unit);
		xl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		xl_list_tx_init_90xB(sc);
	else
		xl_list_tx_init(sc);

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Set RX filter bits. */
	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	/* Set the individual bit to receive frames for this host only. */
	rxfilt |= XL_RXFILTER_INDIVIDUAL;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxfilt |= XL_RXFILTER_ALLFRAMES;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	} else {
		rxfilt &= ~XL_RXFILTER_ALLFRAMES;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxfilt |= XL_RXFILTER_BROADCAST;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	} else {
		rxfilt &= ~XL_RXFILTER_BROADCAST;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
#if 0
	if (sc->xl_type == XL_TYPE_905B)
#else
	if (0)	/* xl_setmulti_hash() does not work right */
#endif
		xl_setmulti_hash(sc);
	else
		xl_setmulti(sc);

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, vtophys(&sc->xl_ldata->xl_rx_list[0]));
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    vtophys(&sc->xl_ldata->xl_tx_list[0]));
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
 	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

#if NVLAN > 0
	/* Set max packet size to handle VLAN frames, only on 3c905B */
	if (sc->xl_type == XL_TYPE_905B)
		CSR_WRITE_2(sc, XL_W3_MAX_PKT_SIZE, 1514 + 4);
#endif

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	sc->xl_stats_no_timeout = 1;
	xl_stats_update(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);

	/* Bus-specific interrupt acknowledge hook, if any. */
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	/* Restore state of BMCR */
	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Start the periodic stats harvester. */
	timeout_add(&sc->xl_stsup_tmo, hz);

	return;
}
2108 
2109 /*
2110  * Set media options.
2111  */
2112 int xl_ifmedia_upd(ifp)
2113 	struct ifnet		*ifp;
2114 {
2115 	struct xl_softc		*sc;
2116 	struct ifmedia		*ifm = NULL;
2117 	struct mii_data		*mii = NULL;
2118 
2119 	sc = ifp->if_softc;
2120 
2121 	if (sc->xl_hasmii)
2122 		mii = &sc->sc_mii;
2123 	if (mii == NULL)
2124 		ifm = &sc->ifmedia;
2125 	else
2126 		ifm = &mii->mii_media;
2127 
2128 	switch(IFM_SUBTYPE(ifm->ifm_media)) {
2129 	case IFM_100_FX:
2130 	case IFM_10_FL:
2131 	case IFM_10_2:
2132 	case IFM_10_5:
2133 		xl_setmode(sc, ifm->ifm_media);
2134 		return (0);
2135 		break;
2136 	default:
2137 		break;
2138 	}
2139 
2140 	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2141 		|| sc->xl_media & XL_MEDIAOPT_BT4) {
2142 		xl_init(sc);
2143 	} else {
2144 		xl_setmode(sc, ifm->ifm_media);
2145 	}
2146 
2147 	return(0);
2148 }
2149 
2150 /*
2151  * Report current media status.
2152  */
2153 void xl_ifmedia_sts(ifp, ifmr)
2154 	struct ifnet		*ifp;
2155 	struct ifmediareq	*ifmr;
2156 {
2157 	struct xl_softc		*sc;
2158 	u_int32_t		icfg;
2159 	struct mii_data		*mii = NULL;
2160 
2161 	sc = ifp->if_softc;
2162 	if (sc->xl_hasmii != 0)
2163 		mii = &sc->sc_mii;
2164 
2165 	XL_SEL_WIN(3);
2166 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2167 	icfg >>= XL_ICFG_CONNECTOR_BITS;
2168 
2169 	ifmr->ifm_active = IFM_ETHER;
2170 
2171 	switch(icfg) {
2172 	case XL_XCVR_10BT:
2173 		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2174 		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2175 			ifmr->ifm_active |= IFM_FDX;
2176 		else
2177 			ifmr->ifm_active |= IFM_HDX;
2178 		break;
2179 	case XL_XCVR_AUI:
2180 		if (sc->xl_type == XL_TYPE_905B &&
2181 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2182 			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
2183 			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2184 				ifmr->ifm_active |= IFM_FDX;
2185 			else
2186 				ifmr->ifm_active |= IFM_FDX;
2187 		} else
2188 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2189 		break;
2190 	case XL_XCVR_COAX:
2191 		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2192 		break;
2193 	/*
2194 	 * XXX MII and BTX/AUTO should be separate cases.
2195 	 */
2196 
2197 	case XL_XCVR_100BTX:
2198 	case XL_XCVR_AUTO:
2199 	case XL_XCVR_MII:
2200 		if (mii != NULL) {
2201 			mii_pollstat(mii);
2202 			ifmr->ifm_active = mii->mii_media_active;
2203 			ifmr->ifm_status = mii->mii_media_status;
2204 		}
2205 		break;
2206 	case XL_XCVR_100BFX:
2207 		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2208 		break;
2209 	default:
2210 		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
2211 		break;
2212 	}
2213 
2214 	return;
2215 }
2216 
/*
 * Process an ioctl request for the interface.  Runs at splimp() to
 * keep softc and chip state consistent.  Returns 0 or an errno.
 */
int
xl_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct xl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;
	struct mii_data *mii = NULL;
	u_int8_t rxfilt;

	s = splimp();

	/* Let the common ethernet code have first crack at the request. */
	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		/* Setting an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			xl_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif /* INET */
		default:
			xl_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		if(ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;

	case SIOCSIFFLAGS:
		/* Window 5 holds the receive filter register. */
		XL_SEL_WIN(5);
		rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only IFF_PROMISC toggled while we are already
			 * running, just reprogram the RX filter instead of
			 * doing a full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xl_if_flags & IFF_PROMISC)) {
				rxfilt |= XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xl_if_flags & IFF_PROMISC) {
				rxfilt &= ~XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else
				xl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		/* Remember the flags so we can detect PROMISC changes. */
		sc->xl_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->arpcom) :
		    ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
#if 0
			if (sc->xl_type == XL_TYPE_905B)
#else
			if (0)	/* xl_setmulti_hash() does not work right */
#endif
				xl_setmulti_hash(sc);
			else
				xl_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Hand media requests to whichever ifmedia is in use. */
		if (sc->xl_hasmii != 0)
			mii = &sc->sc_mii;
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return(error);
}
2330 
2331 void xl_watchdog(ifp)
2332 	struct ifnet		*ifp;
2333 {
2334 	struct xl_softc		*sc;
2335 	u_int16_t		status = 0;
2336 
2337 	sc = ifp->if_softc;
2338 
2339 	ifp->if_oerrors++;
2340 	XL_SEL_WIN(4);
2341 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2342 	printf("xl%d: watchdog timeout\n", sc->xl_unit);
2343 
2344 	if (status & XL_MEDIASTAT_CARRIER)
2345 		printf("xl%d: no carrier - transceiver cable problem?\n",
2346 								sc->xl_unit);
2347 	xl_txeoc(sc);
2348 	xl_txeof(sc);
2349 	xl_rxeof(sc);
2350 	xl_reset(sc, 0);
2351 	xl_init(sc);
2352 
2353 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
2354 		(*ifp->if_start)(ifp);
2355 
2356 	return;
2357 }
2358 
2359 void
2360 xl_freetxrx(sc)
2361 	struct xl_softc *sc;
2362 {
2363 	int i;
2364 
2365 	/*
2366 	 * Free data in the RX lists.
2367 	 */
2368 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2369 		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2370 			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2371 			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2372 		}
2373 	}
2374 	bzero((char *)&sc->xl_ldata->xl_rx_list,
2375 		sizeof(sc->xl_ldata->xl_rx_list));
2376 	/*
2377 	 * Free the TX list buffers.
2378 	 */
2379 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2380 		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2381 			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2382 			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2383 		}
2384 	}
2385 	bzero((char *)&sc->xl_ldata->xl_tx_list,
2386 		sizeof(sc->xl_ldata->xl_tx_list));
2387 }
2388 
2389 /*
2390  * Stop the adapter and free any mbufs allocated to the
2391  * RX and TX lists.
2392  */
2393 void xl_stop(sc)
2394 	struct xl_softc *sc;
2395 {
2396 	struct ifnet *ifp;
2397 
2398 	ifp = &sc->arpcom.ac_if;
2399 	ifp->if_timer = 0;
2400 
2401 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
2402 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2403 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2404 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
2405 	xl_wait(sc);
2406 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
2407 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2408 	DELAY(800);
2409 
2410 #ifdef foo
2411 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2412 	xl_wait(sc);
2413 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2414 	xl_wait(sc);
2415 #endif
2416 
2417 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
2418 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
2419 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
2420 
2421 	if (sc->intr_ack)
2422 		(*sc->intr_ack)(sc);
2423 
2424 	/* Stop the stats updater. */
2425 	timeout_del(&sc->xl_stsup_tmo);
2426 
2427 	xl_freetxrx(sc);
2428 
2429 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2430 
2431 	return;
2432 }
2433 
/*
 * Attach routine: read the station address from the EEPROM, allocate
 * and align the descriptor lists, determine what media/transceivers
 * the board supports and hook the interface into the network stack.
 * Called by the bus front-end once register access is set up.
 */
void
xl_attach(sc)
	struct xl_softc *sc;
{
	u_int8_t enaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp = &sc->arpcom.ac_if;
	caddr_t roundptr;
	u_int round;
	int i, media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	struct ifmedia *ifm;

	sc->xl_unit = sc->sc_dev.dv_unit;
	xl_reset(sc, 1);

	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
		printf("\n%s: failed to read station address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	bcopy(enaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	/* Apply the LED/MII power-bit inversions indicated by the flags. */
	if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
		u_int16_t n;

		XL_SEL_WIN(2);
		n = CSR_READ_2(sc, 12);

		if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
			n |= 0x0010;

		if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
			n |= 0x4000;

		CSR_WRITE_2(sc, 12, n);
	}

	/* Allocate the descriptor lists, with slack for alignment below. */
	sc->xl_ldata_ptr = malloc(sizeof(struct xl_list_data) + 8,
	    M_DEVBUF, M_NOWAIT);
	if (sc->xl_ldata_ptr == NULL) {
		printf("%s: no memory for list buffers\n",sc->sc_dev.dv_xname);
		return;
	}

	sc->xl_ldata = (struct xl_list_data *)sc->xl_ldata_ptr;
#ifdef __alpha__
	round = (u_int64_t)sc->xl_ldata_ptr & 0xf;
#else
	round = (u_int32_t)sc->xl_ldata_ptr & 0xf;
#endif
	/* Round the list pointer up to an 8-byte boundary. */
	roundptr = sc->xl_ldata_ptr;
	for (i = 0; i < 8; i++) {
		if (round % 8) {
			round++;
			roundptr++;
		} else
			break;
	}
	sc->xl_ldata = (struct xl_list_data *)roundptr;
	bzero(sc->xl_ldata, sizeof(struct xl_list_data));

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH)
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;

	timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);

	/* Fill in the interface entry points. */
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xl_ioctl;
	ifp->if_output = ether_output;
	if (sc->xl_type == XL_TYPE_905B)
		ifp->if_start = xl_start_90xB;
	else
		ifp->if_start = xl_start;
	ifp->if_watchdog = xl_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

#if NVLAN > 0
	if (sc->xl_type == XL_TYPE_905B)
		ifp->if_capabilities = IFCAP_VLAN_MTU;
	/*
	 * XXX
	 * Do other cards filter large packets or simply pass them through?
	 * Apparently only the 905B has the capability to set a larger size.
	 */
#endif

	/* Find out what media options the board advertises. */
	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);

	xl_read_eeprom(sc, (char *)&sc->xl_xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	DELAY(100000);

	xl_mediacheck(sc);

	if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, 12, 0x4000 | CSR_READ_2(sc, 12));
	}

	DELAY(100000);

	/* 10/100 boards get a PHY via MII; everything else uses ifmedia. */
	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		ifmedia_init(&sc->sc_mii.mii_media, 0,
		    xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 1;
		sc->sc_mii.mii_ifp = ifp;
		sc->sc_mii.mii_readreg = xl_miibus_readreg;
		sc->sc_mii.mii_writereg = xl_miibus_writereg;
		sc->sc_mii.mii_statchg = xl_miibus_statchg;
		xl_setcfg(sc);
		mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			/* No PHY probed; offer only "none". */
			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		}
		else {
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
		ifm = &sc->sc_mii.mii_media;
	}
	else {
		ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 0;
		ifm = &sc->ifmedia;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO) {
		xl_choose_xcvr(sc, 0);
		xl_reset(sc, 0);
	}

	/* Populate the ifmedia list from the advertised media options. */
	if (sc->xl_media & XL_MEDIAOPT_BT) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(ifm,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		ifp->if_baudrate = 100000000;
		ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
	}

	/* Choose a default media. */
	switch(sc->xl_xcvr) {
	case XL_XCVR_10BT:
		media = IFM_ETHER|IFM_10_T;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			media = IFM_ETHER|IFM_10_FL;
			xl_setmode(sc, media);
		} else {
			media = IFM_ETHER|IFM_10_5;
			xl_setmode(sc, media);
		}
		break;
	case XL_XCVR_COAX:
		media = IFM_ETHER|IFM_10_2;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus */
		break;
	case XL_XCVR_100BFX:
		media = IFM_ETHER|IFM_100_FX;
		xl_setmode(sc, media);
		break;
	default:
		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit,
							sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		media = IFM_ETHER | IFM_10_T;
		break;
	}

	if (sc->xl_hasmii == 0)
		ifmedia_set(&sc->ifmedia, media);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/* Make sure we cleanly stop the chip on shutdown/suspend. */
	sc->sc_sdhook = shutdownhook_establish(xl_shutdown, sc);
	sc->sc_pwrhook = powerhook_establish(xl_power, sc);
}
2679 
2680 int
2681 xl_detach(sc)
2682 	struct xl_softc *sc;
2683 {
2684 	struct ifnet *ifp = &sc->arpcom.ac_if;
2685 
2686 	/* Unhook our tick handler. */
2687 	timeout_del(&sc->xl_stsup_tmo);
2688 
2689 	xl_freetxrx(sc);
2690 
2691 	/* Detach all PHYs */
2692 	if (sc->xl_hasmii)
2693 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2694 
2695 	/* Delete all remaining media. */
2696 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2697 
2698 	ether_ifdetach(ifp);
2699 	if_detach(ifp);
2700 
2701 	shutdownhook_disestablish(sc->sc_sdhook);
2702 	powerhook_disestablish(sc->sc_pwrhook);
2703 
2704 	return (0);
2705 }
2706 
/*
 * Shutdown hook: reset and stop the chip so it is quiescent
 * (no DMA in flight) before the system reboots.
 */
void
xl_shutdown(v)
	void *v;
{
	struct xl_softc *sc = v;

	xl_reset(sc, 1);
	xl_stop(sc);
}
2716 
/* Autoconf glue: driver name "xl", classed as a network interface. */
struct cfdriver xl_cd = {
	0, "xl", DV_IFNET
};
2720