xref: /openbsd/sys/dev/ic/xl.c (revision 891d7ab6)
1 /*	$OpenBSD: xl.c,v 1.102 2011/06/21 16:52:45 tedu Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
35  */
36 
37 /*
38  * 3Com 3c90x Etherlink XL PCI NIC driver
39  *
40  * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
41  * bus-master chips (3c90x cards and embedded controllers) including
42  * the following:
43  *
44  * 3Com 3c900-TPO	10Mbps/RJ-45
45  * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
46  * 3Com 3c905-TX	10/100Mbps/RJ-45
47  * 3Com 3c905-T4	10/100Mbps/RJ-45
48  * 3Com 3c900B-TPO	10Mbps/RJ-45
49  * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
50  * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
51  * 3Com 3c900B-FL	10Mbps/Fiber-optic
52  * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
53  * 3Com 3c905B-TX	10/100Mbps/RJ-45
54  * 3Com 3c905B-FL/FX	10/100Mbps/Fiber-optic
55  * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
56  * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
57  * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
58  * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
59  * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
60  * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
61  * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62  * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
63  * 3Com 3c575TX		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64  * 3Com 3c575B		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65  * 3Com 3c575C		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66  * 3Com 3cxfem656	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67  * 3Com 3cxfem656b	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
68  * 3Com 3cxfem656c	10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
69  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
70  * Dell on-board 3c920 10/100Mbps/RJ-45
71  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
72  * Dell Latitude laptop docking station embedded 3c905-TX
73  *
74  * Written by Bill Paul <wpaul@ctr.columbia.edu>
75  * Electrical Engineering Department
76  * Columbia University, New York City
77  */
78 
79 /*
80  * The 3c90x series chips use a bus-master DMA interface for transfering
81  * packets to and from the controller chip. Some of the "vortex" cards
82  * (3c59x) also supported a bus master mode, however for those chips
83  * you could only DMA packets to/from a contiguous memory buffer. For
84  * transmission this would mean copying the contents of the queued mbuf
85  * chain into an mbuf cluster and then DMAing the cluster. This extra
86  * copy would sort of defeat the purpose of the bus master support for
87  * any packet that doesn't fit into a single mbuf.
88  *
89  * By contrast, the 3c90x cards support a fragment-based bus master
90  * mode where mbuf chains can be encapsulated using TX descriptors.
91  * This is similar to other PCI chips such as the Texas Instruments
92  * ThunderLAN and the Intel 82557/82558.
93  *
94  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
95  * bus master chips because they maintain the old PIO interface for
96  * backwards compatibility, but starting with the 3c905B and the
97  * "cyclone" chips, the compatibility interface has been dropped.
98  * Since using bus master DMA is a big win, we use this driver to
99  * support the PCI "boomerang" chips even though they work with the
100  * "vortex" driver in order to obtain better performance.
101  */
102 
103 #include "bpfilter.h"
104 
105 #include <sys/param.h>
106 #include <sys/systm.h>
107 #include <sys/mbuf.h>
108 #include <sys/protosw.h>
109 #include <sys/socket.h>
110 #include <sys/ioctl.h>
111 #include <sys/errno.h>
112 #include <sys/malloc.h>
113 #include <sys/kernel.h>
114 #include <sys/proc.h>   /* only for declaration of wakeup() used by vm.h */
115 #include <sys/device.h>
116 
117 #include <net/if.h>
118 #include <net/if_dl.h>
119 #include <net/if_types.h>
120 #include <net/if_media.h>
121 
122 #ifdef INET
123 #include <netinet/in.h>
124 #include <netinet/in_systm.h>
125 #include <netinet/in_var.h>
126 #include <netinet/ip.h>
127 #include <netinet/if_ether.h>
128 #endif
129 
130 #include <dev/mii/mii.h>
131 #include <dev/mii/miivar.h>
132 
133 #include <machine/bus.h>
134 
135 #if NBPFILTER > 0
136 #include <net/bpf.h>
137 #endif
138 
139 #include <dev/ic/xlreg.h>
140 
141 /*
142  * TX Checksumming is disabled by default for two reasons:
143  * - TX Checksumming will occasionally produce corrupt packets
144  * - TX Checksumming seems to reduce performance
145  *
146  * Only 905B/C cards were reported to have this problem, it is possible
147  * that later chips _may_ be immune.
148  */
149 #define	XL905B_TXCSUM_BROKEN	1
150 
151 int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
152 void xl_stats_update(void *);
153 int xl_encap(struct xl_softc *, struct xl_chain *,
154     struct mbuf * );
155 void xl_rxeof(struct xl_softc *);
156 void xl_txeof(struct xl_softc *);
157 void xl_txeof_90xB(struct xl_softc *);
158 void xl_txeoc(struct xl_softc *);
159 int xl_intr(void *);
160 void xl_start(struct ifnet *);
161 void xl_start_90xB(struct ifnet *);
162 int xl_ioctl(struct ifnet *, u_long, caddr_t);
163 void xl_freetxrx(struct xl_softc *);
164 void xl_watchdog(struct ifnet *);
165 int xl_ifmedia_upd(struct ifnet *);
166 void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
167 
168 int xl_eeprom_wait(struct xl_softc *);
169 int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
170 void xl_mii_sync(struct xl_softc *);
171 void xl_mii_send(struct xl_softc *, u_int32_t, int);
172 int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
173 int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
174 
175 void xl_setcfg(struct xl_softc *);
176 void xl_setmode(struct xl_softc *, int);
177 void xl_iff(struct xl_softc *);
178 void xl_iff_90x(struct xl_softc *);
179 void xl_iff_905b(struct xl_softc *);
180 int xl_list_rx_init(struct xl_softc *);
181 void xl_fill_rx_ring(struct xl_softc *);
182 int xl_list_tx_init(struct xl_softc *);
183 int xl_list_tx_init_90xB(struct xl_softc *);
184 void xl_wait(struct xl_softc *);
185 void xl_mediacheck(struct xl_softc *);
186 void xl_choose_xcvr(struct xl_softc *, int);
187 #ifdef notdef
188 void xl_testpacket(struct xl_softc *);
189 #endif
190 
191 int xl_miibus_readreg(struct device *, int, int);
192 void xl_miibus_writereg(struct device *, int, int, int);
193 void xl_miibus_statchg(struct device *);
194 #ifndef SMALL_KERNEL
195 int xl_wol(struct ifnet *, int);
196 #endif
197 
/*
 * Autoconf power-management hook: quiesce, suspend or resume the
 * device.  The interface is reset and stopped across a suspend (if it
 * was running) and reinitialized on resume (if it was configured up).
 */
int
xl_activate(struct device *self, int act)
{
	struct xl_softc *sc = (struct xl_softc *)self;
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_QUIESCE:
		rv = config_activate_children(self, act);
		break;
	case DVACT_SUSPEND:
		/* Only touch the hardware if the interface is running. */
		if (ifp->if_flags & IFF_RUNNING) {
			xl_reset(sc);
			xl_stop(sc);
		}
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		/* Reset first: the chip may have lost power while asleep. */
		xl_reset(sc);
		rv = config_activate_children(self, act);
		if (ifp->if_flags & IFF_UP)
			xl_init(sc);
		break;
	}
	return (rv);
}
225 
226 /*
227  * Murphy's law says that it's possible the chip can wedge and
228  * the 'command in progress' bit may never clear. Hence, we wait
229  * only a finite amount of time to avoid getting caught in an
230  * infinite loop. Normally this delay routine would be a macro,
231  * but it isn't called during normal operation so we can afford
232  * to make it a function.
233  */
234 void
235 xl_wait(struct xl_softc *sc)
236 {
237 	int	i;
238 
239 	for (i = 0; i < XL_TIMEOUT; i++) {
240 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
241 			break;
242 	}
243 
244 	if (i == XL_TIMEOUT)
245 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
246 }
247 
/*
 * MII access routines are provided for adapters with external
 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
 * Note: if you don't perform the MDIO operations just right,
 * it's possible to end up with code that works correctly with
 * some chips/CPUs/processor speeds/bus speeds/etc but not
 * with others.
 */
/*
 * Read-modify-write helpers for the window 4 PHY management register.
 * Both macros rely on a variable named 'sc' being in scope at the
 * call site, and assume register window 4 is currently selected.
 */
#define MII_SET(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
264 
/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
xl_mii_sync(struct xl_softc *sc)
{
	int	i;

	XL_SEL_WIN(4);
	/* Drive MDIO as an output with the data line held high. */
	MII_SET(XL_MII_DIR|XL_MII_DATA);

	for (i = 0; i < 32; i++) {
		MII_SET(XL_MII_CLK);
		/*
		 * NOTE(review): the repeated MII_SET(XL_MII_DATA) writes
		 * look redundant (the data bit is already set); presumably
		 * they act as bus-access delays for MDIO timing -- confirm
		 * before attempting any cleanup here.
		 */
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
	}
}
285 
/*
 * Clock a series of bits through the MII.  The low 'cnt' bits of
 * 'bits' are shifted out most-significant bit first, one bit per
 * clock strobe.
 */
void
xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
{
	int	i;

	XL_SEL_WIN(4);
	MII_CLR(XL_MII_CLK);

	/* MSB first: start from the (cnt-1)th bit and walk down. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
                if (bits & i) {
			MII_SET(XL_MII_DATA);
                } else {
			MII_CLR(XL_MII_DATA);
                }
		/* Strobe the clock to present the bit to the PHY. */
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_CLK);
	}
}
307 
/*
 * Read a PHY register through the MII (bit-banged MDIO).
 * Returns 0 on success with the register value in frame->mii_data,
 * or 1 if the PHY did not acknowledge the read.
 */
int
xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int	i, ack, s;

	/* Block network interrupts while bit-banging the bus. */
	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Select register window 4.
	 */

	XL_SEL_WIN(4);

	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
	/*
 	 * Turn on data xmit.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	MII_SET(XL_MII_CLK);

	/* Turn off xmit. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack: the PHY pulls the data line low to acknowledge. */
	MII_CLR(XL_MII_CLK);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
	MII_SET(XL_MII_CLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			MII_SET(XL_MII_CLK);
		}
		goto fail;
	}

	/* Shift in 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
		}
		MII_SET(XL_MII_CLK);
	}

fail:

	/* One final clock strobe to leave the bus idle. */
	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);

	splx(s);

	if (ack)
		return (1);
	return (0);
}
392 
/*
 * Write to a PHY register through the MII (bit-banged MDIO).
 * Always returns 0; write failures are not detectable on MDIO.
 */
int
xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int	s;

	/* Block network interrupts while bit-banging the bus. */
	s = splnet();

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/*
	 * Select the window 4.
	 */
	XL_SEL_WIN(4);

	/*
 	 * Turn on data output.
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/* Preamble, opcode, addresses, turnaround, then 16 data bits. */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	MII_CLR(XL_MII_CLK);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(XL_MII_DIR);

	splx(s);

	return (0);
}
443 
444 int
445 xl_miibus_readreg(struct device *self, int phy, int reg)
446 {
447 	struct xl_softc *sc = (struct xl_softc *)self;
448 	struct xl_mii_frame	frame;
449 
450 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
451 		return (0);
452 
453 	bzero(&frame, sizeof(frame));
454 
455 	frame.mii_phyaddr = phy;
456 	frame.mii_regaddr = reg;
457 	xl_mii_readreg(sc, &frame);
458 
459 	return (frame.mii_data);
460 }
461 
462 void
463 xl_miibus_writereg(struct device *self, int phy, int reg, int data)
464 {
465 	struct xl_softc *sc = (struct xl_softc *)self;
466 	struct xl_mii_frame	frame;
467 
468 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
469 		return;
470 
471 	bzero(&frame, sizeof(frame));
472 
473 	frame.mii_phyaddr = phy;
474 	frame.mii_regaddr = reg;
475 	frame.mii_data = data;
476 
477 	xl_mii_writereg(sc, &frame);
478 }
479 
/*
 * miibus glue: the PHY's media status changed; propagate the
 * connector configuration and duplex setting to the MAC.
 */
void
xl_miibus_statchg(struct device *self)
{
	struct xl_softc *sc = (struct xl_softc *)self;

	xl_setcfg(sc);

	/* Set ASIC's duplex mode to match the PHY. */
	XL_SEL_WIN(3);
	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	else
		/*
		 * NOTE(review): the full-duplex branch above overwrites
		 * the whole MAC_CTRL register while this branch does a
		 * read-modify-write -- confirm the asymmetry is intended.
		 */
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
}
495 
496 /*
497  * The EEPROM is slow: give it time to come ready after issuing
498  * it a command.
499  */
500 int
501 xl_eeprom_wait(struct xl_softc *sc)
502 {
503 	int	i;
504 
505 	for (i = 0; i < 100; i++) {
506 		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
507 			DELAY(162);
508 		else
509 			break;
510 	}
511 
512 	if (i == 100) {
513 		printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname);
514 		return (1);
515 	}
516 
517 	return (0);
518 }
519 
/*
 * Read a sequence of words from the EEPROM. Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 *
 * 'dest' receives 'cnt' 16-bit words starting at EEPROM word offset
 * 'off'; when 'swap' is set each word is converted from network to
 * host byte order.  Returns 0 on success, 1 on EEPROM timeout.
 */
int
xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int		err = 0, i;
	u_int16_t	word = 0, *ptr;
/* Encode a word offset into the command-register address format. */
#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
	/* WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return (1);

	/* Some cards keep their data 0x30 words into the EEPROM. */
	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		/* Issue the read command in whichever format the part wants. */
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return (err ? 1 : 0);
}
563 
564 void
565 xl_iff(struct xl_softc *sc)
566 {
567 	if (sc->xl_type == XL_TYPE_905B)
568 		xl_iff_905b(sc);
569 	else
570 		xl_iff_90x(sc);
571 }
572 
/*
 * NICs older than the 3c905B have only one multicast option, which
 * is to enable reception of all multicast frames.
 */
void
xl_iff_90x(struct xl_softc *sc)
{
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	struct arpcom	*ac = &sc->sc_arpcom;
	u_int8_t	rxfilt;

	XL_SEL_WIN(5);

	/* Start from the current filter with all mode bits cleared. */
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;

	/*
	 * With no hash filter, any multicast membership at all forces
	 * ALLMULTI; promiscuous mode opens the filter completely.
	 */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		else
			rxfilt |= XL_RXFILTER_ALLMULTI;
	}

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);

	XL_SEL_WIN(7);
}
610 
/*
 * 3c905B adapters have a hash filter that we can program.
 */
void
xl_iff_905b(struct xl_softc *sc)
{
	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
	struct arpcom	*ac = &sc->sc_arpcom;
	int		h = 0, i;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t	rxfilt;

	XL_SEL_WIN(5);

	/* Start from the current filter with all mode bits cleared. */
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
	    XL_RXFILTER_MULTIHASH);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;

	/* Address ranges can't be hashed, so fall back to ALLMULTI. */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= XL_RXFILTER_ALLFRAMES;
		else
			rxfilt |= XL_RXFILTER_ALLMULTI;
	} else {
		rxfilt |= XL_RXFILTER_MULTIHASH;

		/* first, zot all the existing hash bits */
		for (i = 0; i < XL_HASHFILT_SIZE; i++)
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

		/*
		 * now program new ones: the hash bin is the low byte of
		 * the big-endian CRC32 of the multicast address
		 */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    0x000000FF;
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH |
			    XL_HASH_SET | h);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);

	XL_SEL_WIN(7);
}
667 
#ifdef notdef
/*
 * Debug helper (compiled out): build a minimal frame addressed to
 * our own station address and hand it to the transmit path.
 */
void
xl_testpacket(struct xl_softc *sc)
{
	struct mbuf	*m;
	struct ifnet	*ifp;
	int		error;

	ifp = &sc->sc_arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	/* dst == src == our station address, ether_type 3, 3 payload bytes. */
	bcopy(&sc->sc_arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->sc_arpcom.ac_enaddr,
		mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	mtod(m, struct ether_header *)->ether_type = htons(3);
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
	xl_start(ifp);
}
#endif
696 
/*
 * Point the internal-config connector field at the transceiver that
 * matches the configured media (MII/BT4 -> MII, BTX -> autoselect)
 * and stop the coax transceiver.
 */
void
xl_setcfg(struct xl_softc *sc)
{
	u_int32_t icfg;

	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
	icfg &= ~XL_ICFG_CONNECTOR_MASK;
	if (sc->xl_media & XL_MEDIAOPT_MII ||
		sc->xl_media & XL_MEDIAOPT_BT4)
		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
	if (sc->xl_media & XL_MEDIAOPT_BTX)
		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);

	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
}
714 
715 void
716 xl_setmode(struct xl_softc *sc, int media)
717 {
718 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
719 	u_int32_t icfg;
720 	u_int16_t mediastat;
721 
722 	XL_SEL_WIN(4);
723 	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
724 	XL_SEL_WIN(3);
725 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
726 
727 	if (sc->xl_media & XL_MEDIAOPT_BT) {
728 		if (IFM_SUBTYPE(media) == IFM_10_T) {
729 			ifp->if_baudrate = IF_Mbps(10);
730 			sc->xl_xcvr = XL_XCVR_10BT;
731 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
732 			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
733 			mediastat |= XL_MEDIASTAT_LINKBEAT|
734 					XL_MEDIASTAT_JABGUARD;
735 			mediastat &= ~XL_MEDIASTAT_SQEENB;
736 		}
737 	}
738 
739 	if (sc->xl_media & XL_MEDIAOPT_BFX) {
740 		if (IFM_SUBTYPE(media) == IFM_100_FX) {
741 			ifp->if_baudrate = IF_Mbps(100);
742 			sc->xl_xcvr = XL_XCVR_100BFX;
743 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
744 			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
745 			mediastat |= XL_MEDIASTAT_LINKBEAT;
746 			mediastat &= ~XL_MEDIASTAT_SQEENB;
747 		}
748 	}
749 
750 	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
751 		if (IFM_SUBTYPE(media) == IFM_10_5) {
752 			ifp->if_baudrate = IF_Mbps(10);
753 			sc->xl_xcvr = XL_XCVR_AUI;
754 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
755 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
756 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
757 					XL_MEDIASTAT_JABGUARD);
758 			mediastat |= ~XL_MEDIASTAT_SQEENB;
759 		}
760 		if (IFM_SUBTYPE(media) == IFM_10_FL) {
761 			ifp->if_baudrate = IF_Mbps(10);
762 			sc->xl_xcvr = XL_XCVR_AUI;
763 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
764 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
765 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
766 					XL_MEDIASTAT_JABGUARD);
767 			mediastat |= ~XL_MEDIASTAT_SQEENB;
768 		}
769 	}
770 
771 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
772 		if (IFM_SUBTYPE(media) == IFM_10_2) {
773 			ifp->if_baudrate = IF_Mbps(10);
774 			sc->xl_xcvr = XL_XCVR_COAX;
775 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
776 			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
777 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
778 					XL_MEDIASTAT_JABGUARD|
779 					XL_MEDIASTAT_SQEENB);
780 		}
781 	}
782 
783 	if ((media & IFM_GMASK) == IFM_FDX ||
784 			IFM_SUBTYPE(media) == IFM_100_FX) {
785 		XL_SEL_WIN(3);
786 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
787 	} else {
788 		XL_SEL_WIN(3);
789 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
790 			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
791 	}
792 
793 	if (IFM_SUBTYPE(media) == IFM_10_2)
794 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
795 	else
796 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
797 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
798 	XL_SEL_WIN(4);
799 	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
800 	DELAY(800);
801 	XL_SEL_WIN(7);
802 }
803 
/*
 * Issue a global reset, then reset the RX and TX engines, waiting for
 * each command to complete.  Also reapplies the LED/MII power-inversion
 * reset options that some cards lose across a reset.
 */
void
xl_reset(struct xl_softc *sc)
{
	int	i;

	XL_SEL_WIN(0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
		    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
		     XL_RESETOPT_DISADVFD:0));

	/*
	 * Pause briefly after issuing the reset command before trying
	 * to access any other registers. With my 3c575C cardbus card,
	 * failing to do this results in the system locking up while
	 * trying to poll the command busy bit in the status register.
	 */
	DELAY(100000);

	/* Poll (with a short pause between reads) until the reset completes. */
	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname);

	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/* Some cards need their LED/MII power polarity inverted. */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
		    XL_W2_RESET_OPTIONS)
		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
		    );
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
}
856 
/*
 * This routine is a kludge to work around possible hardware faults
 * or manufacturing defects that can cause the media options register
 * (or reset options register, as it's called for the first generation
 * 3c90x adapters) to return an incorrect result. I have encountered
 * one Dell Latitude laptop docking station with an integrated 3c905-TX
 * which doesn't have any of the 'mediaopt' bits set. This screws up
 * the attach routine pretty badly because it doesn't know what media
 * to look for. If we find ourselves in this predicament, this routine
 * will try to guess the media options values and warn the user of a
 * possible manufacturing defect with his adapter/system/whatever.
 */
void
xl_mediacheck(struct xl_softc *sc)
{
	/*
	 * If some of the media options bits are set, assume they are
	 * correct. If not, try to figure it out down below.
	 * XXX I should check for 10baseFL, but I don't have an adapter
	 * to test with.
	 */
	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
		/*
	 	 * Check the XCVR value. If it's not in the normal range
	 	 * of values, we need to fake it up here.
	 	 */
		if (sc->xl_xcvr <= XL_XCVR_AUTO)
			return;
		else {
			printf("%s: bogus xcvr value "
			"in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr);
			printf("%s: choosing new default based "
				"on card type\n", sc->sc_dev.dv_xname);
		}
	} else {
		/* 10baseFL cards legitimately report no media option bits. */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media & XL_MEDIAOPT_10FL)
			return;
		printf("%s: WARNING: no media options bits set in "
			"the media options register!!\n", sc->sc_dev.dv_xname);
		printf("%s: this could be a manufacturing defect in "
			"your adapter or system\n", sc->sc_dev.dv_xname);
		printf("%s: attempting to guess media type; you "
			"should probably consult your vendor\n", sc->sc_dev.dv_xname);
	}

	/* Guess the media options from the PCI device ID. */
	xl_choose_xcvr(sc, 1);
}
905 
/*
 * Guess the media options and default transceiver from the PCI device
 * ID stored in the EEPROM.  Used when the EEPROM's media information is
 * missing or bogus (see xl_mediacheck()).  If 'verbose' is set, report
 * the guess on the console.
 */
void
xl_choose_xcvr(struct xl_softc *sc, int verbose)
{
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch(devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing 10BaseT transceiver\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing COMBO (AUI/BNC/TP)\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 internal\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 plus BNC/AUI\n",
			    sc->sc_dev.dv_xname);
		break;
	default:
		printf("%s: unknown device ID: %x -- "
			"defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}
}
996 
997 /*
998  * Initialize the transmit descriptors.
999  */
1000 int
1001 xl_list_tx_init(struct xl_softc *sc)
1002 {
1003 	struct xl_chain_data	*cd;
1004 	struct xl_list_data	*ld;
1005 	int			i;
1006 
1007 	cd = &sc->xl_cdata;
1008 	ld = sc->xl_ldata;
1009 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1010 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1011 		if (i == (XL_TX_LIST_CNT - 1))
1012 			cd->xl_tx_chain[i].xl_next = NULL;
1013 		else
1014 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1015 	}
1016 
1017 	cd->xl_tx_free = &cd->xl_tx_chain[0];
1018 	cd->xl_tx_tail = cd->xl_tx_head = NULL;
1019 
1020 	return (0);
1021 }
1022 
1023 /*
1024  * Initialize the transmit descriptors.
1025  */
1026 int
1027 xl_list_tx_init_90xB(struct xl_softc *sc)
1028 {
1029 	struct xl_chain_data	*cd;
1030 	struct xl_list_data	*ld;
1031 	int			i, next, prev;
1032 
1033 	cd = &sc->xl_cdata;
1034 	ld = sc->xl_ldata;
1035 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
1036 		if (i == (XL_TX_LIST_CNT - 1))
1037 			next = 0;
1038 		else
1039 			next = i + 1;
1040 		if (i == 0)
1041 			prev = XL_TX_LIST_CNT - 1;
1042 		else
1043 			prev = i - 1;
1044 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1045 		cd->xl_tx_chain[i].xl_phys =
1046 		    sc->sc_listmap->dm_segs[0].ds_addr +
1047 		    offsetof(struct xl_list_data, xl_tx_list[i]);
1048 		cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next];
1049 		cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev];
1050 	}
1051 
1052 	bzero(ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
1053 	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1054 
1055 	cd->xl_tx_prod = 1;
1056 	cd->xl_tx_cons = 1;
1057 	cd->xl_tx_cnt = 0;
1058 
1059 	return (0);
1060 }
1061 
1062 /*
1063  * Initialize the RX descriptors and allocate mbufs for them. Note that
1064  * we arrange the descriptors in a closed ring, so that the last descriptor
1065  * points back to the first.
1066  */
1067 int
1068 xl_list_rx_init(struct xl_softc *sc)
1069 {
1070 	struct xl_chain_data	*cd;
1071 	struct xl_list_data	*ld;
1072 	int			i, n;
1073 	bus_addr_t		next;
1074 
1075 	cd = &sc->xl_cdata;
1076 	ld = sc->xl_ldata;
1077 
1078 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1079 		cd->xl_rx_chain[i].xl_ptr =
1080 			(struct xl_list_onefrag *)&ld->xl_rx_list[i];
1081 		if (i == (XL_RX_LIST_CNT - 1))
1082 			n = 0;
1083 		else
1084 			n = i + 1;
1085 		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n];
1086 		next = sc->sc_listmap->dm_segs[0].ds_addr +
1087 		       offsetof(struct xl_list_data, xl_rx_list[n]);
1088 		ld->xl_rx_list[i].xl_next = htole32(next);
1089 	}
1090 
1091 	cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0];
1092 	cd->xl_rx_cnt = 0;
1093 	xl_fill_rx_ring(sc);
1094 	return (0);
1095 }
1096 
1097 void
1098 xl_fill_rx_ring(struct xl_softc *sc)
1099 {
1100 	struct xl_chain_data    *cd;
1101 	struct xl_list_data     *ld;
1102 
1103 	cd = &sc->xl_cdata;
1104 	ld = sc->xl_ldata;
1105 
1106 	while (cd->xl_rx_cnt < XL_RX_LIST_CNT) {
1107 		if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS)
1108 			break;
1109 		cd->xl_rx_prod = cd->xl_rx_prod->xl_next;
1110 		cd->xl_rx_cnt++;
1111 	}
1112 }
1113 
1114 
1115 /*
1116  * Initialize an RX descriptor and attach an MBUF cluster.
1117  */
int
xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
{
	struct mbuf	*m_new = NULL;
	bus_dmamap_t	map;

	/* Grab a cluster mbuf big enough for a full frame. */
	m_new = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);

	if (!m_new)
		return (ENOBUFS);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	/*
	 * Load the new cluster into the spare map first, so that on
	 * failure the descriptor 'c' is left completely untouched.
	 */
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
	    mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	/* sync the old map, and unload it (if necessary) */
	if (c->map->dm_nsegs != 0) {
		bus_dmamap_sync(sc->sc_dmat, c->map,
		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, c->map);
	}

	/* Swap the loaded spare map into the descriptor; keep the old
	 * (now unloaded) map as the new spare. */
	map = c->map;
	c->map = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = map;

	/* Force longword alignment for packet payload. */
	m_adj(m_new, ETHER_ALIGN);

	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Rewrite the single-fragment DMA descriptor for the new buffer;
	 * the ETHER_ALIGN offset matches the m_adj() above. */
	c->xl_mbuf = m_new;
	c->xl_ptr->xl_frag.xl_addr =
	    htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
	c->xl_ptr->xl_frag.xl_len =
	    htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
	c->xl_ptr->xl_status = htole32(0);

	/* Flush the updated descriptor out to chip-visible memory. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1166 
1167 
1168 /*
1169  * A frame has been uploaded: pass the resulting mbuf chain up to
1170  * the higher level protocols.
1171  */
void
xl_rxeof(struct xl_softc *sc)
{
        struct mbuf		*m;
        struct ifnet		*ifp;
	struct xl_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;
	u_int16_t		sumflags = 0;

	ifp = &sc->sc_arpcom.ac_if;

again:

	/* Drain every descriptor the chip has finished uploading into. */
	while (sc->xl_cdata.xl_rx_cnt > 0) {
		cur_rx = sc->xl_cdata.xl_rx_cons;
		/* Pull the descriptor in from DMA memory before reading. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva),
		    sizeof(struct xl_list),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Status still zero: the chip hasn't completed this one. */
		if ((rxstat = letoh32(sc->xl_cdata.xl_rx_cons->xl_ptr->xl_status)) == 0)
			break;
		/* Detach the mbuf and advance the consumer. */
		m = cur_rx->xl_mbuf;
		cur_rx->xl_mbuf = NULL;
		sc->xl_cdata.xl_rx_cons = cur_rx->xl_next;
		sc->xl_cdata.xl_rx_cnt--;
		total_len = rxstat & XL_RXSTAT_LENMASK;

		/*
		 * Since we have told the chip to allow large frames,
		 * we need to trap giant frame errors in software. We allow
		 * a little more than the normal frame size to account for
		 * frames with VLAN tags.
		 */
		if (total_len > XL_MAX_FRAMELEN)
			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & XL_RXSTAT_UP_ERROR) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = htole32(0);
			m_freem(m);
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
			printf("%s: bad receive status -- "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = htole32(0);
			m_freem(m);
			continue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
		}
#endif

		/*
		 * 3c905B chips do RX checksum offload: translate the
		 * chip's per-frame status bits into mbuf checksum flags.
		 */
		if (sc->xl_type == XL_TYPE_905B) {
			if (!(rxstat & XL_RXSTAT_IPCKERR) &&
			    (rxstat & XL_RXSTAT_IPCKOK))
				sumflags |= M_IPV4_CSUM_IN_OK;

			if (!(rxstat & XL_RXSTAT_TCPCKERR) &&
			    (rxstat & XL_RXSTAT_TCPCKOK))
				sumflags |= M_TCP_CSUM_IN_OK;

			if (!(rxstat & XL_RXSTAT_UDPCKERR) &&
			    (rxstat & XL_RXSTAT_UDPCKOK))
				sumflags |= M_UDP_CSUM_IN_OK;

			m->m_pkthdr.csum_flags = sumflags;
		}

		ether_input_mbuf(ifp, m);
	}
	/* Replenish the descriptors we just consumed. */
	xl_fill_rx_ring(sc);
	/*
	 * Handle the 'end of channel' condition. When the upload
	 * engine hits the end of the RX ring, it will stall. This
	 * is our cue to flush the RX ring, reload the uplist pointer
	 * register and unstall the engine.
	 * XXX This is actually a little goofy. With the ThunderLAN
	 * chip, you get an interrupt when the receiver hits the end
	 * of the receive ring, which tells you exactly when you
	 * need to reload the ring pointer. Here we have to
	 * fake it. I'm mad at myself for not being clever enough
	 * to avoid the use of a goto here.
	 */
	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
		xl_wait(sc);
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
		xl_fill_rx_ring(sc);
		goto again;
	}

}
1289 
1290 /*
1291  * A frame was downloaded to the chip. It's safe for us to clean up
1292  * the list buffers.
1293  */
void
xl_txeof(struct xl_softc *sc)
{
	struct xl_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while (sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		/* Pull the descriptor back in from DMA memory. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva),
		    sizeof(struct xl_list),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * A non-zero download-list pointer means the chip is
		 * still working through the list; nothing more to
		 * reclaim yet.
		 */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		ifp->if_opackets++;
		/* Tear down the DMA mapping of the transmitted frame. */
		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_t map = cur_tx->map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (cur_tx->xl_mbuf != NULL) {
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}
		/* Return the descriptor to the free list. */
		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Clear the timeout timer. */
		ifp->if_timer = 0;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/*
		 * Frames are still pending: if the download engine has
		 * stalled or lost its list pointer, restart it at the
		 * head of the remaining chain.
		 */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
			    sc->sc_listmap->dm_segs[0].ds_addr +
			    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
			    sc->sc_listkva));
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}
}
1355 
/*
 * 3c905B TX completion: reclaim descriptors from the fixed ring,
 * walking from the consumer index toward the producer and stopping
 * at the first descriptor the chip has not yet marked DL_COMPLETE.
 */
void
xl_txeof_90xB(struct xl_softc *sc)
{
	struct xl_chain *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = &sc->sc_arpcom.ac_if;

	idx = sc->xl_cdata.xl_tx_cons;
	while (idx != sc->xl_cdata.xl_tx_prod) {

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Stop at the first descriptor still owned by the chip. */
		if ((cur_tx->xl_ptr->xl_status &
		    htole32(XL_TXSTAT_DL_COMPLETE)) == 0)
			break;

		if (cur_tx->xl_mbuf != NULL) {
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		/* Tear down the DMA mapping of the transmitted frame. */
		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
			    0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
		}

		ifp->if_opackets++;

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
	}

	sc->xl_cdata.xl_tx_cons = idx;

	/* If anything was reclaimed, the ring has room again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
	/* Ring fully drained: cancel the watchdog timer. */
	if (sc->xl_cdata.xl_tx_cnt == 0)
		ifp->if_timer = 0;
}
1398 
1399 /*
1400  * TX 'end of channel' interrupt handler. Actually, we should
1401  * only get a 'TX complete' interrupt if there's a transmit error,
1402  * so this is really TX error handler.
1403  */
void
xl_txeoc(struct xl_softc *sc)
{
	u_int8_t	txstat;

	/* Process every pending TX status byte the chip has queued. */
	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
			txstat & XL_TXSTATUS_JABBER ||
			txstat & XL_TXSTATUS_RECLAIM) {
			/* Log all errors except the suppressed 0x90 status. */
			if (txstat != 0x90) {
				printf("%s: transmission error: %x\n",
				    sc->sc_dev.dv_xname, txstat);
			}
			/* Reset the transmitter, then repoint it at any
			 * pending work so transmission can resume. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			if (sc->xl_type == XL_TYPE_905B) {
				if (sc->xl_cdata.xl_tx_cnt) {
					int i;
					struct xl_chain *c;

					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    c->xl_phys);
					/* Re-arm descriptor polling. */
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
				}
			} else {
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->sc_listmap->dm_segs[0].ds_addr +
					    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
					    sc->sc_listkva));
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			/*
			 * On underrun, raise the TX start threshold to give
			 * the FIFO more headroom before transmission begins.
			 */
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
#ifdef notdef
				printf("%s: tx underrun, increasing tx start"
				    " threshold to %d\n", sc->sc_dev.dv_xname,
				    sc->xl_tx_thresh);
#endif
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			/* Non-fatal status: just re-enable and continue. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}
}
1470 
/*
 * Interrupt handler: acknowledge and dispatch each pending interrupt
 * cause, then kick the transmitter if frames are still queued.
 * Returns non-zero if the interrupt was ours.
 */
int
xl_intr(void *arg)
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;
	int			claimed = 0;

	sc = arg;
	ifp = &sc->sc_arpcom.ac_if;

	/* 0xFFFF: presumably the device is gone (all-ones read) -- bail. */
	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {

		claimed = 1;

		/* Ack exactly the causes we are about to service. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (status & XL_STAT_UP_COMPLETE)
			xl_rxeof(sc);


		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		/* TX complete only fires on transmit errors; see xl_txeoc(). */
		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Adapter failure: reset and reinitialize from scratch. */
		if (status & XL_STAT_ADFAIL) {
			xl_reset(sc);
			xl_init(sc);
		}

		/* Statistics counters overflowing: drain them now, without
		 * rescheduling the periodic stats timeout. */
		if (status & XL_STAT_STATSOFLOW) {
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	/* Resume transmission if packets are waiting. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		(*ifp->if_start)(ifp);

	return (claimed);
}
1525 
/*
 * Harvest the chip's statistics counters (window 6, plus BadSSD in
 * window 4), fold them into the interface error/collision counts,
 * and reschedule ourselves once a second unless xl_stats_no_timeout
 * is set (i.e. we were called from the interrupt path or xl_init()).
 */
void
xl_stats_update(void *xsc)
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	struct xl_stats		xl_stats;
	u_int8_t		*p;
	int			i;
	struct mii_data		*mii = NULL;

	bzero(&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->sc_arpcom.ac_if;
	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	/* 16 consecutive byte reads fill struct xl_stats, whose layout
	 * must mirror the window-6 register block; reading also clears
	 * the counters on the chip (see the BadSSD note below). */
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
				xl_stats.xl_tx_single_collision +
				xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	/* Drive the MII state machine from the periodic tick only. */
	if (mii != NULL && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	XL_SEL_WIN(7);

	if (!sc->xl_stats_no_timeout)
		timeout_add_sec(&sc->xl_stsup_tmo, 1);
}
1574 
1575 /*
1576  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1577  * pointers to the fragment pointers.
1578  */
int
xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
{
	int		error, frag, total_len;
	u_int32_t	status;
	bus_dmamap_t	map;

	map = sc->sc_tx_sparemap;

reload:
	/* Try to map the mbuf chain for DMA as-is. */
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
	    m_head, BUS_DMA_NOWAIT);

	/* EFBIG (too many segments) is recovered below by linearizing;
	 * every other failure drops the frame. */
	if (error && error != EFBIG) {
		m_freem(m_head);
		return (1);
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
		if (frag == XL_MAXFRAGS)
			break;
		total_len += map->dm_segs[frag].ds_len;
		c->xl_ptr->xl_frag[frag].xl_addr =
		    htole32(map->dm_segs[frag].ds_addr);
		c->xl_ptr->xl_frag[frag].xl_len =
		    htole32(map->dm_segs[frag].ds_len);
	}

	/*
	 * Handle special case: we used up all 63 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (error) {
		struct mbuf	*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			m_freem(m_head);
			return (1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				m_freem(m_head);
				return (1);
			}
		}
		/* Linearize the whole chain into the single new mbuf
		 * and retry the DMA load from the top. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		goto reload;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Release any previous mapping still attached to this descriptor. */
	if (c->map->dm_nsegs != 0) {
		bus_dmamap_sync(sc->sc_dmat, c->map,
		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, c->map);
	}

	/* Install the loaded map on the descriptor; the old map becomes
	 * the new spare.  Terminate the fragment list and set status. */
	c->xl_mbuf = m_head;
	sc->sc_tx_sparemap = c->map;
	c->map = map;
	c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG);
	c->xl_ptr->xl_status = htole32(total_len);
	c->xl_ptr->xl_next = 0;

	if (sc->xl_type == XL_TYPE_905B) {
		status = XL_TXSTAT_RND_DEFEAT;

#ifndef XL905B_TXCSUM_BROKEN
		/* Request hardware checksum insertion where asked for. */
		if (m_head->m_pkthdr.csum_flags) {
			if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
				status |= XL_TXSTAT_IPCKSUM;
			if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
				status |= XL_TXSTAT_TCPCKSUM;
			if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
				status |= XL_TXSTAT_UDPCKSUM;
		}
#endif
		c->xl_ptr->xl_status = htole32(status);
	}

	/* Flush the entire TX descriptor list out to the chip. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct xl_list_data, xl_tx_list[0]),
	    sizeof(struct xl_list) * XL_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1683 
1684 /*
1685  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1686  * to the mbuf data regions directly in the transmit lists. We also save a
1687  * copy of the pointers since the transmit list fragment pointers are
1688  * physical addresses.
1689  */
void
xl_start(struct ifnet *ifp)
{
	struct xl_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain		*prev_tx;
	int			error;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * try to reclaim completed descriptors first; if that still
	 * yields nothing, mark the interface busy and punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	/* Dequeue and encapsulate frames while free descriptors remain. */
	while (sc->xl_cdata.xl_tx_free != NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* xl_encap() freed the mbuf; reuse this slot. */
			cur_tx = prev_tx;
			continue;
		}

		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together: both the software list and the
		 * chip-visible bus-address next pointer. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next =
			    sc->sc_listmap->dm_segs[0].ds_addr +
			    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva);

		}
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/*
		 * Append the new chain to the pending one and clear the
		 * interrupt request on the old tail so only the new
		 * tail generates an interrupt.
		 */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva);
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
		    htole32(~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}
	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva));

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	/*
	 * XXX Under certain conditions, usually on slower machines
	 * where interrupts may be dropped, it's possible for the
	 * adapter to chew up all the buffers in the receive ring
	 * and stall, without us being able to do anything about it.
	 * To guard against this, we need to make a pass over the
	 * RX queue to make sure there aren't any packets pending.
	 * Doing it here means we can flush the receive ring at the
	 * same time the chip is DMAing the transmit descriptors we
	 * just gave it.
	 *
	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
	 * nature of their chips in all their marketing literature;
	 * we may as well take advantage of it. :)
	 */
	xl_rxeof(sc);
}
1821 
/*
 * Transmit start routine for 3c905B adapters, which use a fixed
 * descriptor ring (xl_tx_prod/xl_tx_cons) and chip-side download
 * polling (see the XL_DOWN_POLL writes in xl_init()/xl_txeoc())
 * instead of the stall/unstall handshake used by xl_start().
 */
void
xl_start_90xB(struct ifnet *ifp)
{
	struct xl_softc	*sc;
	struct mbuf	*m_head = NULL;
	struct xl_chain	*prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain	*prev_tx;
	int		error, idx;

	sc = ifp->if_softc;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	/* Fill ring slots until the queue empties or the ring is
	 * (nearly) full; a slot with an mbuf is still in flight. */
	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a few slots in reserve; stop when nearly full. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* xl_encap() freed the mbuf; reuse this slot. */
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/* Start transmission: link the new chain into the ring so the
	 * polling download engine picks it up. */
	sc->xl_cdata.xl_tx_prod = idx;
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
1903 
/*
 * Reinitialize the adapter: calls xl_stop() first, then reprograms
 * the station address, RX/TX rings, thresholds, media and interrupt
 * masks, and finally marks the interface running.  Safe to call on
 * an already-running interface.
 */
void
xl_init(void *xsc)
{
	struct xl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
	int			s, i;
	struct mii_data		*mii = NULL;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	/* Only reset the receiver here when no MII is attached;
	 * otherwise just reset the transmitter. */
	if (mii == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
				sc->sc_arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	/* NOTE(review): xl_list_rx_init() as written always returns 0
	 * (ring fill is best-effort), so this error path is currently
	 * unreachable -- confirm intent. */
	if (xl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
			"memory for rx buffers\n", sc->sc_dev.dv_xname);
		xl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		xl_list_tx_init_90xB(sc);
	else
		xl_list_tx_init(sc);

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Program promiscuous mode and multicast filters. */
	xl_iff(sc);

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct xl_list_data, xl_rx_list[0]));
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_tx_list[0]));
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/*
	 * increase packet size to allow reception of 802.1q or ISL packets.
	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
	 * control register. For 3c90xB/C chips, use the RX packet size
	 * register.
	 */

	if (sc->xl_type == XL_TYPE_905B)
		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
	else {
		u_int8_t macctl;
		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
	}

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	sc->xl_stats_no_timeout = 1;
	xl_stats_update(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);

	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	/* Restore state of BMCR */
	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Kick off the periodic statistics/MII tick. */
	timeout_add_sec(&sc->xl_stsup_tmo, 1);
}
2089 
2090 /*
2091  * Set media options.
2092  */
2093 int
2094 xl_ifmedia_upd(struct ifnet *ifp)
2095 {
2096 	struct xl_softc		*sc;
2097 	struct ifmedia		*ifm = NULL;
2098 	struct mii_data		*mii = NULL;
2099 
2100 	sc = ifp->if_softc;
2101 
2102 	if (sc->xl_hasmii)
2103 		mii = &sc->sc_mii;
2104 	if (mii == NULL)
2105 		ifm = &sc->ifmedia;
2106 	else
2107 		ifm = &mii->mii_media;
2108 
2109 	switch(IFM_SUBTYPE(ifm->ifm_media)) {
2110 	case IFM_100_FX:
2111 	case IFM_10_FL:
2112 	case IFM_10_2:
2113 	case IFM_10_5:
2114 		xl_setmode(sc, ifm->ifm_media);
2115 		return (0);
2116 		break;
2117 	default:
2118 		break;
2119 	}
2120 
2121 	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2122 		|| sc->xl_media & XL_MEDIAOPT_BT4) {
2123 		xl_init(sc);
2124 	} else {
2125 		xl_setmode(sc, ifm->ifm_media);
2126 	}
2127 
2128 	return (0);
2129 }
2130 
2131 /*
2132  * Report current media status.
2133  */
2134 void
2135 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2136 {
2137 	struct xl_softc		*sc;
2138 	u_int32_t		icfg;
2139 	u_int16_t		status = 0;
2140 	struct mii_data		*mii = NULL;
2141 
2142 	sc = ifp->if_softc;
2143 	if (sc->xl_hasmii != 0)
2144 		mii = &sc->sc_mii;
2145 
2146 	XL_SEL_WIN(4);
2147 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2148 
2149 	XL_SEL_WIN(3);
2150 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2151 	icfg >>= XL_ICFG_CONNECTOR_BITS;
2152 
2153 	ifmr->ifm_active = IFM_ETHER;
2154 	ifmr->ifm_status = IFM_AVALID;
2155 
2156 	if ((status & XL_MEDIASTAT_CARRIER) == 0)
2157 		ifmr->ifm_status |= IFM_ACTIVE;
2158 
2159 	switch(icfg) {
2160 	case XL_XCVR_10BT:
2161 		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2162 		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2163 			ifmr->ifm_active |= IFM_FDX;
2164 		else
2165 			ifmr->ifm_active |= IFM_HDX;
2166 		break;
2167 	case XL_XCVR_AUI:
2168 		if (sc->xl_type == XL_TYPE_905B &&
2169 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2170 			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
2171 			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2172 				ifmr->ifm_active |= IFM_FDX;
2173 			else
2174 				ifmr->ifm_active |= IFM_HDX;
2175 		} else
2176 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2177 		break;
2178 	case XL_XCVR_COAX:
2179 		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2180 		break;
2181 	/*
2182 	 * XXX MII and BTX/AUTO should be separate cases.
2183 	 */
2184 
2185 	case XL_XCVR_100BTX:
2186 	case XL_XCVR_AUTO:
2187 	case XL_XCVR_MII:
2188 		if (mii != NULL) {
2189 			mii_pollstat(mii);
2190 			ifmr->ifm_active = mii->mii_media_active;
2191 			ifmr->ifm_status = mii->mii_media_status;
2192 		}
2193 		break;
2194 	case XL_XCVR_100BFX:
2195 		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2196 		break;
2197 	default:
2198 		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg);
2199 		break;
2200 	}
2201 }
2202 
/*
 * Handle socket ioctls for the interface: address assignment, up/down
 * flag changes, and media get/set.  Anything else is passed through to
 * ether_ioctl().  An ENETRESET result from any path causes the RX
 * filter to be reprogrammed (xl_iff) if the interface is running.
 * Returns 0 or an errno value.
 */
int
xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;
	struct mii_data *mii = NULL;

	/* Block network interrupts while reconfiguring. */
	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Setting an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xl_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: just reload the RX filter below. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				xl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Route media requests to the MII layer when present. */
		if (sc->xl_hasmii != 0)
			mii = &sc->sc_mii;
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		/* Multicast/promisc state changed: reprogram RX filter. */
		if (ifp->if_flags & IFF_RUNNING)
			xl_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
2262 
/*
 * Transmit watchdog timeout handler.  Reaps any completed TX/RX work,
 * then fully resets and reinitializes the chip, and restarts
 * transmission if packets are still queued.
 */
void
xl_watchdog(struct ifnet *ifp)
{
	struct xl_softc		*sc;
	u_int16_t		status = 0;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	/* Carrier bit is active-low: set means no carrier. */
	if (status & XL_MEDIASTAT_CARRIER)
		printf("%s: no carrier - transceiver cable problem?\n",
								sc->sc_dev.dv_xname);
	/* Drain completed descriptors before resetting the chip. */
	xl_txeoc(sc);
	xl_txeof(sc);
	xl_rxeof(sc);
	xl_reset(sc);
	xl_init(sc);

	/* Kick the transmitter if there is still work pending. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		(*ifp->if_start)(ifp);
}
2288 
2289 void
2290 xl_freetxrx(struct xl_softc *sc)
2291 {
2292 	bus_dmamap_t	map;
2293 	int		i;
2294 
2295 	/*
2296 	 * Free data in the RX lists.
2297 	 */
2298 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2299 		if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
2300 			map = sc->xl_cdata.xl_rx_chain[i].map;
2301 
2302 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2303 			    BUS_DMASYNC_POSTREAD);
2304 			bus_dmamap_unload(sc->sc_dmat, map);
2305 		}
2306 		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2307 			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2308 			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2309 		}
2310 	}
2311 	bzero(&sc->xl_ldata->xl_rx_list, sizeof(sc->xl_ldata->xl_rx_list));
2312 	/*
2313 	 * Free the TX list buffers.
2314 	 */
2315 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2316 		if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
2317 			map = sc->xl_cdata.xl_tx_chain[i].map;
2318 
2319 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2320 			    BUS_DMASYNC_POSTWRITE);
2321 			bus_dmamap_unload(sc->sc_dmat, map);
2322 		}
2323 		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2324 			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2325 			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2326 		}
2327 	}
2328 	bzero(&sc->xl_ldata->xl_tx_list, sizeof(sc->xl_ldata->xl_tx_list));
2329 }
2330 
2331 /*
2332  * Stop the adapter and free any mbufs allocated to the
2333  * RX and TX lists.
2334  */
void
xl_stop(struct xl_softc *sc)
{
	struct ifnet *ifp;

	/* Stop the stats updater. */
	timeout_del(&sc->xl_stsup_tmo);

	ifp = &sc->sc_arpcom.ac_if;
	/* Cancel the transmit watchdog. */
	ifp->if_timer = 0;

	/* Shut down the receiver, stats engine and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	/* Let the DC-DC converter and transmitter settle. */
	DELAY(800);

#ifdef foo
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	/* Acknowledge any latched interrupt and mask everything off. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);

	/* Bus-specific interrupt acknowledge hook (e.g. cardbus). */
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Release all ring buffers and DMA mappings. */
	xl_freetxrx(sc);

#ifndef SMALL_KERNEL
	/* Call upper layer WOL power routine if WOL is enabled. */
	if ((sc->xl_flags & XL_FLAG_WOL) && sc->wol_power)
		sc->wol_power(sc->wol_power_arg);
#endif
}
2379 
/*
 * Bus-independent attach: read the station address, allocate DMA
 * descriptor memory and maps, determine the chip generation (3c90x
 * vs. 3c90xB/C), probe the available media and register the interface
 * with the network stack.
 *
 * NOTE(review): the early-return error paths below do not release
 * DMA memory/maps allocated by earlier steps; attach failure leaks
 * those resources — confirm whether this is acceptable here.
 */
void
xl_attach(struct xl_softc *sc)
{
	u_int8_t enaddr[ETHER_ADDR_LEN];
	u_int16_t		xcvr[2];
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i, media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	struct ifmedia *ifm;

	i = splnet();
	xl_reset(sc);
	splx(i);

	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
		printf("\n%s: failed to read station address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	bcopy(enaddr, &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Allocate, map and load DMA-able memory for the RX/TX rings. */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct xl_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1,
	    sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva;

	/* One single-segment map per RX descriptor, plus a spare. */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->xl_cdata.xl_rx_chain[i].map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	/* TX maps allow multi-segment mbuf chains, plus a spare. */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->xl_cdata.xl_tx_chain[i].map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Some boards need LED/MII power polarity inverted (window 2). */
	if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
		u_int16_t n;

		XL_SEL_WIN(2);
		n = CSR_READ_2(sc, 12);

		if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
			n |= 0x0010;

		if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
			n |= 0x4000;

		CSR_WRITE_2(sc, 12, n);
	}

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 * Note: my 3c575C cardbus card lies. It returns a value
	 * of 0x1578 for its capabilities word, which is somewhat
	 * nonsensical. Another way to distinguish a 3c90x chip
	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
	 * bit. This will only be set for 3c90x boomerage chips.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;

	/* Set the TX start threshold for best performance. */
	sc->xl_tx_thresh = XL_MIN_FRAMELEN;

	timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);

	/* Fill in the ifnet and hook up our entry points. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xl_ioctl;
	if (sc->xl_type == XL_TYPE_905B)
		ifp->if_start = xl_start_90xB;
	else
		ifp->if_start = xl_start;
	ifp->if_watchdog = xl_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef XL905B_TXCSUM_BROKEN
	ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
				IFCAP_CSUM_UDPv4;
#endif

	/* Read the supported media options and default transceiver. */
	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);

	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	xl_mediacheck(sc);

	/* MII-capable media get a PHY probe; otherwise use raw ifmedia. */
	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		ifmedia_init(&sc->sc_mii.mii_media, 0,
		    xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 1;
		sc->sc_mii.mii_ifp = ifp;
		sc->sc_mii.mii_readreg = xl_miibus_readreg;
		sc->sc_mii.mii_writereg = xl_miibus_writereg;
		sc->sc_mii.mii_statchg = xl_miibus_statchg;
		xl_setcfg(sc);
		mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			/* No PHY found: register a placeholder medium. */
			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		}
		else {
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
		ifm = &sc->sc_mii.mii_media;
	}
	else {
		ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 0;
		ifm = &sc->ifmedia;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO) {
		xl_choose_xcvr(sc, 0);
		i = splnet();
		xl_reset(sc);
		splx(i);
	}

	/* Register each medium the board's media-option word advertises. */
	if (sc->xl_media & XL_MEDIAOPT_BT) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(ifm,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		ifp->if_baudrate = 100000000;
		ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
	}

	/* Choose a default media. */
	switch(sc->xl_xcvr) {
	case XL_XCVR_10BT:
		media = IFM_ETHER|IFM_10_T;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			media = IFM_ETHER|IFM_10_FL;
			xl_setmode(sc, media);
		} else {
			media = IFM_ETHER|IFM_10_5;
			xl_setmode(sc, media);
		}
		break;
	case XL_XCVR_COAX:
		media = IFM_ETHER|IFM_10_2;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus */
		break;
	case XL_XCVR_100BFX:
		media = IFM_ETHER|IFM_100_FX;
		xl_setmode(sc, media);
		break;
	default:
		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname,
							sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		media = IFM_ETHER | IFM_10_T;
		break;
	}

	if (sc->xl_hasmii == 0)
		ifmedia_set(&sc->ifmedia, media);

	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
		XL_SEL_WIN(0);
		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
	}

#ifndef SMALL_KERNEL
	/* Check availability of WOL. */
	if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0) {
		ifp->if_capabilities |= IFCAP_WOL;
		ifp->if_wol = xl_wol;
		xl_wol(ifp, 0);
	}
#endif

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	m_clsetwms(ifp, MCLBYTES, 2, XL_RX_LIST_CNT - 1);
}
2663 
2664 int
2665 xl_detach(struct xl_softc *sc)
2666 {
2667 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2668 	extern void xl_freetxrx(struct xl_softc *);
2669 
2670 	/* Unhook our tick handler. */
2671 	timeout_del(&sc->xl_stsup_tmo);
2672 
2673 	xl_freetxrx(sc);
2674 
2675 	/* Detach all PHYs */
2676 	if (sc->xl_hasmii)
2677 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2678 
2679 	/* Delete all remaining media. */
2680 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2681 
2682 	ether_ifdetach(ifp);
2683 	if_detach(ifp);
2684 
2685 	return (0);
2686 }
2687 
2688 #ifndef SMALL_KERNEL
2689 int
2690 xl_wol(struct ifnet *ifp, int enable)
2691 {
2692 	struct xl_softc		*sc = ifp->if_softc;
2693 
2694 	XL_SEL_WIN(7);
2695 	if (enable) {
2696 		CSR_WRITE_2(sc, XL_W7_BM_PME, XL_BM_PME_MAGIC);
2697 		sc->xl_flags |= XL_FLAG_WOL;
2698 	} else {
2699 		CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
2700 		sc->xl_flags &= ~XL_FLAG_WOL;
2701 	}
2702 	return (0);
2703 }
2704 #endif
2705 
/* Autoconf glue: driver name "xl" and network-interface device class. */
struct cfdriver xl_cd = {
	0, "xl", DV_IFNET
};
2709