1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/pci/if_tl.c,v 1.51.2.5 2001/12/16 15:46:08 luigi Exp $
33  * $DragonFly: src/sys/dev/netif/tl/if_tl.c,v 1.13 2004/09/15 00:55:37 joerg Exp $
36  */
37 
38 /*
39  * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
40  * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
41  * the National Semiconductor DP83840A physical interface and the
42  * Microchip Technology 24Cxx series serial EEPROM.
43  *
44  * Written using the following four documents:
45  *
46  * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
47  * National Semiconductor DP83840A data sheet (www.national.com)
48  * Microchip Technology 24C02C data sheet (www.microchip.com)
49  * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
50  *
51  * Written by Bill Paul <wpaul@ctr.columbia.edu>
52  * Electrical Engineering Department
53  * Columbia University, New York City
54  */
55 
56 /*
57  * Some notes about the ThunderLAN:
58  *
59  * The ThunderLAN controller is a single chip containing PCI controller
60  * logic, approximately 3K of on-board SRAM, a LAN controller, and media
61  * independent interface (MII) bus. The MII allows the ThunderLAN chip to
62  * control up to 32 different physical interfaces (PHYs). The ThunderLAN
63  * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
64  * to act as a complete ethernet interface.
65  *
66  * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
67  * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
68  * in full or half duplex. Some of the Compaq Deskpro machines use a
69  * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
70  * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
71  * concert with the ThunderLAN's internal PHY to provide full 10/100
72  * support. This is cheaper than using a standalone external PHY for both
73  * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
74  * A serial EEPROM is also attached to the ThunderLAN chip to provide
75  * power-up default register settings and for storing the adapter's
76  * station address. Although not supported by this driver, the ThunderLAN
77  * chip can also be connected to token ring PHYs.
78  *
79  * The ThunderLAN has a set of registers which can be used to issue
80  * commands, acknowledge interrupts, and to manipulate other internal
81  * registers on its DIO bus. The primary registers can be accessed
82  * using either programmed I/O (inb/outb) or via PCI memory mapping,
83  * depending on how the card is configured during the PCI probing
84  * phase. It is even possible to have both PIO and memory mapped
85  * access turned on at the same time.
86  *
87  * Frame reception and transmission with the ThunderLAN chip is done
88  * using frame 'lists.' A list structure looks more or less like this:
89  *
90  * struct tl_frag {
91  *	u_int32_t		fragment_address;
92  *	u_int32_t		fragment_size;
93  * };
94  * struct tl_list {
95  *	u_int32_t		forward_pointer;
96  *	u_int16_t		cstat;
97  *	u_int16_t		frame_size;
98  *	struct tl_frag		fragments[10];
99  * };
100  *
101  * The forward pointer in the list header can be either a 0 or the address
102  * of another list, which allows several lists to be linked together. Each
103  * list contains up to 10 fragment descriptors. This means the chip allows
104  * ethernet frames to be broken up into up to 10 chunks for transfer to
105  * and from the SRAM. Note that the forward pointer and fragment buffer
106  * addresses are physical memory addresses, not virtual. Note also that
107  * a single ethernet frame can not span lists: if the host wants to
108  * transmit a frame and the frame data is split up over more than 10
 109  * buffers, the frame has to be collapsed before it can be transmitted.
110  *
111  * To receive frames, the driver sets up a number of lists and populates
112  * the fragment descriptors, then it sends an RX GO command to the chip.
113  * When a frame is received, the chip will DMA it into the memory regions
114  * specified by the fragment descriptors and then trigger an RX 'end of
 115  * frame' interrupt when done. The driver may choose to use only one
 116  * fragment per list; this results in slightly less efficient use
 117  * of memory in exchange for improved performance.
118  *
119  * To transmit frames, the driver again sets up lists and fragment
120  * descriptors, only this time the buffers contain frame data that
121  * is to be DMA'ed into the chip instead of out of it. Once the chip
122  * has transfered the data into its on-board SRAM, it will trigger a
123  * TX 'end of frame' interrupt. It will also generate an 'end of channel'
124  * interrupt when it reaches the end of the list.
125  */
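/*
 * As a purely illustrative sketch (not part of the driver), chaining two
 * such lists for transmission might look roughly like the fragment below.
 * The field names (tlist_fptr, tlist_cstat, tl_frag[], etc.) are the ones
 * used elsewhere in this file and defined in if_tlreg.h; frame1_buf,
 * frame1_len and friends are hypothetical placeholders:
 *
 *	struct tl_list		a, b;
 *
 *	bzero(&a, sizeof(a));
 *	bzero(&b, sizeof(b));
 *	a.tlist_fptr = vtophys(&b);		(physical, not virtual)
 *	a.tlist_frsize = frame1_len;
 *	a.tlist_cstat = TL_CSTAT_READY;
 *	a.tl_frag[0].tlist_dadr = vtophys(frame1_buf);
 *	a.tl_frag[0].tlist_dcnt = frame1_len;
 *	b.tlist_fptr = 0;			(0 terminates the chain)
 *	b.tlist_frsize = frame2_len;
 *	b.tlist_cstat = TL_CSTAT_READY;
 *	b.tl_frag[0].tlist_dadr = vtophys(frame2_buf);
 *	b.tl_frag[0].tlist_dcnt = frame2_len;
 */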
126 
127 /*
128  * Some notes about this driver:
129  *
130  * The ThunderLAN chip provides a couple of different ways to organize
131  * reception, transmission and interrupt handling. The simplest approach
132  * is to use one list each for transmission and reception. In this mode,
133  * the ThunderLAN will generate two interrupts for every received frame
134  * (one RX EOF and one RX EOC) and two for each transmitted frame (one
135  * TX EOF and one TX EOC). This may make the driver simpler but it hurts
136  * performance to have to handle so many interrupts.
137  *
138  * Initially I wanted to create a circular list of receive buffers so
139  * that the ThunderLAN chip would think there was an infinitely long
140  * receive channel and never deliver an RXEOC interrupt. However this
141  * doesn't work correctly under heavy load: while the manual says the
142  * chip will trigger an RXEOF interrupt each time a frame is copied into
143  * memory, you can't count on the chip waiting around for you to acknowledge
144  * the interrupt before it starts trying to DMA the next frame. The result
145  * is that the chip might traverse the entire circular list and then wrap
146  * around before you have a chance to do anything about it. Consequently,
147  * the receive list is terminated (with a 0 in the forward pointer in the
148  * last element). Each time an RXEOF interrupt arrives, the used list
 149  * is shifted to the end of the chain. This gives the appearance of an
150  * infinitely large RX chain so long as the driver doesn't fall behind
151  * the chip and allow all of the lists to be filled up.
152  *
153  * If all the lists are filled, the adapter will deliver an RX 'end of
154  * channel' interrupt when it hits the 0 forward pointer at the end of
155  * the chain. The RXEOC handler then cleans out the RX chain and resets
156  * the list head pointer in the ch_parm register and restarts the receiver.
157  *
158  * For frame transmission, it is possible to program the ThunderLAN's
159  * transmit interrupt threshold so that the chip can acknowledge multiple
160  * lists with only a single TX EOF interrupt. This allows the driver to
161  * queue several frames in one shot, and only have to handle a total
 162  * of two interrupts (one TX EOF and one TX EOC) no matter how many frames
163  * are transmitted. Frame transmission is done directly out of the
164  * mbufs passed to the tl_start() routine via the interface send queue.
165  * The driver simply sets up the fragment descriptors in the transmit
166  * lists to point to the mbuf data regions and sends a TX GO command.
167  *
168  * Note that since the RX and TX lists themselves are always used
 169  * only by the driver, they are malloc()ed once at driver initialization
170  * time and never free()ed.
171  *
 172  * Also, in order to remain as platform independent as possible, this
 173  * driver uses the bus_space macros for register access (either memory
 174  * mapped or programmed I/O; see TL_USEIOSPACE below) instead of raw
 175  * inb/outb instructions, which are specific to the i386 platform.
176  *
177  * Using these techniques, this driver achieves very high performance
 178  * by minimizing the number of interrupts generated during large
179  * transfers and by completely avoiding buffer copies. Frame transfer
180  * to and from the ThunderLAN chip is performed entirely by the chip
181  * itself thereby reducing the load on the host CPU.
182  */
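/*
 * A condensed sketch of the RXEOF "shift to the end" step described above
 * (the complete logic, including mbuf replacement and error handling, is
 * in tl_intvec_rxeof() below); cd is the softc's tl_chain_data:
 *
 *	struct tl_chain_onefrag	*cur = cd->tl_rx_head;
 *
 *	cd->tl_rx_head = cur->tl_next;
 *	cur->tl_ptr->tlist_fptr = 0;			(new end of chain)
 *	cur->tl_ptr->tlist_cstat = TL_CSTAT_READY;
 *	cd->tl_rx_tail->tl_ptr->tlist_fptr = vtophys(cur->tl_ptr);
 *	cd->tl_rx_tail->tl_next = cur;
 *	cd->tl_rx_tail = cur;
 */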
183 
184 #include <sys/param.h>
185 #include <sys/systm.h>
186 #include <sys/sockio.h>
187 #include <sys/mbuf.h>
188 #include <sys/malloc.h>
189 #include <sys/kernel.h>
190 #include <sys/socket.h>
191 
192 #include <net/if.h>
193 #include <net/if_arp.h>
194 #include <net/ethernet.h>
195 #include <net/if_dl.h>
196 #include <net/if_media.h>
197 
198 #include <net/bpf.h>
199 
200 #include <vm/vm.h>              /* for vtophys */
201 #include <vm/pmap.h>            /* for vtophys */
202 #include <machine/clock.h>      /* for DELAY */
203 #include <machine/bus_memio.h>
204 #include <machine/bus_pio.h>
205 #include <machine/bus.h>
206 #include <machine/resource.h>
207 #include <sys/bus.h>
208 #include <sys/rman.h>
209 
210 #include "../mii_layer/mii.h"
211 #include "../mii_layer/miivar.h"
212 
213 #include <bus/pci/pcireg.h>
214 #include <bus/pci/pcivar.h>
215 
216 /*
217  * Default to using PIO register access mode to pacify certain
218  * laptop docking stations with built-in ThunderLAN chips that
219  * don't seem to handle memory mapped mode properly.
220  */
221 #define TL_USEIOSPACE
222 
223 #include "if_tlreg.h"
224 
225 /* "controller miibus0" required.  See GENERIC if you get errors here. */
226 #include "miibus_if.h"
227 
228 /*
229  * Various supported device vendors/types and their names.
230  */
231 
232 static struct tl_type tl_devs[] = {
233 	{ TI_VENDORID,	TI_DEVICEID_THUNDERLAN,
234 		"Texas Instruments ThunderLAN" },
235 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
236 		"Compaq Netelligent 10" },
237 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
238 		"Compaq Netelligent 10/100" },
239 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
240 		"Compaq Netelligent 10/100 Proliant" },
241 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
242 		"Compaq Netelligent 10/100 Dual Port" },
243 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
244 		"Compaq NetFlex-3/P Integrated" },
245 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
246 		"Compaq NetFlex-3/P" },
247 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
248 		"Compaq NetFlex 3/P w/ BNC" },
249 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
250 		"Compaq Netelligent 10/100 TX Embedded UTP" },
251 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
252 		"Compaq Netelligent 10 T/2 PCI UTP/Coax" },
253 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
254 		"Compaq Netelligent 10/100 TX UTP" },
255 	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
256 		"Olicom OC-2183/2185" },
257 	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
258 		"Olicom OC-2325" },
259 	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
260 		"Olicom OC-2326 10/100 TX UTP" },
261 	{ 0, 0, NULL }
262 };
263 
264 static int tl_probe		(device_t);
265 static int tl_attach		(device_t);
266 static int tl_detach		(device_t);
267 static int tl_intvec_rxeoc	(void *, u_int32_t);
268 static int tl_intvec_txeoc	(void *, u_int32_t);
269 static int tl_intvec_txeof	(void *, u_int32_t);
270 static int tl_intvec_rxeof	(void *, u_int32_t);
271 static int tl_intvec_adchk	(void *, u_int32_t);
272 static int tl_intvec_netsts	(void *, u_int32_t);
273 
274 static int tl_newbuf		(struct tl_softc *,
275 					struct tl_chain_onefrag *);
276 static void tl_stats_update	(void *);
277 static int tl_encap		(struct tl_softc *, struct tl_chain *,
278 						struct mbuf *);
279 
280 static void tl_intr		(void *);
281 static void tl_start		(struct ifnet *);
282 static int tl_ioctl		(struct ifnet *, u_long, caddr_t,
283 						struct ucred *);
284 static void tl_init		(void *);
285 static void tl_stop		(struct tl_softc *);
286 static void tl_watchdog		(struct ifnet *);
287 static void tl_shutdown		(device_t);
288 static int tl_ifmedia_upd	(struct ifnet *);
289 static void tl_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
290 
291 static u_int8_t tl_eeprom_putbyte	(struct tl_softc *, int);
292 static u_int8_t	tl_eeprom_getbyte	(struct tl_softc *,
293 						int, u_int8_t *);
294 static int tl_read_eeprom	(struct tl_softc *, caddr_t, int, int);
295 
296 static void tl_mii_sync		(struct tl_softc *);
297 static void tl_mii_send		(struct tl_softc *, u_int32_t, int);
298 static int tl_mii_readreg	(struct tl_softc *, struct tl_mii_frame *);
299 static int tl_mii_writereg	(struct tl_softc *, struct tl_mii_frame *);
300 static int tl_miibus_readreg	(device_t, int, int);
301 static int tl_miibus_writereg	(device_t, int, int, int);
302 static void tl_miibus_statchg	(device_t);
303 
304 static void tl_setmode		(struct tl_softc *, int);
305 static int tl_calchash		(caddr_t);
306 static void tl_setmulti		(struct tl_softc *);
307 static void tl_setfilt		(struct tl_softc *, caddr_t, int);
308 static void tl_softreset	(struct tl_softc *, int);
309 static void tl_hardreset	(device_t);
310 static int tl_list_rx_init	(struct tl_softc *);
311 static int tl_list_tx_init	(struct tl_softc *);
312 
313 static u_int8_t tl_dio_read8	(struct tl_softc *, int);
314 static u_int16_t tl_dio_read16	(struct tl_softc *, int);
315 static u_int32_t tl_dio_read32	(struct tl_softc *, int);
316 static void tl_dio_write8	(struct tl_softc *, int, int);
317 static void tl_dio_write16	(struct tl_softc *, int, int);
318 static void tl_dio_write32	(struct tl_softc *, int, int);
319 static void tl_dio_setbit	(struct tl_softc *, int, int);
320 static void tl_dio_clrbit	(struct tl_softc *, int, int);
321 static void tl_dio_setbit16	(struct tl_softc *, int, int);
322 static void tl_dio_clrbit16	(struct tl_softc *, int, int);
323 
324 #ifdef TL_USEIOSPACE
325 #define TL_RES		SYS_RES_IOPORT
326 #define TL_RID		TL_PCI_LOIO
327 #else
328 #define TL_RES		SYS_RES_MEMORY
329 #define TL_RID		TL_PCI_LOMEM
330 #endif
331 
332 static device_method_t tl_methods[] = {
333 	/* Device interface */
334 	DEVMETHOD(device_probe,		tl_probe),
335 	DEVMETHOD(device_attach,	tl_attach),
336 	DEVMETHOD(device_detach,	tl_detach),
337 	DEVMETHOD(device_shutdown,	tl_shutdown),
338 
339 	/* bus interface */
340 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
341 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
342 
343 	/* MII interface */
344 	DEVMETHOD(miibus_readreg,	tl_miibus_readreg),
345 	DEVMETHOD(miibus_writereg,	tl_miibus_writereg),
346 	DEVMETHOD(miibus_statchg,	tl_miibus_statchg),
347 
348 	{ 0, 0 }
349 };
350 
351 static driver_t tl_driver = {
352 	"tl",
353 	tl_methods,
354 	sizeof(struct tl_softc)
355 };
356 
357 static devclass_t tl_devclass;
358 
359 DECLARE_DUMMY_MODULE(if_tl);
360 DRIVER_MODULE(if_tl, pci, tl_driver, tl_devclass, 0, 0);
361 DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0);
362 
363 static u_int8_t tl_dio_read8(sc, reg)
364 	struct tl_softc		*sc;
365 	int			reg;
366 {
367 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
368 	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
369 }
370 
371 static u_int16_t tl_dio_read16(sc, reg)
372 	struct tl_softc		*sc;
373 	int			reg;
374 {
375 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
376 	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
377 }
378 
379 static u_int32_t tl_dio_read32(sc, reg)
380 	struct tl_softc		*sc;
381 	int			reg;
382 {
383 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
384 	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
385 }
386 
387 static void tl_dio_write8(sc, reg, val)
388 	struct tl_softc		*sc;
389 	int			reg;
390 	int			val;
391 {
392 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
393 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
394 	return;
395 }
396 
397 static void tl_dio_write16(sc, reg, val)
398 	struct tl_softc		*sc;
399 	int			reg;
400 	int			val;
401 {
402 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
403 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
404 	return;
405 }
406 
407 static void tl_dio_write32(sc, reg, val)
408 	struct tl_softc		*sc;
409 	int			reg;
410 	int			val;
411 {
412 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
413 	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
414 	return;
415 }
416 
417 static void tl_dio_setbit(sc, reg, bit)
418 	struct tl_softc		*sc;
419 	int			reg;
420 	int			bit;
421 {
422 	u_int8_t			f;
423 
424 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
425 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
426 	f |= bit;
427 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
428 
429 	return;
430 }
431 
432 static void tl_dio_clrbit(sc, reg, bit)
433 	struct tl_softc		*sc;
434 	int			reg;
435 	int			bit;
436 {
437 	u_int8_t			f;
438 
439 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
440 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
441 	f &= ~bit;
442 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
443 
444 	return;
445 }
446 
447 static void tl_dio_setbit16(sc, reg, bit)
448 	struct tl_softc		*sc;
449 	int			reg;
450 	int			bit;
451 {
452 	u_int16_t			f;
453 
454 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
455 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
456 	f |= bit;
457 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
458 
459 	return;
460 }
461 
462 static void tl_dio_clrbit16(sc, reg, bit)
463 	struct tl_softc		*sc;
464 	int			reg;
465 	int			bit;
466 {
467 	u_int16_t			f;
468 
469 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
470 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
471 	f &= ~bit;
472 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
473 
474 	return;
475 }
476 
477 /*
478  * Send an instruction or address to the EEPROM, check for ACK.
479  */
480 static u_int8_t tl_eeprom_putbyte(sc, byte)
481 	struct tl_softc		*sc;
482 	int			byte;
483 {
484 	int		i, ack = 0;
485 
486 	/*
487 	 * Make sure we're in TX mode.
488 	 */
489 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
490 
491 	/*
 492 	 * Feed in each bit and strobe the clock.
493 	 */
494 	for (i = 0x80; i; i >>= 1) {
495 		if (byte & i) {
496 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
497 		} else {
498 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
499 		}
500 		DELAY(1);
501 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
502 		DELAY(1);
503 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
504 	}
505 
506 	/*
507 	 * Turn off TX mode.
508 	 */
509 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
510 
511 	/*
512 	 * Check for ack.
513 	 */
514 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
515 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
516 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
517 
518 	return(ack);
519 }
520 
521 /*
522  * Read a byte of data stored in the EEPROM at address 'addr.'
523  */
524 static u_int8_t tl_eeprom_getbyte(sc, addr, dest)
525 	struct tl_softc		*sc;
526 	int			addr;
527 	u_int8_t		*dest;
528 {
529 	int		i;
530 	u_int8_t		byte = 0;
531 
532 	tl_dio_write8(sc, TL_NETSIO, 0);
533 
534 	EEPROM_START;
535 
536 	/*
537 	 * Send write control code to EEPROM.
538 	 */
539 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
540 		printf("tl%d: failed to send write command, status: %x\n",
541 				sc->tl_unit, tl_dio_read8(sc, TL_NETSIO));
542 		return(1);
543 	}
544 
545 	/*
546 	 * Send address of byte we want to read.
547 	 */
548 	if (tl_eeprom_putbyte(sc, addr)) {
549 		printf("tl%d: failed to send address, status: %x\n",
550 				sc->tl_unit, tl_dio_read8(sc, TL_NETSIO));
551 		return(1);
552 	}
553 
554 	EEPROM_STOP;
555 	EEPROM_START;
556 	/*
557 	 * Send read control code to EEPROM.
558 	 */
559 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
 560 		printf("tl%d: failed to send read command, status: %x\n",
561 				sc->tl_unit, tl_dio_read8(sc, TL_NETSIO));
562 		return(1);
563 	}
564 
565 	/*
566 	 * Start reading bits from EEPROM.
567 	 */
568 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
569 	for (i = 0x80; i; i >>= 1) {
570 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
571 		DELAY(1);
572 		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
573 			byte |= i;
574 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
575 		DELAY(1);
576 	}
577 
578 	EEPROM_STOP;
579 
580 	/*
581 	 * No ACK generated for read, so just return byte.
582 	 */
583 
584 	*dest = byte;
585 
586 	return(0);
587 }
588 
589 /*
590  * Read a sequence of bytes from the EEPROM.
591  */
592 static int tl_read_eeprom(sc, dest, off, cnt)
593 	struct tl_softc		*sc;
594 	caddr_t			dest;
595 	int			off;
596 	int			cnt;
597 {
598 	int			err = 0, i;
599 	u_int8_t		byte = 0;
600 
601 	for (i = 0; i < cnt; i++) {
602 		err = tl_eeprom_getbyte(sc, off + i, &byte);
603 		if (err)
604 			break;
605 		*(dest + i) = byte;
606 	}
607 
608 	return(err ? 1 : 0);
609 }
610 
611 static void tl_mii_sync(sc)
612 	struct tl_softc		*sc;
613 {
614 	int		i;
615 
616 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
617 
618 	for (i = 0; i < 32; i++) {
619 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
620 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
621 	}
622 
623 	return;
624 }
625 
626 static void tl_mii_send(sc, bits, cnt)
627 	struct tl_softc		*sc;
628 	u_int32_t		bits;
629 	int			cnt;
630 {
631 	int			i;
632 
633 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
634 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
635 		if (bits & i) {
636 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
637 		} else {
638 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
639 		}
640 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
641 	}
642 }
643 
644 static int tl_mii_readreg(sc, frame)
645 	struct tl_softc		*sc;
646 	struct tl_mii_frame	*frame;
647 
648 {
649 	int			i, ack, s;
650 	int			minten = 0;
651 
652 	s = splimp();
653 
654 	tl_mii_sync(sc);
655 
656 	/*
657 	 * Set up frame for RX.
658 	 */
659 	frame->mii_stdelim = TL_MII_STARTDELIM;
660 	frame->mii_opcode = TL_MII_READOP;
661 	frame->mii_turnaround = 0;
662 	frame->mii_data = 0;
663 
664 	/*
665 	 * Turn off MII interrupt by forcing MINTEN low.
666 	 */
667 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
668 	if (minten) {
669 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
670 	}
671 
672 	/*
673  	 * Turn on data xmit.
674 	 */
675 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
676 
677 	/*
678 	 * Send command/address info.
679 	 */
680 	tl_mii_send(sc, frame->mii_stdelim, 2);
681 	tl_mii_send(sc, frame->mii_opcode, 2);
682 	tl_mii_send(sc, frame->mii_phyaddr, 5);
683 	tl_mii_send(sc, frame->mii_regaddr, 5);
684 
685 	/*
686 	 * Turn off xmit.
687 	 */
688 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
689 
690 	/* Idle bit */
691 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
692 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
693 
694 	/* Check for ack */
695 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
696 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;
697 
698 	/* Complete the cycle */
699 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
700 
701 	/*
702 	 * Now try reading data bits. If the ack failed, we still
703 	 * need to clock through 16 cycles to keep the PHYs in sync.
704 	 */
705 	if (ack) {
706 		for(i = 0; i < 16; i++) {
707 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
708 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
709 		}
710 		goto fail;
711 	}
712 
713 	for (i = 0x8000; i; i >>= 1) {
714 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
715 		if (!ack) {
716 			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
717 				frame->mii_data |= i;
718 		}
719 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
720 	}
721 
722 fail:
723 
724 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
725 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
726 
727 	/* Reenable interrupts */
728 	if (minten) {
729 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
730 	}
731 
732 	splx(s);
733 
734 	if (ack)
735 		return(1);
736 	return(0);
737 }
738 
739 static int tl_mii_writereg(sc, frame)
740 	struct tl_softc		*sc;
741 	struct tl_mii_frame	*frame;
742 
743 {
744 	int			s;
745 	int			minten;
746 
747 	tl_mii_sync(sc);
748 
749 	s = splimp();
750 	/*
751 	 * Set up frame for TX.
752 	 */
753 
754 	frame->mii_stdelim = TL_MII_STARTDELIM;
755 	frame->mii_opcode = TL_MII_WRITEOP;
756 	frame->mii_turnaround = TL_MII_TURNAROUND;
757 
758 	/*
759 	 * Turn off MII interrupt by forcing MINTEN low.
760 	 */
761 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
762 	if (minten) {
763 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
764 	}
765 
766 	/*
767  	 * Turn on data output.
768 	 */
769 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
770 
771 	tl_mii_send(sc, frame->mii_stdelim, 2);
772 	tl_mii_send(sc, frame->mii_opcode, 2);
773 	tl_mii_send(sc, frame->mii_phyaddr, 5);
774 	tl_mii_send(sc, frame->mii_regaddr, 5);
775 	tl_mii_send(sc, frame->mii_turnaround, 2);
776 	tl_mii_send(sc, frame->mii_data, 16);
777 
778 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
779 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
780 
781 	/*
782 	 * Turn off xmit.
783 	 */
784 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
785 
786 	/* Reenable interrupts */
787 	if (minten)
788 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
789 
790 	splx(s);
791 
792 	return(0);
793 }
794 
795 static int tl_miibus_readreg(dev, phy, reg)
796 	device_t		dev;
797 	int			phy, reg;
798 {
799 	struct tl_softc		*sc;
800 	struct tl_mii_frame	frame;
801 
802 	sc = device_get_softc(dev);
803 	bzero((char *)&frame, sizeof(frame));
804 
805 	frame.mii_phyaddr = phy;
806 	frame.mii_regaddr = reg;
807 	tl_mii_readreg(sc, &frame);
808 
809 	return(frame.mii_data);
810 }
811 
812 static int tl_miibus_writereg(dev, phy, reg, data)
813 	device_t		dev;
814 	int			phy, reg, data;
815 {
816 	struct tl_softc		*sc;
817 	struct tl_mii_frame	frame;
818 
819 	sc = device_get_softc(dev);
820 	bzero((char *)&frame, sizeof(frame));
821 
822 	frame.mii_phyaddr = phy;
823 	frame.mii_regaddr = reg;
824 	frame.mii_data = data;
825 
826 	tl_mii_writereg(sc, &frame);
827 
828 	return(0);
829 }
830 
831 static void tl_miibus_statchg(dev)
832 	device_t		dev;
833 {
834 	struct tl_softc		*sc;
835 	struct mii_data		*mii;
836 
837 	sc = device_get_softc(dev);
838 	mii = device_get_softc(sc->tl_miibus);
839 
840 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
841 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
842 	} else {
843 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
844 	}
845 
846 	return;
847 }
848 
849 /*
850  * Set modes for bitrate devices.
851  */
852 static void tl_setmode(sc, media)
853 	struct tl_softc		*sc;
854 	int			media;
855 {
856 	if (IFM_SUBTYPE(media) == IFM_10_5)
857 		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
858 	if (IFM_SUBTYPE(media) == IFM_10_T) {
859 		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
860 		if ((media & IFM_GMASK) == IFM_FDX) {
861 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
862 			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
863 		} else {
864 			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
865 			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
866 		}
867 	}
868 
869 	return;
870 }
871 
872 /*
873  * Calculate the hash of a MAC address for programming the multicast hash
874  * table.  This hash is simply the address split into 6-bit chunks
875  * XOR'd, e.g.
876  * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
877  * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
878  * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
879  * the folded 24-bit value is split into 6-bit portions and XOR'd.
880  */
881 static int tl_calchash(addr)
882 	caddr_t			addr;
883 {
884 	int			t;
885 
886 	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
887 		(addr[2] ^ addr[5]);
888 	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
889 }
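/*
 * Worked example (illustration only): for the all-hosts group 224.0.0.1,
 * whose Ethernet address is 01:00:5e:00:00:01, the fold above gives
 * t = (0x01^0x00)<<16 | (0x00^0x00)<<8 | (0x5e^0x01) = 0x01005f.  Split
 * into 6-bit chunks that is 0x00, 0x10, 0x01 and 0x1f, which XOR to 0x0e,
 * so tl_calchash() returns 14 and the address maps to bit 14 of TL_HASH1.
 */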
890 
891 /*
892  * The ThunderLAN has a perfect MAC address filter in addition to
893  * the multicast hash filter. The perfect filter can be programmed
894  * with up to four MAC addresses. The first one is always used to
895  * hold the station address, which leaves us free to use the other
896  * three for multicast addresses.
897  */
898 static void tl_setfilt(sc, addr, slot)
899 	struct tl_softc		*sc;
900 	caddr_t			addr;
901 	int			slot;
902 {
903 	int			i;
904 	u_int16_t		regaddr;
905 
906 	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
907 
908 	for (i = 0; i < ETHER_ADDR_LEN; i++)
909 		tl_dio_write8(sc, regaddr + i, *(addr + i));
910 
911 	return;
912 }
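/*
 * For illustration: loading the station address into perfect filter slot 0
 * (leaving slots 1-3 free for multicast groups, as tl_setmulti() below
 * assumes) would be done with:
 *
 *	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);
 */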
913 
914 /*
915  * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
916  * linked list. This is fine, except addresses are added from the head
917  * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
918  * group to always be in the perfect filter, but as more groups are added,
919  * the 224.0.0.1 entry (which is always added first) gets pushed down
920  * the list and ends up at the tail. So after 3 or 4 multicast groups
921  * are added, the all-hosts entry gets pushed out of the perfect filter
922  * and into the hash table.
923  *
924  * Because the multicast list is a doubly-linked list as opposed to a
925  * circular queue, we don't have the ability to just grab the tail of
926  * the list and traverse it backwards. Instead, we have to traverse
927  * the list once to find the tail, then traverse it again backwards to
928  * update the multicast filter.
929  */
930 static void tl_setmulti(sc)
931 	struct tl_softc		*sc;
932 {
933 	struct ifnet		*ifp;
934 	u_int32_t		hashes[2] = { 0, 0 };
935 	int			h, i;
936 	struct ifmultiaddr	*ifma;
 937 	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
938 	ifp = &sc->arpcom.ac_if;
939 
940 	/* First, zot all the existing filters. */
941 	for (i = 1; i < 4; i++)
942 		tl_setfilt(sc, (caddr_t)&dummy, i);
943 	tl_dio_write32(sc, TL_HASH1, 0);
944 	tl_dio_write32(sc, TL_HASH2, 0);
945 
946 	/* Now program new ones. */
947 	if (ifp->if_flags & IFF_ALLMULTI) {
948 		hashes[0] = 0xFFFFFFFF;
949 		hashes[1] = 0xFFFFFFFF;
950 	} else {
951 		i = 1;
952 		/* First find the tail of the list. */
953 		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
954 					ifma = ifma->ifma_link.le_next) {
955 			if (ifma->ifma_link.le_next == NULL)
956 				break;
957 		}
958 		/* Now traverse the list backwards. */
959 		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
960 			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
961 			if (ifma->ifma_addr->sa_family != AF_LINK)
962 				continue;
963 			/*
964 			 * Program the first three multicast groups
965 			 * into the perfect filter. For all others,
966 			 * use the hash table.
967 			 */
968 			if (i < 4) {
969 				tl_setfilt(sc,
970 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
971 				i++;
972 				continue;
973 			}
974 
975 			h = tl_calchash(
976 				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
977 			if (h < 32)
978 				hashes[0] |= (1 << h);
979 			else
980 				hashes[1] |= (1 << (h - 32));
981 		}
982 	}
983 
984 	tl_dio_write32(sc, TL_HASH1, hashes[0]);
985 	tl_dio_write32(sc, TL_HASH2, hashes[1]);
986 
987 	return;
988 }
989 
990 /*
 991  * This routine is recommended by the ThunderLAN manual to ensure that
992  * the internal PHY is powered up correctly. It also recommends a one
993  * second pause at the end to 'wait for the clocks to start' but in my
994  * experience this isn't necessary.
995  */
996 static void tl_hardreset(dev)
997 	device_t		dev;
998 {
999 	struct tl_softc		*sc;
1000 	int			i;
1001 	u_int16_t		flags;
1002 
1003 	sc = device_get_softc(dev);
1004 
1005 	tl_mii_sync(sc);
1006 
1007 	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
1008 
1009 	for (i = 0; i < MII_NPHY; i++)
1010 		tl_miibus_writereg(dev, i, MII_BMCR, flags);
1011 
1012 	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
1013 	DELAY(50000);
1014 	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
1015 	tl_mii_sync(sc);
1016 	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
1017 
1018 	DELAY(50000);
1019 	return;
1020 }
1021 
1022 static void tl_softreset(sc, internal)
1023 	struct tl_softc		*sc;
1024 	int			internal;
1025 {
1026         u_int32_t               cmd, dummy, i;
1027 
1028         /* Assert the adapter reset bit. */
1029 	CMD_SET(sc, TL_CMD_ADRST);
1030 
1031         /* Turn off interrupts */
1032 	CMD_SET(sc, TL_CMD_INTSOFF);
1033 
1034 	/* First, clear the stats registers. */
1035 	for (i = 0; i < 5; i++)
1036 		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
1037 
1038         /* Clear Areg and Hash registers */
1039 	for (i = 0; i < 8; i++)
1040 		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
1041 
1042         /*
1043 	 * Set up Netconfig register. Enable one channel and
1044 	 * one fragment mode.
1045 	 */
1046 	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
1047 	if (internal && !sc->tl_bitrate) {
1048 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1049 	} else {
1050 		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1051 	}
1052 
1053 	/* Handle cards with bitrate devices. */
1054 	if (sc->tl_bitrate)
1055 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
1056 
1057 	/*
1058 	 * Load adapter irq pacing timer and tx threshold.
1059 	 * We make the transmit threshold 1 initially but we may
1060 	 * change that later.
1061 	 */
1062 	cmd = CSR_READ_4(sc, TL_HOSTCMD);
1063 	cmd |= TL_CMD_NES;
1064 	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
1065 	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
1066 	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
1067 
1068         /* Unreset the MII */
1069 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
1070 
1071 	/* Take the adapter out of reset */
1072 	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
1073 
1074 	/* Wait for things to settle down a little. */
1075 	DELAY(500);
1076 
1077         return;
1078 }
1079 
1080 /*
1081  * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1082  * against our list and return its name if we find a match.
1083  */
1084 static int tl_probe(dev)
1085 	device_t		dev;
1086 {
1087 	struct tl_type		*t;
1088 
1089 	t = tl_devs;
1090 
1091 	while(t->tl_name != NULL) {
1092 		if ((pci_get_vendor(dev) == t->tl_vid) &&
1093 		    (pci_get_device(dev) == t->tl_did)) {
1094 			device_set_desc(dev, t->tl_name);
1095 			return(0);
1096 		}
1097 		t++;
1098 	}
1099 
1100 	return(ENXIO);
1101 }
1102 
1103 static int tl_attach(dev)
1104 	device_t		dev;
1105 {
1106 	int			s, i;
1107 	u_int32_t		command;
1108 	u_int16_t		did, vid;
1109 	struct tl_type		*t;
1110 	struct ifnet		*ifp;
1111 	struct tl_softc		*sc;
1112 	int			unit, error = 0, rid;
1113 
1114 	s = splimp();
1115 
1116 	vid = pci_get_vendor(dev);
1117 	did = pci_get_device(dev);
1118 	sc = device_get_softc(dev);
1119 	unit = device_get_unit(dev);
1120 	bzero(sc, sizeof(struct tl_softc));
1121 
1122 	t = tl_devs;
1123 	while(t->tl_name != NULL) {
1124 		if (vid == t->tl_vid && did == t->tl_did)
1125 			break;
1126 		t++;
1127 	}
1128 
1129 	if (t->tl_name == NULL) {
1130 		printf("tl%d: unknown device!?\n", unit);
1131 		goto fail;
1132 	}
1133 
1134 	/*
1135 	 * Map control/status registers.
1136 	 */
1137 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1138 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1139 	pci_write_config(dev, PCIR_COMMAND, command, 4);
1140 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1141 
1142 #ifdef TL_USEIOSPACE
1143 	if (!(command & PCIM_CMD_PORTEN)) {
1144 		printf("tl%d: failed to enable I/O ports!\n", unit);
1145 		error = ENXIO;
1146 		goto fail;
1147 	}
1148 
1149 	rid = TL_PCI_LOIO;
1150 	sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
1151 		0, ~0, 1, RF_ACTIVE);
1152 
1153 	/*
1154 	 * Some cards have the I/O and memory mapped address registers
1155 	 * reversed. Try both combinations before giving up.
1156 	 */
1157 	if (sc->tl_res == NULL) {
1158 		rid = TL_PCI_LOMEM;
1159 		sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
1160 		    0, ~0, 1, RF_ACTIVE);
1161 	}
1162 #else
1163 	if (!(command & PCIM_CMD_MEMEN)) {
1164 		printf("tl%d: failed to enable memory mapping!\n", unit);
1165 		error = ENXIO;
1166 		goto fail;
1167 	}
1168 
1169 	rid = TL_PCI_LOMEM;
1170 	sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1171 	    0, ~0, 1, RF_ACTIVE);
1172 	if (sc->tl_res == NULL) {
1173 		rid = TL_PCI_LOIO;
1174 		sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1175 		    0, ~0, 1, RF_ACTIVE);
1176 	}
1177 #endif
1178 
1179 	if (sc->tl_res == NULL) {
1180 		printf("tl%d: couldn't map ports/memory\n", unit);
1181 		error = ENXIO;
1182 		goto fail;
1183 	}
1184 
1185 	sc->tl_btag = rman_get_bustag(sc->tl_res);
1186 	sc->tl_bhandle = rman_get_bushandle(sc->tl_res);
1187 
1188 #ifdef notdef
1189 	/*
1190 	 * The ThunderLAN manual suggests jacking the PCI latency
1191 	 * timer all the way up to its maximum value. I'm not sure
1192 	 * if this is really necessary, but what the manual wants,
1193 	 * the manual gets.
1194 	 */
1195 	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
1196 	command |= 0x0000FF00;
1197 	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
1198 #endif
1199 
1200 	/* Allocate interrupt */
1201 	rid = 0;
1202 	sc->tl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1203 	    RF_SHAREABLE | RF_ACTIVE);
1204 
1205 	if (sc->tl_irq == NULL) {
1206 		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1207 		printf("tl%d: couldn't map interrupt\n", unit);
1208 		error = ENXIO;
1209 		goto fail;
1210 	}
1211 
1212 	error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET,
1213 	    tl_intr, sc, &sc->tl_intrhand);
1214 
1215 	if (error) {
1216 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1217 		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1218 		printf("tl%d: couldn't set up irq\n", unit);
1219 		goto fail;
1220 	}
1221 
1222 	/*
1223 	 * Now allocate memory for the TX and RX lists.
1224 	 */
1225 	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
1226 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1227 
1228 	if (sc->tl_ldata == NULL) {
1229 		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1230 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1231 		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1232 		printf("tl%d: no memory for list buffers!\n", unit);
1233 		error = ENXIO;
1234 		goto fail;
1235 	}
1236 
1237 	bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1238 
1239 	sc->tl_unit = unit;
1240 	sc->tl_dinfo = t;
1241 	if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID)
1242 		sc->tl_eeaddr = TL_EEPROM_EADDR;
1243 	if (t->tl_vid == OLICOM_VENDORID)
1244 		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1245 
1246 	/* Reset the adapter. */
1247 	tl_softreset(sc, 1);
1248 	tl_hardreset(dev);
1249 	tl_softreset(sc, 1);
1250 
1251 	/*
1252 	 * Get station address from the EEPROM.
1253 	 */
1254 	if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1255 				sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1256 		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1257 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1258 		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1259 		contigfree(sc->tl_ldata,
1260 		    sizeof(struct tl_list_data), M_DEVBUF);
1261 		printf("tl%d: failed to read station address\n", unit);
1262 		error = ENXIO;
1263 		goto fail;
1264 	}
1265 
1266         /*
1267          * XXX Olicom, in its desire to be different from the
1268          * rest of the world, has done strange things with the
1269          * encoding of the station address in the EEPROM. First
1270          * of all, they store the address at offset 0xF8 rather
1271          * than at 0x83 like the ThunderLAN manual suggests.
1272          * Second, they store the address in three 16-bit words in
1273          * network byte order, as opposed to storing it sequentially
1274          * like all the other ThunderLAN cards. In order to get
1275          * the station address in a form that matches what the Olicom
1276          * diagnostic utility specifies, we have to byte-swap each
1277          * word. To make things even more confusing, neither 00:00:28
1278          * nor 00:00:24 appear in the IEEE OUI database.
1279          */
1280         if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) {
1281                 for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1282                         u_int16_t               *p;
1283                         p = (u_int16_t *)&sc->arpcom.ac_enaddr[i];
1284                         *p = ntohs(*p);
1285                 }
1286         }
1287 
1288 	ifp = &sc->arpcom.ac_if;
1289 	ifp->if_softc = sc;
1290 	if_initname(ifp, "tl", sc->tl_unit);
1291 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1292 	ifp->if_ioctl = tl_ioctl;
1293 	ifp->if_start = tl_start;
1294 	ifp->if_watchdog = tl_watchdog;
1295 	ifp->if_init = tl_init;
1296 	ifp->if_mtu = ETHERMTU;
1297 	ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
1298 	callout_init(&sc->tl_stat_timer);
1299 
1300 	/* Reset the adapter again. */
1301 	tl_softreset(sc, 1);
1302 	tl_hardreset(dev);
1303 	tl_softreset(sc, 1);
1304 
1305 	/*
1306 	 * Do MII setup. If no PHYs are found, then this is a
1307 	 * bitrate ThunderLAN chip that only supports 10baseT
1308 	 * and AUI/BNC.
1309 	 */
1310 	if (mii_phy_probe(dev, &sc->tl_miibus,
1311 	    tl_ifmedia_upd, tl_ifmedia_sts)) {
1312 		struct ifmedia		*ifm;
1313 		sc->tl_bitrate = 1;
1314 		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1315 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1316 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1317 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1318 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1319 		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
1320 		/* Reset again, this time setting bitrate mode. */
1321 		tl_softreset(sc, 1);
1322 		ifm = &sc->ifmedia;
1323 		ifm->ifm_media = ifm->ifm_cur->ifm_media;
1324 		tl_ifmedia_upd(ifp);
1325 	}
1326 
1327 	/*
1328 	 * Call MI attach routine.
1329 	 */
1330 	ether_ifattach(ifp, sc->arpcom.ac_enaddr);
1331 
1332 fail:
1333 	splx(s);
1334 	return(error);
1335 }
1336 
1337 static int tl_detach(dev)
1338 	device_t		dev;
1339 {
1340 	struct tl_softc		*sc;
1341 	struct ifnet		*ifp;
1342 	int			s;
1343 
1344 	s = splimp();
1345 
1346 	sc = device_get_softc(dev);
1347 	ifp = &sc->arpcom.ac_if;
1348 
1349 	tl_stop(sc);
1350 	ether_ifdetach(ifp);
1351 
1352 	bus_generic_detach(dev);
1353 	device_delete_child(dev, sc->tl_miibus);
1354 
1355 	contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
1356 	if (sc->tl_bitrate)
1357 		ifmedia_removeall(&sc->ifmedia);
1358 
1359 	bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1360 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1361 	bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1362 
1363 	splx(s);
1364 
1365 	return(0);
1366 }
1367 
1368 /*
1369  * Initialize the transmit lists.
1370  */
1371 static int tl_list_tx_init(sc)
1372 	struct tl_softc		*sc;
1373 {
1374 	struct tl_chain_data	*cd;
1375 	struct tl_list_data	*ld;
1376 	int			i;
1377 
1378 	cd = &sc->tl_cdata;
1379 	ld = sc->tl_ldata;
1380 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1381 		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1382 		if (i == (TL_TX_LIST_CNT - 1))
1383 			cd->tl_tx_chain[i].tl_next = NULL;
1384 		else
1385 			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1386 	}
1387 
1388 	cd->tl_tx_free = &cd->tl_tx_chain[0];
1389 	cd->tl_tx_tail = cd->tl_tx_head = NULL;
1390 	sc->tl_txeoc = 1;
1391 
1392 	return(0);
1393 }
1394 
1395 /*
1396  * Initialize the RX lists and allocate mbufs for them.
1397  */
1398 static int tl_list_rx_init(sc)
1399 	struct tl_softc		*sc;
1400 {
1401 	struct tl_chain_data	*cd;
1402 	struct tl_list_data	*ld;
1403 	int			i;
1404 
1405 	cd = &sc->tl_cdata;
1406 	ld = sc->tl_ldata;
1407 
1408 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1409 		cd->tl_rx_chain[i].tl_ptr =
1410 			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
1411 		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1412 			return(ENOBUFS);
1413 		if (i == (TL_RX_LIST_CNT - 1)) {
1414 			cd->tl_rx_chain[i].tl_next = NULL;
1415 			ld->tl_rx_list[i].tlist_fptr = 0;
1416 		} else {
1417 			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1418 			ld->tl_rx_list[i].tlist_fptr =
1419 					vtophys(&ld->tl_rx_list[i + 1]);
1420 		}
1421 	}
1422 
1423 	cd->tl_rx_head = &cd->tl_rx_chain[0];
1424 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1425 
1426 	return(0);
1427 }
1428 
1429 static int tl_newbuf(sc, c)
1430 	struct tl_softc		*sc;
1431 	struct tl_chain_onefrag	*c;
1432 {
1433 	struct mbuf		*m_new = NULL;
1434 
1435 	MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
1436 	if (m_new == NULL)
1437 		return(ENOBUFS);
1438 
1439 	MCLGET(m_new, MB_DONTWAIT);
1440 	if (!(m_new->m_flags & M_EXT)) {
1441 		m_freem(m_new);
1442 		return(ENOBUFS);
1443 	}
1444 
1445 #ifdef __alpha__
1446 	m_new->m_data += 2;
1447 #endif
1448 
1449 	c->tl_mbuf = m_new;
1450 	c->tl_next = NULL;
1451 	c->tl_ptr->tlist_frsize = MCLBYTES;
1452 	c->tl_ptr->tlist_fptr = 0;
1453 	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1454 	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1455 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1456 
1457 	return(0);
1458 }
1459 /*
1460  * Interrupt handler for RX 'end of frame' condition (EOF). This
1461  * tells us that a full ethernet frame has been captured and we need
1462  * to handle it.
1463  *
1464  * Reception is done using 'lists' which consist of a header and a
1465  * series of 10 data count/data address pairs that point to buffers.
1466  * Initially you're supposed to create a list, populate it with pointers
1467  * to buffers, then load the physical address of the list into the
1468  * ch_parm register. The adapter is then supposed to DMA the received
1469  * frame into the buffers for you.
1470  *
1471  * To make things as fast as possible, we have the chip DMA directly
1472  * into mbufs. This saves us from having to do a buffer copy: we can
1473  * just hand the mbufs directly to ether_input(). Once the frame has
1474  * been sent on its way, the 'list' structure is assigned a new buffer
 1475  * and moved to the end of the RX chain. As long as we stay ahead of
1476  * the chip, it will always think it has an endless receive channel.
1477  *
1478  * If we happen to fall behind and the chip manages to fill up all of
1479  * the buffers, it will generate an end of channel interrupt and wait
1480  * for us to empty the chain and restart the receiver.
1481  */
1482 static int tl_intvec_rxeof(xsc, type)
1483 	void			*xsc;
1484 	u_int32_t		type;
1485 {
1486 	struct tl_softc		*sc;
1487 	int			r = 0, total_len = 0;
1488 	struct ether_header	*eh;
1489 	struct mbuf		*m;
1490 	struct ifnet		*ifp;
1491 	struct tl_chain_onefrag	*cur_rx;
1492 
1493 	sc = xsc;
1494 	ifp = &sc->arpcom.ac_if;
1495 
1496 	while(sc->tl_cdata.tl_rx_head != NULL) {
1497 		cur_rx = sc->tl_cdata.tl_rx_head;
1498 		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1499 			break;
1500 		r++;
1501 		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1502 		m = cur_rx->tl_mbuf;
1503 		total_len = cur_rx->tl_ptr->tlist_frsize;
1504 
1505 		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1506 			ifp->if_ierrors++;
1507 			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1508 			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1509 			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1510 			continue;
1511 		}
1512 
1513 		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1514 						vtophys(cur_rx->tl_ptr);
1515 		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1516 		sc->tl_cdata.tl_rx_tail = cur_rx;
1517 
1518 		eh = mtod(m, struct ether_header *);
1519 		m->m_pkthdr.rcvif = ifp;
1520 
1521 		/*
1522 		 * Note: when the ThunderLAN chip is in 'capture all
1523 		 * frames' mode, it will receive its own transmissions.
 1524 		 * We don't need to process our own transmissions,
1525 		 * so we drop them here and continue.
1526 		 */
1527 		/*if (ifp->if_flags & IFF_PROMISC && */
1528 		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
1529 		 					ETHER_ADDR_LEN)) {
1530 				m_freem(m);
1531 				continue;
1532 		}
1533 
1534 		(*ifp->if_input)(ifp, m);
1535 	}
1536 
1537 	return(r);
1538 }
1539 
1540 /*
1541  * The RX-EOC condition hits when the ch_parm address hasn't been
1542  * initialized or the adapter reached a list with a forward pointer
1543  * of 0 (which indicates the end of the chain). In our case, this means
1544  * the card has hit the end of the receive buffer chain and we need to
1545  * empty out the buffers and shift the pointer back to the beginning again.
1546  */
1547 static int tl_intvec_rxeoc(xsc, type)
1548 	void			*xsc;
1549 	u_int32_t		type;
1550 {
1551 	struct tl_softc		*sc;
1552 	int			r;
1553 	struct tl_chain_data	*cd;
1554 
1555 
1556 	sc = xsc;
1557 	cd = &sc->tl_cdata;
1558 
1559 	/* Flush out the receive queue and ack RXEOF interrupts. */
1560 	r = tl_intvec_rxeof(xsc, type);
1561 	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1562 	r = 1;
1563 	cd->tl_rx_head = &cd->tl_rx_chain[0];
1564 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1565 	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1566 	r |= (TL_CMD_GO|TL_CMD_RT);
1567 	return(r);
1568 }
1569 
1570 static int tl_intvec_txeof(xsc, type)
1571 	void			*xsc;
1572 	u_int32_t		type;
1573 {
1574 	struct tl_softc		*sc;
1575 	int			r = 0;
1576 	struct tl_chain		*cur_tx;
1577 
1578 	sc = xsc;
1579 
1580 	/*
1581 	 * Go through our tx list and free mbufs for those
1582 	 * frames that have been sent.
1583 	 */
1584 	while (sc->tl_cdata.tl_tx_head != NULL) {
1585 		cur_tx = sc->tl_cdata.tl_tx_head;
1586 		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1587 			break;
1588 		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1589 
1590 		r++;
1591 		m_freem(cur_tx->tl_mbuf);
1592 		cur_tx->tl_mbuf = NULL;
1593 
1594 		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1595 		sc->tl_cdata.tl_tx_free = cur_tx;
1596 		if (!cur_tx->tl_ptr->tlist_fptr)
1597 			break;
1598 	}
1599 
1600 	return(r);
1601 }
1602 
1603 /*
1604  * The transmit end of channel interrupt. The adapter triggers this
1605  * interrupt to tell us it hit the end of the current transmit list.
1606  *
1607  * A note about this: it's possible for a condition to arise where
1608  * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1609  * You have to avoid this since the chip expects things to go in a
1610  * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1611  * When the TXEOF handler is called, it will free all of the transmitted
1612  * frames and reset the tx_head pointer to NULL. However, a TXEOC
1613  * interrupt should be received and acknowledged before any more frames
 1614  * are queued for transmission. If tl_start() is called after TXEOF
1615  * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1616  * it could attempt to issue a transmit command prematurely.
1617  *
1618  * To guard against this, tl_start() will only issue transmit commands
1619  * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1620  * can set this flag once tl_start() has cleared it.
1621  */
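/*
 * A minimal, simplified sketch of that guard as seen from the transmit
 * side (the real tl_start() appears later in this file and also manages
 * the HOSTCMD bits, IFF_OACTIVE and list chaining); start_tx here merely
 * stands for the first newly queued transmit list:
 *
 *	if (sc->tl_txeoc) {
 *		sc->tl_txeoc = 0;
 *		CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
 *		CMD_PUT(sc, TL_CMD_GO);
 *	}
 */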
1622 static int tl_intvec_txeoc(xsc, type)
1623 	void			*xsc;
1624 	u_int32_t		type;
1625 {
1626 	struct tl_softc		*sc;
1627 	struct ifnet		*ifp;
1628 	u_int32_t		cmd;
1629 
1630 	sc = xsc;
1631 	ifp = &sc->arpcom.ac_if;
1632 
1633 	/* Clear the timeout timer. */
1634 	ifp->if_timer = 0;
1635 
1636 	if (sc->tl_cdata.tl_tx_head == NULL) {
1637 		ifp->if_flags &= ~IFF_OACTIVE;
1638 		sc->tl_cdata.tl_tx_tail = NULL;
1639 		sc->tl_txeoc = 1;
1640 	} else {
1641 		sc->tl_txeoc = 0;
1642 		/* First we have to ack the EOC interrupt. */
1643 		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1644 		/* Then load the address of the next TX list. */
1645 		CSR_WRITE_4(sc, TL_CH_PARM,
1646 		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1647 		/* Restart TX channel. */
1648 		cmd = CSR_READ_4(sc, TL_HOSTCMD);
1649 		cmd &= ~TL_CMD_RT;
1650 		cmd |= TL_CMD_GO|TL_CMD_INTSON;
1651 		CMD_PUT(sc, cmd);
1652 		return(0);
1653 	}
1654 
1655 	return(1);
1656 }
1657 
1658 static int tl_intvec_adchk(xsc, type)
1659 	void			*xsc;
1660 	u_int32_t		type;
1661 {
1662 	struct tl_softc		*sc;
1663 
1664 	sc = xsc;
1665 
1666 	if (type)
1667 		printf("tl%d: adapter check: %x\n", sc->tl_unit,
1668 			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1669 
1670 	tl_softreset(sc, 1);
1671 	tl_stop(sc);
1672 	tl_init(sc);
1673 	CMD_SET(sc, TL_CMD_INTSON);
1674 
1675 	return(0);
1676 }
1677 
1678 static int tl_intvec_netsts(xsc, type)
1679 	void			*xsc;
1680 	u_int32_t		type;
1681 {
1682 	struct tl_softc		*sc;
1683 	u_int16_t		netsts;
1684 
1685 	sc = xsc;
1686 
1687 	netsts = tl_dio_read16(sc, TL_NETSTS);
1688 	tl_dio_write16(sc, TL_NETSTS, netsts);
1689 
1690 	printf("tl%d: network status: %x\n", sc->tl_unit, netsts);
1691 
1692 	return(1);
1693 }
1694 
1695 static void tl_intr(xsc)
1696 	void			*xsc;
1697 {
1698 	struct tl_softc		*sc;
1699 	struct ifnet		*ifp;
1700 	int			r = 0;
1701 	u_int32_t		type = 0;
1702 	u_int16_t		ints = 0;
1703 	u_int8_t		ivec = 0;
1704 
1705 	sc = xsc;
1706 
1707 	/* Disable interrupts */
1708 	ints = CSR_READ_2(sc, TL_HOST_INT);
1709 	CSR_WRITE_2(sc, TL_HOST_INT, ints);
1710 	type = (ints << 16) & 0xFFFF0000;
1711 	ivec = (ints & TL_VEC_MASK) >> 5;
1712 	ints = (ints & TL_INT_MASK) >> 2;
1713 
1714 	ifp = &sc->arpcom.ac_if;
1715 
1716 	switch(ints) {
1717 	case (TL_INTR_INVALID):
1718 #ifdef DIAGNOSTIC
1719 		printf("tl%d: got an invalid interrupt!\n", sc->tl_unit);
1720 #endif
1721 		/* Re-enable interrupts but don't ack this one. */
1722 		CMD_PUT(sc, type);
1723 		r = 0;
1724 		break;
1725 	case (TL_INTR_TXEOF):
1726 		r = tl_intvec_txeof((void *)sc, type);
1727 		break;
1728 	case (TL_INTR_TXEOC):
1729 		r = tl_intvec_txeoc((void *)sc, type);
1730 		break;
1731 	case (TL_INTR_STATOFLOW):
1732 		tl_stats_update(sc);
1733 		r = 1;
1734 		break;
1735 	case (TL_INTR_RXEOF):
1736 		r = tl_intvec_rxeof((void *)sc, type);
1737 		break;
1738 	case (TL_INTR_DUMMY):
1739 		printf("tl%d: got a dummy interrupt\n", sc->tl_unit);
1740 		r = 1;
1741 		break;
1742 	case (TL_INTR_ADCHK):
1743 		if (ivec)
1744 			r = tl_intvec_adchk((void *)sc, type);
1745 		else
1746 			r = tl_intvec_netsts((void *)sc, type);
1747 		break;
1748 	case (TL_INTR_RXEOC):
1749 		r = tl_intvec_rxeoc((void *)sc, type);
1750 		break;
1751 	default:
1752 		printf("%s: bogus interrupt type\n", ifp->if_xname);
1753 		break;
1754 	}
1755 
1756 	/* Re-enable interrupts */
1757 	if (r) {
1758 		CMD_PUT(sc, TL_CMD_ACK | r | type);
1759 	}
1760 
1761 	if (ifp->if_snd.ifq_head != NULL)
1762 		tl_start(ifp);
1763 
1764 	return;
1765 }
1766 
1767 static void tl_stats_update(xsc)
1768 	void			*xsc;
1769 {
1770 	struct tl_softc		*sc;
1771 	struct ifnet		*ifp;
1772 	struct tl_stats		tl_stats;
1773 	struct mii_data		*mii;
1774 	u_int32_t		*p;
1775 	int			s;
1776 
1777 	s = splimp();
1778 
1779 	bzero((char *)&tl_stats, sizeof(struct tl_stats));
1780 
1781 	sc = xsc;
1782 	ifp = &sc->arpcom.ac_if;
1783 
1784 	p = (u_int32_t *)&tl_stats;
1785 
1786 	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1787 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1788 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1789 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1790 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1791 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1792 
1793 	ifp->if_opackets += tl_tx_goodframes(tl_stats);
1794 	ifp->if_collisions += tl_stats.tl_tx_single_collision +
1795 				tl_stats.tl_tx_multi_collision;
1796 	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
1797 	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
1798 			    tl_rx_overrun(tl_stats);
1799 	ifp->if_oerrors += tl_tx_underrun(tl_stats);
1800 
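	/*
	 * A TX underrun means the FIFO ran dry before the frame finished
	 * transmitting.  Raise the TX threshold so that more of each
	 * frame is buffered on-chip before transmission starts, until
	 * whole packets are being buffered.
	 */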
1801 	if (tl_tx_underrun(tl_stats)) {
1802 		u_int8_t		tx_thresh;
1803 		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1804 		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1805 			tx_thresh >>= 4;
1806 			tx_thresh++;
1807 			printf("tl%d: tx underrun -- increasing "
1808 			    "tx threshold to %d bytes\n", sc->tl_unit,
1809 			    (64 * (tx_thresh * 4)));
1810 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1811 			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1812 		}
1813 	}
1814 
1815 	callout_reset(&sc->tl_stat_timer, hz, tl_stats_update, sc);
1816 
1817 	if (!sc->tl_bitrate) {
1818 		mii = device_get_softc(sc->tl_miibus);
1819 		mii_tick(mii);
1820 	}
1821 
1822 	splx(s);
1823 
1824 	return;
1825 }
1826 
1827 /*
1828  * Encapsulate an mbuf chain in a list by coupling the mbuf data
1829  * pointers to the fragment pointers.
1830  */
1831 static int tl_encap(sc, c, m_head)
1832 	struct tl_softc		*sc;
1833 	struct tl_chain		*c;
1834 	struct mbuf		*m_head;
1835 {
1836 	int			frag = 0;
1837 	struct tl_frag		*f = NULL;
1838 	int			total_len;
1839 	struct mbuf		*m;
1840 
1841 	/*
1842 	 * Start packing the mbufs in this chain into
1843 	 * the fragment pointers. Stop when we run out
1844 	 * of fragments or hit the end of the mbuf chain.
1845 	 */
1846 	m = m_head;
1847 	total_len = 0;
1848 
1849 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1850 		if (m->m_len != 0) {
1851 			if (frag == TL_MAXFRAGS)
1852 				break;
1853 			total_len += m->m_len;
1854 			c->tl_ptr->tl_frag[frag].tlist_dadr =
1855 				vtophys(mtod(m, vm_offset_t));
1856 			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
1857 			frag++;
1858 		}
1859 	}
1860 
1861 	/*
1862 	 * Handle special cases.
1863 	 * Special case #1: we used up all 10 fragments, but
1864 	 * we have more mbufs left in the chain. Copy the
1865 	 * data into an mbuf cluster. Note that we don't
1866 	 * bother clearing the values in the other fragment
1867 	 * pointers/counters; it wouldn't gain us anything,
1868 	 * and would waste cycles.
1869 	 */
1870 	if (m != NULL) {
1871 		struct mbuf		*m_new = NULL;
1872 
1873 		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
1874 		if (m_new == NULL) {
1875 			printf("tl%d: no memory for tx list\n", sc->tl_unit);
1876 			return(1);
1877 		}
1878 		if (m_head->m_pkthdr.len > MHLEN) {
1879 			MCLGET(m_new, MB_DONTWAIT);
1880 			if (!(m_new->m_flags & M_EXT)) {
1881 				m_freem(m_new);
1882 				printf("tl%d: no memory for tx list\n",
1883 				    sc->tl_unit);
1884 				return(1);
1885 			}
1886 		}
1887 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1888 					mtod(m_new, caddr_t));
1889 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1890 		m_freem(m_head);
1891 		m_head = m_new;
1892 		f = &c->tl_ptr->tl_frag[0];
1893 		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
1894 		f->tlist_dcnt = total_len = m_new->m_len;
1895 		frag = 1;
1896 	}
1897 
1898 	/*
1899 	 * Special case #2: the frame is smaller than the minimum
1900 	 * frame size. We have to pad it to make the chip happy.
1901 	 */
1902 	if (total_len < TL_MIN_FRAMELEN) {
1903 		if (frag == TL_MAXFRAGS)
1904 			printf("tl%d: all frags filled but "
1905 				"frame still too small!\n", sc->tl_unit);
1906 		f = &c->tl_ptr->tl_frag[frag];
1907 		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
1908 		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
1909 		total_len += f->tlist_dcnt;
1910 		frag++;
1911 	}
1912 
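	/*
	 * Finish off the list entry: mark the last fragment, record the
	 * total frame length and flag the list as ready for the chip.
	 */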
1913 	c->tl_mbuf = m_head;
1914 	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
1915 	c->tl_ptr->tlist_frsize = total_len;
1916 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1917 	c->tl_ptr->tlist_fptr = 0;
1918 
1919 	return(0);
1920 }
1921 
1922 /*
1923  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1924  * to the mbuf data regions directly in the transmit lists. We also save a
1925  * copy of the pointers since the transmit list fragment pointers are
1926  * physical addresses.
1927  */
1928 static void tl_start(ifp)
1929 	struct ifnet		*ifp;
1930 {
1931 	struct tl_softc		*sc;
1932 	struct mbuf		*m_head = NULL;
1933 	u_int32_t		cmd;
1934 	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1935 
1936 	sc = ifp->if_softc;
1937 
1938 	/*
1939 	 * Check for an available queue slot. If there are none,
1940 	 * punt.
1941 	 */
1942 	if (sc->tl_cdata.tl_tx_free == NULL) {
1943 		ifp->if_flags |= IFF_OACTIVE;
1944 		return;
1945 	}
1946 
1947 	start_tx = sc->tl_cdata.tl_tx_free;
1948 
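	/*
	 * Dequeue packets from the send queue and encapsulate each one
	 * into a transmit list entry, chaining the entries together,
	 * until we run out of packets or free list entries.
	 */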
1949 	while(sc->tl_cdata.tl_tx_free != NULL) {
1950 		IF_DEQUEUE(&ifp->if_snd, m_head);
1951 		if (m_head == NULL)
1952 			break;
1953 
1954 		/* Pick a chain member off the free list. */
1955 		cur_tx = sc->tl_cdata.tl_tx_free;
1956 		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;
1957 
1958 		cur_tx->tl_next = NULL;
1959 
1960 		/* Pack the data into the list. */
1961 		tl_encap(sc, cur_tx, m_head);
1962 
1963 		/* Chain it together */
1964 		if (prev != NULL) {
1965 			prev->tl_next = cur_tx;
1966 			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
1967 		}
1968 		prev = cur_tx;
1969 
1970 		/*
1971 		 * If there's a BPF listener, bounce a copy of this frame
1972 		 * to him.
1973 		 */
1974 		if (ifp->if_bpf)
1975 			bpf_mtap(ifp, cur_tx->tl_mbuf);
1976 	}
1977 
1978 	/*
1979 	 * If there are no packets queued, bail.
1980 	 */
1981 	if (cur_tx == NULL)
1982 		return;
1983 
1984 	/*
1985 	 * That's all we can stands, we can't stands no more.
1986 	 * If there are no other transfers pending, then issue the
1987 	 * TX GO command to the adapter to start things moving.
1988 	 * Otherwise, just leave the data in the queue and let
1989 	 * the EOF/EOC interrupt handler send.
1990 	 */
1991 	if (sc->tl_cdata.tl_tx_head == NULL) {
1992 		sc->tl_cdata.tl_tx_head = start_tx;
1993 		sc->tl_cdata.tl_tx_tail = cur_tx;
1994 
1995 		if (sc->tl_txeoc) {
1996 			sc->tl_txeoc = 0;
1997 			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
1998 			cmd = CSR_READ_4(sc, TL_HOSTCMD);
1999 			cmd &= ~TL_CMD_RT;
2000 			cmd |= TL_CMD_GO|TL_CMD_INTSON;
2001 			CMD_PUT(sc, cmd);
2002 		}
2003 	} else {
2004 		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
2005 		sc->tl_cdata.tl_tx_tail = cur_tx;
2006 	}
2007 
2008 	/*
2009 	 * Set a timeout in case the chip goes out to lunch.
2010 	 */
2011 	ifp->if_timer = 5;
2012 
2013 	return;
2014 }
2015 
2016 static void tl_init(xsc)
2017 	void			*xsc;
2018 {
2019 	struct tl_softc		*sc = xsc;
2020 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2021 	int			s;
2022 	struct mii_data		*mii;
2023 
2024 	s = splimp();
2025 
2028 	/*
2029 	 * Cancel pending I/O.
2030 	 */
2031 	tl_stop(sc);
2032 
2033 	/* Initialize TX FIFO threshold */
2034 	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
2035 	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);
2036 
2037 	/* Set PCI burst size */
2038 	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);
2039 
2040 	/*
2041 	 * Set 'capture all frames' bit for promiscuous mode.
2042 	 */
2043 	if (ifp->if_flags & IFF_PROMISC)
2044 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2045 	else
2046 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2047 
2048 	/*
2049 	 * Set capture broadcast bit to capture broadcast frames.
2050 	 */
2051 	if (ifp->if_flags & IFF_BROADCAST)
2052 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2053 	else
2054 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2055 
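	/* Limit received frames to one mbuf cluster's worth of data. */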
2056 	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);
2057 
2058 	/* Init our MAC address */
2059 	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);
2060 
2061 	/* Init multicast filter, if needed. */
2062 	tl_setmulti(sc);
2063 
2064 	/* Init circular RX list. */
2065 	if (tl_list_rx_init(sc) == ENOBUFS) {
2066 		printf("tl%d: initialization failed: no "
2067 			"memory for rx buffers\n", sc->tl_unit);
2068 		tl_stop(sc);
2069 		return;
2070 	}
2071 
2072 	/* Init TX pointers. */
2073 	tl_list_tx_init(sc);
2074 
2075 	/* Enable PCI interrupts. */
2076 	CMD_SET(sc, TL_CMD_INTSON);
2077 
2078 	/* Load the address of the rx list */
2079 	CMD_SET(sc, TL_CMD_RT);
2080 	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));
2081 
2082 	if (!sc->tl_bitrate) {
2083 		if (sc->tl_miibus != NULL) {
2084 			mii = device_get_softc(sc->tl_miibus);
2085 			mii_mediachg(mii);
2086 		}
2087 	}
2088 
2089 	/* Send the RX go command */
2090 	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);
2091 
2092 	ifp->if_flags |= IFF_RUNNING;
2093 	ifp->if_flags &= ~IFF_OACTIVE;
2094 
2095 	(void)splx(s);
2096 
2097 	/* Start the stats update counter */
2098 	callout_reset(&sc->tl_stat_timer, hz, tl_stats_update, sc);
2099 }
2100 
2101 /*
2102  * Set media options.
2103  */
2104 static int tl_ifmedia_upd(ifp)
2105 	struct ifnet		*ifp;
2106 {
2107 	struct tl_softc		*sc;
2108 	struct mii_data		*mii = NULL;
2109 
2110 	sc = ifp->if_softc;
2111 
2112 	if (sc->tl_bitrate)
2113 		tl_setmode(sc, sc->ifmedia.ifm_media);
2114 	else {
2115 		mii = device_get_softc(sc->tl_miibus);
2116 		mii_mediachg(mii);
2117 	}
2118 
2119 	return(0);
2120 }
2121 
2122 /*
2123  * Report current media status.
2124  */
2125 static void tl_ifmedia_sts(ifp, ifmr)
2126 	struct ifnet		*ifp;
2127 	struct ifmediareq	*ifmr;
2128 {
2129 	struct tl_softc		*sc;
2130 	struct mii_data		*mii;
2131 
2132 	sc = ifp->if_softc;
2133 
2134 	ifmr->ifm_active = IFM_ETHER;
2135 
2136 	if (sc->tl_bitrate) {
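		/*
		 * In bitrate (non-MII) mode the current media is inferred
		 * from the MTXD bits in the adapter commit register:
		 * MTXD1 selects AUI versus 10baseT and MTXD3 selects
		 * half versus full duplex.
		 */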
2137 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
2138 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2139 		else
2140 			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2141 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
2142 			ifmr->ifm_active |= IFM_HDX;
2143 		else
2144 			ifmr->ifm_active |= IFM_FDX;
2145 		return;
2146 	} else {
2147 		mii = device_get_softc(sc->tl_miibus);
2148 		mii_pollstat(mii);
2149 		ifmr->ifm_active = mii->mii_media_active;
2150 		ifmr->ifm_status = mii->mii_media_status;
2151 	}
2152 
2153 	return;
2154 }
2155 
2156 static int tl_ioctl(ifp, command, data, cr)
2157 	struct ifnet		*ifp;
2158 	u_long			command;
2159 	caddr_t			data;
2160 	struct ucred		*cr;
2161 {
2162 	struct tl_softc		*sc = ifp->if_softc;
2163 	struct ifreq		*ifr = (struct ifreq *) data;
2164 	int			s, error = 0;
2165 
2166 	s = splimp();
2167 
2168 	switch(command) {
2169 	case SIOCSIFADDR:
2170 	case SIOCGIFADDR:
2171 	case SIOCSIFMTU:
2172 		error = ether_ioctl(ifp, command, data);
2173 		break;
2174 	case SIOCSIFFLAGS:
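		/*
		 * If the interface is up and running and only the
		 * promiscuous flag has changed, just toggle the
		 * 'capture all frames' bit rather than doing a full
		 * reinitialization.
		 */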
2175 		if (ifp->if_flags & IFF_UP) {
2176 			if (ifp->if_flags & IFF_RUNNING &&
2177 			    ifp->if_flags & IFF_PROMISC &&
2178 			    !(sc->tl_if_flags & IFF_PROMISC)) {
2179 				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2180 				tl_setmulti(sc);
2181 			} else if (ifp->if_flags & IFF_RUNNING &&
2182 			    !(ifp->if_flags & IFF_PROMISC) &&
2183 			    sc->tl_if_flags & IFF_PROMISC) {
2184 				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2185 				tl_setmulti(sc);
2186 			} else
2187 				tl_init(sc);
2188 		} else {
2189 			if (ifp->if_flags & IFF_RUNNING) {
2190 				tl_stop(sc);
2191 			}
2192 		}
2193 		sc->tl_if_flags = ifp->if_flags;
2194 		error = 0;
2195 		break;
2196 	case SIOCADDMULTI:
2197 	case SIOCDELMULTI:
2198 		tl_setmulti(sc);
2199 		error = 0;
2200 		break;
2201 	case SIOCSIFMEDIA:
2202 	case SIOCGIFMEDIA:
2203 		if (sc->tl_bitrate)
2204 			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2205 		else {
2206 			struct mii_data		*mii;
2207 			mii = device_get_softc(sc->tl_miibus);
2208 			error = ifmedia_ioctl(ifp, ifr,
2209 			    &mii->mii_media, command);
2210 		}
2211 		break;
2212 	default:
2213 		error = EINVAL;
2214 		break;
2215 	}
2216 
2217 	(void)splx(s);
2218 
2219 	return(error);
2220 }
2221 
2222 static void tl_watchdog(ifp)
2223 	struct ifnet		*ifp;
2224 {
2225 	struct tl_softc		*sc;
2226 
2227 	sc = ifp->if_softc;
2228 
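	/*
	 * tl_start() arms a five second timeout for each transmission;
	 * if it expires before the transmit completes, assume the chip
	 * has wedged and recover with a reset and reinitialization.
	 */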
2229 	printf("tl%d: device timeout\n", sc->tl_unit);
2230 
2231 	ifp->if_oerrors++;
2232 
2233 	tl_softreset(sc, 1);
2234 	tl_init(sc);
2235 
2236 	return;
2237 }
2238 
2239 /*
2240  * Stop the adapter and free any mbufs allocated to the
2241  * RX and TX lists.
2242  */
2243 static void tl_stop(sc)
2244 	struct tl_softc		*sc;
2245 {
2246 	int			i;
2247 	struct ifnet		*ifp;
2248 
2249 	ifp = &sc->arpcom.ac_if;
2250 
2251 	/* Stop the stats updater. */
2252 	callout_stop(&sc->tl_stat_timer);
2253 
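	/*
	 * The TL_CMD_RT bit selects which DMA channel a host command
	 * applies to: clear it to address the transmit channel, set it
	 * to address the receive channel.
	 */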
2254 	/* Stop the transmitter */
2255 	CMD_CLR(sc, TL_CMD_RT);
2256 	CMD_SET(sc, TL_CMD_STOP);
2257 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2258 
2259 	/* Stop the receiver */
2260 	CMD_SET(sc, TL_CMD_RT);
2261 	CMD_SET(sc, TL_CMD_STOP);
2262 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2263 
2264 	/*
2265 	 * Disable host interrupts.
2266 	 */
2267 	CMD_SET(sc, TL_CMD_INTSOFF);
2268 
2269 	/*
2270 	 * Clear list pointer.
2271 	 */
2272 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2273 
2274 	/*
2275 	 * Free the RX lists.
2276 	 */
2277 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
2278 		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
2279 			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
2280 			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
2281 		}
2282 	}
2283 	bzero((char *)&sc->tl_ldata->tl_rx_list,
2284 		sizeof(sc->tl_ldata->tl_rx_list));
2285 
2286 	/*
2287 	 * Free the TX list buffers.
2288 	 */
2289 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
2290 		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
2291 			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
2292 			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
2293 		}
2294 	}
2295 	bzero((char *)&sc->tl_ldata->tl_tx_list,
2296 		sizeof(sc->tl_ldata->tl_tx_list));
2297 
2298 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2299 
2300 	return;
2301 }
2302 
2303 /*
2304  * Stop all chip I/O so that the kernel's probe routines don't
2305  * get confused by errant DMAs when rebooting.
2306  */
2307 static void tl_shutdown(dev)
2308 	device_t		dev;
2309 {
2310 	struct tl_softc		*sc;
2311 
2312 	sc = device_get_softc(dev);
2313 
2314 	tl_stop(sc);
2315 
2316 	return;
2317 }
2318