xref: /dragonfly/sys/dev/netif/tl/if_tl.c (revision 38a690d7)
1 /*
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/pci/if_tl.c,v 1.51.2.5 2001/12/16 15:46:08 luigi Exp $
33  * $DragonFly: src/sys/dev/netif/tl/if_tl.c,v 1.4 2003/08/07 21:17:06 dillon Exp $
34  *
36  */
37 
38 /*
39  * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
40  * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
41  * the National Semiconductor DP83840A physical interface and the
42  * Microchip Technology 24Cxx series serial EEPROM.
43  *
44  * Written using the following four documents:
45  *
46  * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
47  * National Semiconductor DP83840A data sheet (www.national.com)
48  * Microchip Technology 24C02C data sheet (www.microchip.com)
49  * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
50  *
51  * Written by Bill Paul <wpaul@ctr.columbia.edu>
52  * Electrical Engineering Department
53  * Columbia University, New York City
54  */
55 
56 /*
57  * Some notes about the ThunderLAN:
58  *
59  * The ThunderLAN controller is a single chip containing PCI controller
60  * logic, approximately 3K of on-board SRAM, a LAN controller, and a media
61  * independent interface (MII) bus. The MII allows the ThunderLAN chip to
62  * control up to 32 different physical interfaces (PHYs). The ThunderLAN
63  * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
64  * to act as a complete ethernet interface.
65  *
66  * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
67  * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
68  * in full or half duplex. Some of the Compaq Deskpro machines use a
69  * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
70  * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
71  * concert with the ThunderLAN's internal PHY to provide full 10/100
72  * support. This is cheaper than using a standalone external PHY for both
73  * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
74  * A serial EEPROM is also attached to the ThunderLAN chip to provide
75  * power-up default register settings and for storing the adapter's
76  * station address. Although not supported by this driver, the ThunderLAN
77  * chip can also be connected to token ring PHYs.
78  *
79  * The ThunderLAN has a set of registers which can be used to issue
80  * commands, acknowledge interrupts, and to manipulate other internal
81  * registers on its DIO bus. The primary registers can be accessed
82  * using either programmed I/O (inb/outb) or via PCI memory mapping,
83  * depending on how the card is configured during the PCI probing
84  * phase. It is even possible to have both PIO and memory mapped
85  * access turned on at the same time.
86  *
87  * Frame reception and transmission with the ThunderLAN chip is done
88  * using frame 'lists.' A list structure looks more or less like this:
89  *
90  * struct tl_frag {
91  *	u_int32_t		fragment_address;
92  *	u_int32_t		fragment_size;
93  * };
94  * struct tl_list {
95  *	u_int32_t		forward_pointer;
96  *	u_int16_t		cstat;
97  *	u_int16_t		frame_size;
98  *	struct tl_frag		fragments[10];
99  * };
100  *
101  * The forward pointer in the list header can be either a 0 or the address
102  * of another list, which allows several lists to be linked together. Each
103  * list contains up to 10 fragment descriptors. This means the chip allows
104  * ethernet frames to be broken up into up to 10 chunks for transfer to
105  * and from the SRAM. Note that the forward pointer and fragment buffer
106  * addresses are physical memory addresses, not virtual. Note also that
107  * a single ethernet frame can not span lists: if the host wants to
108  * transmit a frame and the frame data is split up over more than 10
109  * buffers, the frame has to be collapsed before it can be transmitted.
110  *
111  * To receive frames, the driver sets up a number of lists and populates
112  * the fragment descriptors, then it sends an RX GO command to the chip.
113  * When a frame is received, the chip will DMA it into the memory regions
114  * specified by the fragment descriptors and then trigger an RX 'end of
115  * frame interrupt' when done. The driver may choose to use only one
116  * fragment per list; this may result in slightly less efficient use
117  * of memory in exchange for improving performance.
118  *
119  * To transmit frames, the driver again sets up lists and fragment
120  * descriptors, only this time the buffers contain frame data that
121  * is to be DMA'ed into the chip instead of out of it. Once the chip
122  * has transferred the data into its on-board SRAM, it will trigger a
123  * TX 'end of frame' interrupt. It will also generate an 'end of channel'
124  * interrupt when it reaches the end of the list.
125  */
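
/*
 * As a rough sketch of the layout described above (illustrative only,
 * not taken from the manual), two single-fragment receive lists could
 * be chained like this, using the pseudo-structure field names; the
 * driver's real layouts live in if_tlreg.h, and every pointer handed
 * to the chip must be a physical address, hence vtophys():
 *
 *	lists[0].fragments[0].fragment_address = vtophys(buf0);
 *	lists[0].fragments[0].fragment_size = MCLBYTES;
 *	lists[0].forward_pointer = vtophys(&lists[1]);
 *	lists[1].fragments[0].fragment_address = vtophys(buf1);
 *	lists[1].fragments[0].fragment_size = MCLBYTES;
 *	lists[1].forward_pointer = 0;		(0 terminates the chain)
 */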
126 
127 /*
128  * Some notes about this driver:
129  *
130  * The ThunderLAN chip provides a couple of different ways to organize
131  * reception, transmission and interrupt handling. The simplest approach
132  * is to use one list each for transmission and reception. In this mode,
133  * the ThunderLAN will generate two interrupts for every received frame
134  * (one RX EOF and one RX EOC) and two for each transmitted frame (one
135  * TX EOF and one TX EOC). This may make the driver simpler but it hurts
136  * performance to have to handle so many interrupts.
137  *
138  * Initially I wanted to create a circular list of receive buffers so
139  * that the ThunderLAN chip would think there was an infinitely long
140  * receive channel and never deliver an RXEOC interrupt. However this
141  * doesn't work correctly under heavy load: while the manual says the
142  * chip will trigger an RXEOF interrupt each time a frame is copied into
143  * memory, you can't count on the chip waiting around for you to acknowledge
144  * the interrupt before it starts trying to DMA the next frame. The result
145  * is that the chip might traverse the entire circular list and then wrap
146  * around before you have a chance to do anything about it. Consequently,
147  * the receive list is terminated (with a 0 in the forward pointer in the
148  * last element). Each time an RXEOF interrupt arrives, the used list
149  * is shifted to the end of the chain. This gives the appearance of an
150  * infinitely large RX chain so long as the driver doesn't fall behind
151  * the chip and allow all of the lists to be filled up.
152  *
153  * If all the lists are filled, the adapter will deliver an RX 'end of
154  * channel' interrupt when it hits the 0 forward pointer at the end of
155  * the chain. The RXEOC handler then cleans out the RX chain and resets
156  * the list head pointer in the ch_parm register and restarts the receiver.
157  *
158  * For frame transmission, it is possible to program the ThunderLAN's
159  * transmit interrupt threshold so that the chip can acknowledge multiple
160  * lists with only a single TX EOF interrupt. This allows the driver to
161  * queue several frames in one shot, and only have to handle a total of
162  * two interrupts (one TX EOF and one TX EOC) no matter how many frames
163  * are transmitted. Frame transmission is done directly out of the
164  * mbufs passed to the tl_start() routine via the interface send queue.
165  * The driver simply sets up the fragment descriptors in the transmit
166  * lists to point to the mbuf data regions and sends a TX GO command.
167  *
168  * Note that since the RX and TX lists themselves are always used
169  * only by the driver, they are malloc()ed once at driver initialization
170  * time and never free()ed.
171  *
172  * Also, in order to remain as platform independent as possible, this
173  * driver uses memory mapped register access to manipulate the card
174  * as opposed to programmed I/O. This avoids the use of the inb/outb
175  * (and related) instructions which are specific to the i386 platform.
176  *
177  * Using these techniques, this driver achieves very high performance
178  * by minimizing the amount of interrupts generated during large
179  * transfers and by completely avoiding buffer copies. Frame transfer
180  * to and from the ThunderLAN chip is performed entirely by the chip
181  * itself thereby reducing the load on the host CPU.
182  */
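
/*
 * A minimal sketch of the RXEOF recycling step described above, assuming
 * the chain/list field names used later in this file: the just-completed
 * element is given a fresh mbuf cluster and re-linked at the tail, so the
 * chip always sees more lists ahead of it.
 *
 *	cur = cd->tl_rx_head;
 *	cd->tl_rx_head = cur->tl_next;
 *	tl_newbuf(sc, cur);			(attach a fresh cluster)
 *	cd->tl_rx_tail->tl_ptr->tlist_fptr = vtophys(cur->tl_ptr);
 *	cd->tl_rx_tail->tl_next = cur;
 *	cd->tl_rx_tail = cur;
 */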
183 
184 #include <sys/param.h>
185 #include <sys/systm.h>
186 #include <sys/sockio.h>
187 #include <sys/mbuf.h>
188 #include <sys/malloc.h>
189 #include <sys/kernel.h>
190 #include <sys/socket.h>
191 
192 #include <net/if.h>
193 #include <net/if_arp.h>
194 #include <net/ethernet.h>
195 #include <net/if_dl.h>
196 #include <net/if_media.h>
197 
198 #include <net/bpf.h>
199 
200 #include <vm/vm.h>              /* for vtophys */
201 #include <vm/pmap.h>            /* for vtophys */
202 #include <machine/clock.h>      /* for DELAY */
203 #include <machine/bus_memio.h>
204 #include <machine/bus_pio.h>
205 #include <machine/bus.h>
206 #include <machine/resource.h>
207 #include <sys/bus.h>
208 #include <sys/rman.h>
209 
210 #include "../mii_layer/mii.h"
211 #include "../mii_layer/miivar.h"
212 
213 #include <bus/pci/pcireg.h>
214 #include <bus/pci/pcivar.h>
215 
216 /*
217  * Default to using PIO register access mode to pacify certain
218  * laptop docking stations with built-in ThunderLAN chips that
219  * don't seem to handle memory mapped mode properly.
220  */
221 #define TL_USEIOSPACE
222 
223 #include "if_tlreg.h"
224 
225 /* "controller miibus0" required.  See GENERIC if you get errors here. */
226 #include "miibus_if.h"
227 
228 /*
229  * Various supported device vendors/types and their names.
230  */
231 
232 static struct tl_type tl_devs[] = {
233 	{ TI_VENDORID,	TI_DEVICEID_THUNDERLAN,
234 		"Texas Instruments ThunderLAN" },
235 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10,
236 		"Compaq Netelligent 10" },
237 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100,
238 		"Compaq Netelligent 10/100" },
239 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT,
240 		"Compaq Netelligent 10/100 Proliant" },
241 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL,
242 		"Compaq Netelligent 10/100 Dual Port" },
243 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED,
244 		"Compaq NetFlex-3/P Integrated" },
245 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P,
246 		"Compaq NetFlex-3/P" },
247 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC,
248 		"Compaq NetFlex 3/P w/ BNC" },
249 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED,
250 		"Compaq Netelligent 10/100 TX Embedded UTP" },
251 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX,
252 		"Compaq Netelligent 10 T/2 PCI UTP/Coax" },
253 	{ COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP,
254 		"Compaq Netelligent 10/100 TX UTP" },
255 	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2183,
256 		"Olicom OC-2183/2185" },
257 	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2325,
258 		"Olicom OC-2325" },
259 	{ OLICOM_VENDORID, OLICOM_DEVICEID_OC2326,
260 		"Olicom OC-2326 10/100 TX UTP" },
261 	{ 0, 0, NULL }
262 };
263 
264 static int tl_probe		__P((device_t));
265 static int tl_attach		__P((device_t));
266 static int tl_detach		__P((device_t));
267 static int tl_intvec_rxeoc	__P((void *, u_int32_t));
268 static int tl_intvec_txeoc	__P((void *, u_int32_t));
269 static int tl_intvec_txeof	__P((void *, u_int32_t));
270 static int tl_intvec_rxeof	__P((void *, u_int32_t));
271 static int tl_intvec_adchk	__P((void *, u_int32_t));
272 static int tl_intvec_netsts	__P((void *, u_int32_t));
273 
274 static int tl_newbuf		__P((struct tl_softc *,
275 					struct tl_chain_onefrag *));
276 static void tl_stats_update	__P((void *));
277 static int tl_encap		__P((struct tl_softc *, struct tl_chain *,
278 						struct mbuf *));
279 
280 static void tl_intr		__P((void *));
281 static void tl_start		__P((struct ifnet *));
282 static int tl_ioctl		__P((struct ifnet *, u_long, caddr_t));
283 static void tl_init		__P((void *));
284 static void tl_stop		__P((struct tl_softc *));
285 static void tl_watchdog		__P((struct ifnet *));
286 static void tl_shutdown		__P((device_t));
287 static int tl_ifmedia_upd	__P((struct ifnet *));
288 static void tl_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
289 
290 static u_int8_t tl_eeprom_putbyte	__P((struct tl_softc *, int));
291 static u_int8_t	tl_eeprom_getbyte	__P((struct tl_softc *,
292 						int, u_int8_t *));
293 static int tl_read_eeprom	__P((struct tl_softc *, caddr_t, int, int));
294 
295 static void tl_mii_sync		__P((struct tl_softc *));
296 static void tl_mii_send		__P((struct tl_softc *, u_int32_t, int));
297 static int tl_mii_readreg	__P((struct tl_softc *, struct tl_mii_frame *));
298 static int tl_mii_writereg	__P((struct tl_softc *, struct tl_mii_frame *));
299 static int tl_miibus_readreg	__P((device_t, int, int));
300 static int tl_miibus_writereg	__P((device_t, int, int, int));
301 static void tl_miibus_statchg	__P((device_t));
302 
303 static void tl_setmode		__P((struct tl_softc *, int));
304 static int tl_calchash		__P((caddr_t));
305 static void tl_setmulti		__P((struct tl_softc *));
306 static void tl_setfilt		__P((struct tl_softc *, caddr_t, int));
307 static void tl_softreset	__P((struct tl_softc *, int));
308 static void tl_hardreset	__P((device_t));
309 static int tl_list_rx_init	__P((struct tl_softc *));
310 static int tl_list_tx_init	__P((struct tl_softc *));
311 
312 static u_int8_t tl_dio_read8	__P((struct tl_softc *, int));
313 static u_int16_t tl_dio_read16	__P((struct tl_softc *, int));
314 static u_int32_t tl_dio_read32	__P((struct tl_softc *, int));
315 static void tl_dio_write8	__P((struct tl_softc *, int, int));
316 static void tl_dio_write16	__P((struct tl_softc *, int, int));
317 static void tl_dio_write32	__P((struct tl_softc *, int, int));
318 static void tl_dio_setbit	__P((struct tl_softc *, int, int));
319 static void tl_dio_clrbit	__P((struct tl_softc *, int, int));
320 static void tl_dio_setbit16	__P((struct tl_softc *, int, int));
321 static void tl_dio_clrbit16	__P((struct tl_softc *, int, int));
322 
323 #ifdef TL_USEIOSPACE
324 #define TL_RES		SYS_RES_IOPORT
325 #define TL_RID		TL_PCI_LOIO
326 #else
327 #define TL_RES		SYS_RES_MEMORY
328 #define TL_RID		TL_PCI_LOMEM
329 #endif
330 
331 static device_method_t tl_methods[] = {
332 	/* Device interface */
333 	DEVMETHOD(device_probe,		tl_probe),
334 	DEVMETHOD(device_attach,	tl_attach),
335 	DEVMETHOD(device_detach,	tl_detach),
336 	DEVMETHOD(device_shutdown,	tl_shutdown),
337 
338 	/* bus interface */
339 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
340 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
341 
342 	/* MII interface */
343 	DEVMETHOD(miibus_readreg,	tl_miibus_readreg),
344 	DEVMETHOD(miibus_writereg,	tl_miibus_writereg),
345 	DEVMETHOD(miibus_statchg,	tl_miibus_statchg),
346 
347 	{ 0, 0 }
348 };
349 
350 static driver_t tl_driver = {
351 	"tl",
352 	tl_methods,
353 	sizeof(struct tl_softc)
354 };
355 
356 static devclass_t tl_devclass;
357 
358 DRIVER_MODULE(if_tl, pci, tl_driver, tl_devclass, 0, 0);
359 DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0);
360 
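/*
 * DIO register access helpers. The chip's internal registers are reached
 * indirectly: the register offset is written to TL_DIO_ADDR and the data
 * is then moved through the 4-byte TL_DIO_DATA window, with (reg & 3)
 * selecting the byte lane for accesses narrower than 32 bits.
 */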
361 static u_int8_t tl_dio_read8(sc, reg)
362 	struct tl_softc		*sc;
363 	int			reg;
364 {
365 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
366 	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
367 }
368 
369 static u_int16_t tl_dio_read16(sc, reg)
370 	struct tl_softc		*sc;
371 	int			reg;
372 {
373 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
374 	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
375 }
376 
377 static u_int32_t tl_dio_read32(sc, reg)
378 	struct tl_softc		*sc;
379 	int			reg;
380 {
381 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
382 	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
383 }
384 
385 static void tl_dio_write8(sc, reg, val)
386 	struct tl_softc		*sc;
387 	int			reg;
388 	int			val;
389 {
390 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
391 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
392 	return;
393 }
394 
395 static void tl_dio_write16(sc, reg, val)
396 	struct tl_softc		*sc;
397 	int			reg;
398 	int			val;
399 {
400 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
401 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
402 	return;
403 }
404 
405 static void tl_dio_write32(sc, reg, val)
406 	struct tl_softc		*sc;
407 	int			reg;
408 	int			val;
409 {
410 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
411 	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
412 	return;
413 }
414 
415 static void tl_dio_setbit(sc, reg, bit)
416 	struct tl_softc		*sc;
417 	int			reg;
418 	int			bit;
419 {
420 	u_int8_t			f;
421 
422 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
423 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
424 	f |= bit;
425 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
426 
427 	return;
428 }
429 
430 static void tl_dio_clrbit(sc, reg, bit)
431 	struct tl_softc		*sc;
432 	int			reg;
433 	int			bit;
434 {
435 	u_int8_t			f;
436 
437 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
438 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
439 	f &= ~bit;
440 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
441 
442 	return;
443 }
444 
445 static void tl_dio_setbit16(sc, reg, bit)
446 	struct tl_softc		*sc;
447 	int			reg;
448 	int			bit;
449 {
450 	u_int16_t			f;
451 
452 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
453 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
454 	f |= bit;
455 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
456 
457 	return;
458 }
459 
460 static void tl_dio_clrbit16(sc, reg, bit)
461 	struct tl_softc		*sc;
462 	int			reg;
463 	int			bit;
464 {
465 	u_int16_t			f;
466 
467 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
468 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
469 	f &= ~bit;
470 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
471 
472 	return;
473 }
474 
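/*
 * EEPROM access. The serial EEPROM hangs off the NETSIO register and is
 * driven entirely in software: TL_SIO_ECLOK toggles the serial clock,
 * TL_SIO_EDATA carries the data bit, and TL_SIO_ETXEN switches the data
 * line between shifting a byte out and sampling the chip's acknowledge.
 */
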
475 /*
476  * Send an instruction or address to the EEPROM, check for ACK.
477  */
478 static u_int8_t tl_eeprom_putbyte(sc, byte)
479 	struct tl_softc		*sc;
480 	int			byte;
481 {
482 	int		i, ack = 0;
483 
484 	/*
485 	 * Make sure we're in TX mode.
486 	 */
487 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
488 
489 	/*
490 	 * Feed in each bit and strobe the clock.
491 	 */
492 	for (i = 0x80; i; i >>= 1) {
493 		if (byte & i) {
494 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
495 		} else {
496 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
497 		}
498 		DELAY(1);
499 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
500 		DELAY(1);
501 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
502 	}
503 
504 	/*
505 	 * Turn off TX mode.
506 	 */
507 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
508 
509 	/*
510 	 * Check for ack.
511 	 */
512 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
513 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
514 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
515 
516 	return(ack);
517 }
518 
519 /*
520  * Read a byte of data stored in the EEPROM at address 'addr.'
521  */
522 static u_int8_t tl_eeprom_getbyte(sc, addr, dest)
523 	struct tl_softc		*sc;
524 	int			addr;
525 	u_int8_t		*dest;
526 {
527 	int		i;
528 	u_int8_t		byte = 0;
529 
530 	tl_dio_write8(sc, TL_NETSIO, 0);
531 
532 	EEPROM_START;
533 
534 	/*
535 	 * Send write control code to EEPROM.
536 	 */
537 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
538 		printf("tl%d: failed to send write command, status: %x\n",
539 				sc->tl_unit, tl_dio_read8(sc, TL_NETSIO));
540 		return(1);
541 	}
542 
543 	/*
544 	 * Send address of byte we want to read.
545 	 */
546 	if (tl_eeprom_putbyte(sc, addr)) {
547 		printf("tl%d: failed to send address, status: %x\n",
548 				sc->tl_unit, tl_dio_read8(sc, TL_NETSIO));
549 		return(1);
550 	}
551 
552 	EEPROM_STOP;
553 	EEPROM_START;
554 	/*
555 	 * Send read control code to EEPROM.
556 	 */
557 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
558 		printf("tl%d: failed to send write command, status: %x\n",
559 				sc->tl_unit, tl_dio_read8(sc, TL_NETSIO));
560 		return(1);
561 	}
562 
563 	/*
564 	 * Start reading bits from EEPROM.
565 	 */
566 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
567 	for (i = 0x80; i; i >>= 1) {
568 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
569 		DELAY(1);
570 		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
571 			byte |= i;
572 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
573 		DELAY(1);
574 	}
575 
576 	EEPROM_STOP;
577 
578 	/*
579 	 * No ACK generated for read, so just return byte.
580 	 */
581 
582 	*dest = byte;
583 
584 	return(0);
585 }
586 
587 /*
588  * Read a sequence of bytes from the EEPROM.
589  */
590 static int tl_read_eeprom(sc, dest, off, cnt)
591 	struct tl_softc		*sc;
592 	caddr_t			dest;
593 	int			off;
594 	int			cnt;
595 {
596 	int			err = 0, i;
597 	u_int8_t		byte = 0;
598 
599 	for (i = 0; i < cnt; i++) {
600 		err = tl_eeprom_getbyte(sc, off + i, &byte);
601 		if (err)
602 			break;
603 		*(dest + i) = byte;
604 	}
605 
606 	return(err ? 1 : 0);
607 }
608 
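/*
 * MII bit-bang helpers. Management frames are clocked out by hand on the
 * NETSIO pins: 32 sync bits, a 2-bit start delimiter, a 2-bit opcode,
 * the 5-bit PHY address, the 5-bit register address, a turnaround and
 * 16 data bits, following the standard MII management frame layout.
 */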
609 static void tl_mii_sync(sc)
610 	struct tl_softc		*sc;
611 {
612 	int		i;
613 
614 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
615 
616 	for (i = 0; i < 32; i++) {
617 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
618 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
619 	}
620 
621 	return;
622 }
623 
624 static void tl_mii_send(sc, bits, cnt)
625 	struct tl_softc		*sc;
626 	u_int32_t		bits;
627 	int			cnt;
628 {
629 	int			i;
630 
631 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
632 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
633 		if (bits & i) {
634 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
635 		} else {
636 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
637 		}
638 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
639 	}
640 }
641 
642 static int tl_mii_readreg(sc, frame)
643 	struct tl_softc		*sc;
644 	struct tl_mii_frame	*frame;
645 
646 {
647 	int			i, ack, s;
648 	int			minten = 0;
649 
650 	s = splimp();
651 
652 	tl_mii_sync(sc);
653 
654 	/*
655 	 * Set up frame for RX.
656 	 */
657 	frame->mii_stdelim = TL_MII_STARTDELIM;
658 	frame->mii_opcode = TL_MII_READOP;
659 	frame->mii_turnaround = 0;
660 	frame->mii_data = 0;
661 
662 	/*
663 	 * Turn off MII interrupt by forcing MINTEN low.
664 	 */
665 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
666 	if (minten) {
667 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
668 	}
669 
670 	/*
671  	 * Turn on data xmit.
672 	 */
673 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
674 
675 	/*
676 	 * Send command/address info.
677 	 */
678 	tl_mii_send(sc, frame->mii_stdelim, 2);
679 	tl_mii_send(sc, frame->mii_opcode, 2);
680 	tl_mii_send(sc, frame->mii_phyaddr, 5);
681 	tl_mii_send(sc, frame->mii_regaddr, 5);
682 
683 	/*
684 	 * Turn off xmit.
685 	 */
686 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
687 
688 	/* Idle bit */
689 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
690 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
691 
692 	/* Check for ack */
693 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
694 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;
695 
696 	/* Complete the cycle */
697 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
698 
699 	/*
700 	 * Now try reading data bits. If the ack failed, we still
701 	 * need to clock through 16 cycles to keep the PHYs in sync.
702 	 */
703 	if (ack) {
704 		for(i = 0; i < 16; i++) {
705 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
706 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
707 		}
708 		goto fail;
709 	}
710 
711 	for (i = 0x8000; i; i >>= 1) {
712 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
713 		if (!ack) {
714 			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
715 				frame->mii_data |= i;
716 		}
717 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
718 	}
719 
720 fail:
721 
722 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
723 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
724 
725 	/* Reenable interrupts */
726 	if (minten) {
727 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
728 	}
729 
730 	splx(s);
731 
732 	if (ack)
733 		return(1);
734 	return(0);
735 }
736 
737 static int tl_mii_writereg(sc, frame)
738 	struct tl_softc		*sc;
739 	struct tl_mii_frame	*frame;
740 
741 {
742 	int			s;
743 	int			minten;
744 
745 	tl_mii_sync(sc);
746 
747 	s = splimp();
748 	/*
749 	 * Set up frame for TX.
750 	 */
751 
752 	frame->mii_stdelim = TL_MII_STARTDELIM;
753 	frame->mii_opcode = TL_MII_WRITEOP;
754 	frame->mii_turnaround = TL_MII_TURNAROUND;
755 
756 	/*
757 	 * Turn off MII interrupt by forcing MINTEN low.
758 	 */
759 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
760 	if (minten) {
761 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
762 	}
763 
764 	/*
765  	 * Turn on data output.
766 	 */
767 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
768 
769 	tl_mii_send(sc, frame->mii_stdelim, 2);
770 	tl_mii_send(sc, frame->mii_opcode, 2);
771 	tl_mii_send(sc, frame->mii_phyaddr, 5);
772 	tl_mii_send(sc, frame->mii_regaddr, 5);
773 	tl_mii_send(sc, frame->mii_turnaround, 2);
774 	tl_mii_send(sc, frame->mii_data, 16);
775 
776 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
777 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
778 
779 	/*
780 	 * Turn off xmit.
781 	 */
782 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
783 
784 	/* Reenable interrupts */
785 	if (minten)
786 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
787 
788 	splx(s);
789 
790 	return(0);
791 }
792 
793 static int tl_miibus_readreg(dev, phy, reg)
794 	device_t		dev;
795 	int			phy, reg;
796 {
797 	struct tl_softc		*sc;
798 	struct tl_mii_frame	frame;
799 
800 	sc = device_get_softc(dev);
801 	bzero((char *)&frame, sizeof(frame));
802 
803 	frame.mii_phyaddr = phy;
804 	frame.mii_regaddr = reg;
805 	tl_mii_readreg(sc, &frame);
806 
807 	return(frame.mii_data);
808 }
809 
810 static int tl_miibus_writereg(dev, phy, reg, data)
811 	device_t		dev;
812 	int			phy, reg, data;
813 {
814 	struct tl_softc		*sc;
815 	struct tl_mii_frame	frame;
816 
817 	sc = device_get_softc(dev);
818 	bzero((char *)&frame, sizeof(frame));
819 
820 	frame.mii_phyaddr = phy;
821 	frame.mii_regaddr = reg;
822 	frame.mii_data = data;
823 
824 	tl_mii_writereg(sc, &frame);
825 
826 	return(0);
827 }
828 
829 static void tl_miibus_statchg(dev)
830 	device_t		dev;
831 {
832 	struct tl_softc		*sc;
833 	struct mii_data		*mii;
834 
835 	sc = device_get_softc(dev);
836 	mii = device_get_softc(sc->tl_miibus);
837 
838 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
839 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
840 	} else {
841 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
842 	}
843 
844 	return;
845 }
846 
847 /*
848  * Set modes for bitrate devices.
849  */
850 static void tl_setmode(sc, media)
851 	struct tl_softc		*sc;
852 	int			media;
853 {
854 	if (IFM_SUBTYPE(media) == IFM_10_5)
855 		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
856 	if (IFM_SUBTYPE(media) == IFM_10_T) {
857 		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
858 		if ((media & IFM_GMASK) == IFM_FDX) {
859 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
860 			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
861 		} else {
862 			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
863 			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
864 		}
865 	}
866 
867 	return;
868 }
869 
870 /*
871  * Calculate the hash of a MAC address for programming the multicast hash
872  * table.  This hash is simply the address split into 6-bit chunks
873  * XOR'd, e.g.
874  * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
875  * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
876  * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
877  * the folded 24-bit value is split into 6-bit portions and XOR'd.
878  */
879 static int tl_calchash(addr)
880 	caddr_t			addr;
881 {
882 	int			t;
883 
884 	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
885 		(addr[2] ^ addr[5]);
886 	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
887 }
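
/*
 * As a worked check of the scheme above (not taken from the manual):
 * hashing the all-hosts group 01:00:5e:00:00:01 folds the two halves of
 * the address to 0x01, 0x00 and 0x5f, so t = 0x01005f; XORing the four
 * 6-bit slices of t (0x00, 0x10, 0x01 and 0x1f) gives 0x0e, so this
 * address sets bit 14 of the 64-bit hash table (TL_HASH1).
 */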
888 
889 /*
890  * The ThunderLAN has a perfect MAC address filter in addition to
891  * the multicast hash filter. The perfect filter can be programmed
892  * with up to four MAC addresses. The first one is always used to
893  * hold the station address, which leaves us free to use the other
894  * three for multicast addresses.
895  */
896 static void tl_setfilt(sc, addr, slot)
897 	struct tl_softc		*sc;
898 	caddr_t			addr;
899 	int			slot;
900 {
901 	int			i;
902 	u_int16_t		regaddr;
903 
904 	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
905 
906 	for (i = 0; i < ETHER_ADDR_LEN; i++)
907 		tl_dio_write8(sc, regaddr + i, *(addr + i));
908 
909 	return;
910 }
911 
912 /*
913  * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
914  * linked list. This is fine, except addresses are added from the head
915  * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
916  * group to always be in the perfect filter, but as more groups are added,
917  * the 224.0.0.1 entry (which is always added first) gets pushed down
918  * the list and ends up at the tail. So after 3 or 4 multicast groups
919  * are added, the all-hosts entry gets pushed out of the perfect filter
920  * and into the hash table.
921  *
922  * Because the multicast list is a doubly-linked list as opposed to a
923  * circular queue, we don't have the ability to just grab the tail of
924  * the list and traverse it backwards. Instead, we have to traverse
925  * the list once to find the tail, then traverse it again backwards to
926  * update the multicast filter.
927  */
928 static void tl_setmulti(sc)
929 	struct tl_softc		*sc;
930 {
931 	struct ifnet		*ifp;
932 	u_int32_t		hashes[2] = { 0, 0 };
933 	int			h, i;
934 	struct ifmultiaddr	*ifma;
935 	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
936 	ifp = &sc->arpcom.ac_if;
937 
938 	/* First, zot all the existing filters. */
939 	for (i = 1; i < 4; i++)
940 		tl_setfilt(sc, (caddr_t)&dummy, i);
941 	tl_dio_write32(sc, TL_HASH1, 0);
942 	tl_dio_write32(sc, TL_HASH2, 0);
943 
944 	/* Now program new ones. */
945 	if (ifp->if_flags & IFF_ALLMULTI) {
946 		hashes[0] = 0xFFFFFFFF;
947 		hashes[1] = 0xFFFFFFFF;
948 	} else {
949 		i = 1;
950 		/* First find the tail of the list. */
951 		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
952 					ifma = ifma->ifma_link.le_next) {
953 			if (ifma->ifma_link.le_next == NULL)
954 				break;
955 		}
956 		/* Now traverse the list backwards. */
957 		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
958 			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
959 			if (ifma->ifma_addr->sa_family != AF_LINK)
960 				continue;
961 			/*
962 			 * Program the first three multicast groups
963 			 * into the perfect filter. For all others,
964 			 * use the hash table.
965 			 */
966 			if (i < 4) {
967 				tl_setfilt(sc,
968 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
969 				i++;
970 				continue;
971 			}
972 
973 			h = tl_calchash(
974 				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
975 			if (h < 32)
976 				hashes[0] |= (1 << h);
977 			else
978 				hashes[1] |= (1 << (h - 32));
979 		}
980 	}
981 
982 	tl_dio_write32(sc, TL_HASH1, hashes[0]);
983 	tl_dio_write32(sc, TL_HASH2, hashes[1]);
984 
985 	return;
986 }
987 
988 /*
989  * This routine is recommended by the ThunderLAN manual to ensure that
990  * the internal PHY is powered up correctly. It also recommends a one
991  * second pause at the end to 'wait for the clocks to start' but in my
992  * experience this isn't necessary.
993  */
994 static void tl_hardreset(dev)
995 	device_t		dev;
996 {
997 	struct tl_softc		*sc;
998 	int			i;
999 	u_int16_t		flags;
1000 
1001 	sc = device_get_softc(dev);
1002 
1003 	tl_mii_sync(sc);
1004 
1005 	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
1006 
1007 	for (i = 0; i < MII_NPHY; i++)
1008 		tl_miibus_writereg(dev, i, MII_BMCR, flags);
1009 
1010 	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
1011 	DELAY(50000);
1012 	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
1013 	tl_mii_sync(sc);
1014 	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
1015 
1016 	DELAY(50000);
1017 	return;
1018 }
1019 
1020 static void tl_softreset(sc, internal)
1021 	struct tl_softc		*sc;
1022 	int			internal;
1023 {
1024         u_int32_t               cmd, dummy, i;
1025 
1026         /* Assert the adapter reset bit. */
1027 	CMD_SET(sc, TL_CMD_ADRST);
1028 
1029         /* Turn off interrupts */
1030 	CMD_SET(sc, TL_CMD_INTSOFF);
1031 
1032 	/* First, clear the stats registers. */
1033 	for (i = 0; i < 5; i++)
1034 		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
1035 
1036         /* Clear Areg and Hash registers */
1037 	for (i = 0; i < 8; i++)
1038 		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
1039 
1040         /*
1041 	 * Set up Netconfig register. Enable one channel and
1042 	 * one fragment mode.
1043 	 */
1044 	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
1045 	if (internal && !sc->tl_bitrate) {
1046 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1047 	} else {
1048 		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
1049 	}
1050 
1051 	/* Handle cards with bitrate devices. */
1052 	if (sc->tl_bitrate)
1053 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
1054 
1055 	/*
1056 	 * Load adapter irq pacing timer and tx threshold.
1057 	 * We make the transmit threshold 1 initially but we may
1058 	 * change that later.
1059 	 */
1060 	cmd = CSR_READ_4(sc, TL_HOSTCMD);
1061 	cmd |= TL_CMD_NES;
1062 	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
1063 	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
1064 	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
1065 
1066         /* Unreset the MII */
1067 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
1068 
1069 	/* Take the adapter out of reset */
1070 	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
1071 
1072 	/* Wait for things to settle down a little. */
1073 	DELAY(500);
1074 
1075         return;
1076 }
1077 
1078 /*
1079  * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
1080  * against our list and return its name if we find a match.
1081  */
1082 static int tl_probe(dev)
1083 	device_t		dev;
1084 {
1085 	struct tl_type		*t;
1086 
1087 	t = tl_devs;
1088 
1089 	while(t->tl_name != NULL) {
1090 		if ((pci_get_vendor(dev) == t->tl_vid) &&
1091 		    (pci_get_device(dev) == t->tl_did)) {
1092 			device_set_desc(dev, t->tl_name);
1093 			return(0);
1094 		}
1095 		t++;
1096 	}
1097 
1098 	return(ENXIO);
1099 }
1100 
1101 static int tl_attach(dev)
1102 	device_t		dev;
1103 {
1104 	int			s, i;
1105 	u_int32_t		command;
1106 	u_int16_t		did, vid;
1107 	struct tl_type		*t;
1108 	struct ifnet		*ifp;
1109 	struct tl_softc		*sc;
1110 	int			unit, error = 0, rid;
1111 
1112 	s = splimp();
1113 
1114 	vid = pci_get_vendor(dev);
1115 	did = pci_get_device(dev);
1116 	sc = device_get_softc(dev);
1117 	unit = device_get_unit(dev);
1118 	bzero(sc, sizeof(struct tl_softc));
1119 
1120 	t = tl_devs;
1121 	while(t->tl_name != NULL) {
1122 		if (vid == t->tl_vid && did == t->tl_did)
1123 			break;
1124 		t++;
1125 	}
1126 
1127 	if (t->tl_name == NULL) {
1128 		printf("tl%d: unknown device!?\n", unit);
1129 		goto fail;
1130 	}
1131 
1132 	/*
1133 	 * Map control/status registers.
1134 	 */
1135 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1136 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1137 	pci_write_config(dev, PCIR_COMMAND, command, 4);
1138 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1139 
1140 #ifdef TL_USEIOSPACE
1141 	if (!(command & PCIM_CMD_PORTEN)) {
1142 		printf("tl%d: failed to enable I/O ports!\n", unit);
1143 		error = ENXIO;
1144 		goto fail;
1145 	}
1146 
1147 	rid = TL_PCI_LOIO;
1148 	sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
1149 		0, ~0, 1, RF_ACTIVE);
1150 
1151 	/*
1152 	 * Some cards have the I/O and memory mapped address registers
1153 	 * reversed. Try both combinations before giving up.
1154 	 */
1155 	if (sc->tl_res == NULL) {
1156 		rid = TL_PCI_LOMEM;
1157 		sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
1158 		    0, ~0, 1, RF_ACTIVE);
1159 	}
1160 #else
1161 	if (!(command & PCIM_CMD_MEMEN)) {
1162 		printf("tl%d: failed to enable memory mapping!\n", unit);
1163 		error = ENXIO;
1164 		goto fail;
1165 	}
1166 
1167 	rid = TL_PCI_LOMEM;
1168 	sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1169 	    0, ~0, 1, RF_ACTIVE);
1170 	if (sc->tl_res == NULL) {
1171 		rid = TL_PCI_LOIO;
1172 		sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
1173 		    0, ~0, 1, RF_ACTIVE);
1174 	}
1175 #endif
1176 
1177 	if (sc->tl_res == NULL) {
1178 		printf("tl%d: couldn't map ports/memory\n", unit);
1179 		error = ENXIO;
1180 		goto fail;
1181 	}
1182 
1183 	sc->tl_btag = rman_get_bustag(sc->tl_res);
1184 	sc->tl_bhandle = rman_get_bushandle(sc->tl_res);
1185 
1186 #ifdef notdef
1187 	/*
1188 	 * The ThunderLAN manual suggests jacking the PCI latency
1189 	 * timer all the way up to its maximum value. I'm not sure
1190 	 * if this is really necessary, but what the manual wants,
1191 	 * the manual gets.
1192 	 */
1193 	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
1194 	command |= 0x0000FF00;
1195 	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
1196 #endif
1197 
1198 	/* Allocate interrupt */
1199 	rid = 0;
1200 	sc->tl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1201 	    RF_SHAREABLE | RF_ACTIVE);
1202 
1203 	if (sc->tl_irq == NULL) {
1204 		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1205 		printf("tl%d: couldn't map interrupt\n", unit);
1206 		error = ENXIO;
1207 		goto fail;
1208 	}
1209 
1210 	error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET,
1211 	    tl_intr, sc, &sc->tl_intrhand);
1212 
1213 	if (error) {
1214 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1215 		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1216 		printf("tl%d: couldn't set up irq\n", unit);
1217 		goto fail;
1218 	}
1219 
1220 	/*
1221 	 * Now allocate memory for the TX and RX lists.
1222 	 */
1223 	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
1224 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1225 
1226 	if (sc->tl_ldata == NULL) {
1227 		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1228 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1229 		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1230 		printf("tl%d: no memory for list buffers!\n", unit);
1231 		error = ENXIO;
1232 		goto fail;
1233 	}
1234 
1235 	bzero(sc->tl_ldata, sizeof(struct tl_list_data));
1236 
1237 	sc->tl_unit = unit;
1238 	sc->tl_dinfo = t;
1239 	if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID)
1240 		sc->tl_eeaddr = TL_EEPROM_EADDR;
1241 	if (t->tl_vid == OLICOM_VENDORID)
1242 		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1243 
1244 	/* Reset the adapter. */
1245 	tl_softreset(sc, 1);
1246 	tl_hardreset(dev);
1247 	tl_softreset(sc, 1);
1248 
1249 	/*
1250 	 * Get station address from the EEPROM.
1251 	 */
1252 	if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1253 				sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1254 		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1255 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1256 		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1257 		contigfree(sc->tl_ldata,
1258 		    sizeof(struct tl_list_data), M_DEVBUF);
1259 		printf("tl%d: failed to read station address\n", unit);
1260 		error = ENXIO;
1261 		goto fail;
1262 	}
1263 
1264         /*
1265          * XXX Olicom, in its desire to be different from the
1266          * rest of the world, has done strange things with the
1267          * encoding of the station address in the EEPROM. First
1268          * of all, they store the address at offset 0xF8 rather
1269          * than at 0x83 like the ThunderLAN manual suggests.
1270          * Second, they store the address in three 16-bit words in
1271          * network byte order, as opposed to storing it sequentially
1272          * like all the other ThunderLAN cards. In order to get
1273          * the station address in a form that matches what the Olicom
1274          * diagnostic utility specifies, we have to byte-swap each
1275          * word. To make things even more confusing, neither 00:00:28
1276          * nor 00:00:24 appear in the IEEE OUI database.
1277          */
1278         if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) {
1279                 for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1280                         u_int16_t               *p;
1281                         p = (u_int16_t *)&sc->arpcom.ac_enaddr[i];
1282                         *p = ntohs(*p);
1283                 }
1284         }
1285 
1286 	/*
1287 	 * A ThunderLAN chip was detected. Inform the world.
1288 	 */
1289 	printf("tl%d: Ethernet address: %6D\n", unit,
1290 				sc->arpcom.ac_enaddr, ":");
1291 
1292 	ifp = &sc->arpcom.ac_if;
1293 	ifp->if_softc = sc;
1294 	ifp->if_unit = sc->tl_unit;
1295 	ifp->if_name = "tl";
1296 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1297 	ifp->if_ioctl = tl_ioctl;
1298 	ifp->if_output = ether_output;
1299 	ifp->if_start = tl_start;
1300 	ifp->if_watchdog = tl_watchdog;
1301 	ifp->if_init = tl_init;
1302 	ifp->if_mtu = ETHERMTU;
1303 	ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
1304 	callout_handle_init(&sc->tl_stat_ch);
1305 
1306 	/* Reset the adapter again. */
1307 	tl_softreset(sc, 1);
1308 	tl_hardreset(dev);
1309 	tl_softreset(sc, 1);
1310 
1311 	/*
1312 	 * Do MII setup. If no PHYs are found, then this is a
1313 	 * bitrate ThunderLAN chip that only supports 10baseT
1314 	 * and AUI/BNC.
1315 	 */
1316 	if (mii_phy_probe(dev, &sc->tl_miibus,
1317 	    tl_ifmedia_upd, tl_ifmedia_sts)) {
1318 		struct ifmedia		*ifm;
1319 		sc->tl_bitrate = 1;
1320 		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1321 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
1322 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
1323 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
1324 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
1325 		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
1326 		/* Reset again, this time setting bitrate mode. */
1327 		tl_softreset(sc, 1);
1328 		ifm = &sc->ifmedia;
1329 		ifm->ifm_media = ifm->ifm_cur->ifm_media;
1330 		tl_ifmedia_upd(ifp);
1331 	}
1332 
1333 	/*
1334 	 * Call MI attach routine.
1335 	 */
1336 	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1337 
1338 fail:
1339 	splx(s);
1340 	return(error);
1341 }
1342 
1343 static int tl_detach(dev)
1344 	device_t		dev;
1345 {
1346 	struct tl_softc		*sc;
1347 	struct ifnet		*ifp;
1348 	int			s;
1349 
1350 	s = splimp();
1351 
1352 	sc = device_get_softc(dev);
1353 	ifp = &sc->arpcom.ac_if;
1354 
1355 	tl_stop(sc);
1356 	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
1357 
1358 	bus_generic_detach(dev);
1359 	device_delete_child(dev, sc->tl_miibus);
1360 
1361 	contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
1362 	if (sc->tl_bitrate)
1363 		ifmedia_removeall(&sc->ifmedia);
1364 
1365 	bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
1366 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
1367 	bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);
1368 
1369 	splx(s);
1370 
1371 	return(0);
1372 }
1373 
1374 /*
1375  * Initialize the transmit lists.
1376  */
1377 static int tl_list_tx_init(sc)
1378 	struct tl_softc		*sc;
1379 {
1380 	struct tl_chain_data	*cd;
1381 	struct tl_list_data	*ld;
1382 	int			i;
1383 
1384 	cd = &sc->tl_cdata;
1385 	ld = sc->tl_ldata;
1386 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
1387 		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1388 		if (i == (TL_TX_LIST_CNT - 1))
1389 			cd->tl_tx_chain[i].tl_next = NULL;
1390 		else
1391 			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1392 	}
1393 
1394 	cd->tl_tx_free = &cd->tl_tx_chain[0];
1395 	cd->tl_tx_tail = cd->tl_tx_head = NULL;
1396 	sc->tl_txeoc = 1;
1397 
1398 	return(0);
1399 }
1400 
1401 /*
1402  * Initialize the RX lists and allocate mbufs for them.
1403  */
1404 static int tl_list_rx_init(sc)
1405 	struct tl_softc		*sc;
1406 {
1407 	struct tl_chain_data	*cd;
1408 	struct tl_list_data	*ld;
1409 	int			i;
1410 
1411 	cd = &sc->tl_cdata;
1412 	ld = sc->tl_ldata;
1413 
1414 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
1415 		cd->tl_rx_chain[i].tl_ptr =
1416 			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
1417 		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1418 			return(ENOBUFS);
1419 		if (i == (TL_RX_LIST_CNT - 1)) {
1420 			cd->tl_rx_chain[i].tl_next = NULL;
1421 			ld->tl_rx_list[i].tlist_fptr = 0;
1422 		} else {
1423 			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1424 			ld->tl_rx_list[i].tlist_fptr =
1425 					vtophys(&ld->tl_rx_list[i + 1]);
1426 		}
1427 	}
1428 
1429 	cd->tl_rx_head = &cd->tl_rx_chain[0];
1430 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1431 
1432 	return(0);
1433 }
1434 
1435 static int tl_newbuf(sc, c)
1436 	struct tl_softc		*sc;
1437 	struct tl_chain_onefrag	*c;
1438 {
1439 	struct mbuf		*m_new = NULL;
1440 
1441 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1442 	if (m_new == NULL)
1443 		return(ENOBUFS);
1444 
1445 	MCLGET(m_new, M_DONTWAIT);
1446 	if (!(m_new->m_flags & M_EXT)) {
1447 		m_freem(m_new);
1448 		return(ENOBUFS);
1449 	}
1450 
1451 #ifdef __alpha__
1452 	m_new->m_data += 2;
1453 #endif
1454 
1455 	c->tl_mbuf = m_new;
1456 	c->tl_next = NULL;
1457 	c->tl_ptr->tlist_frsize = MCLBYTES;
1458 	c->tl_ptr->tlist_fptr = 0;
1459 	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
1460 	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1461 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1462 
1463 	return(0);
1464 }
1465 /*
1466  * Interrupt handler for RX 'end of frame' condition (EOF). This
1467  * tells us that a full ethernet frame has been captured and we need
1468  * to handle it.
1469  *
1470  * Reception is done using 'lists' which consist of a header and a
1471  * series of 10 data count/data address pairs that point to buffers.
1472  * Initially you're supposed to create a list, populate it with pointers
1473  * to buffers, then load the physical address of the list into the
1474  * ch_parm register. The adapter is then supposed to DMA the received
1475  * frame into the buffers for you.
1476  *
1477  * To make things as fast as possible, we have the chip DMA directly
1478  * into mbufs. This saves us from having to do a buffer copy: we can
1479  * just hand the mbufs directly to ether_input(). Once the frame has
1480  * been sent on its way, the 'list' structure is assigned a new buffer
1481  * and moved to the end of the RX chain. As long we we stay ahead of
1482  * the chip, it will always think it has an endless receive channel.
1483  *
1484  * If we happen to fall behind and the chip manages to fill up all of
1485  * the buffers, it will generate an end of channel interrupt and wait
1486  * for us to empty the chain and restart the receiver.
1487  */
1488 static int tl_intvec_rxeof(xsc, type)
1489 	void			*xsc;
1490 	u_int32_t		type;
1491 {
1492 	struct tl_softc		*sc;
1493 	int			r = 0, total_len = 0;
1494 	struct ether_header	*eh;
1495 	struct mbuf		*m;
1496 	struct ifnet		*ifp;
1497 	struct tl_chain_onefrag	*cur_rx;
1498 
1499 	sc = xsc;
1500 	ifp = &sc->arpcom.ac_if;
1501 
1502 	while(sc->tl_cdata.tl_rx_head != NULL) {
1503 		cur_rx = sc->tl_cdata.tl_rx_head;
1504 		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1505 			break;
1506 		r++;
1507 		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
1508 		m = cur_rx->tl_mbuf;
1509 		total_len = cur_rx->tl_ptr->tlist_frsize;
1510 
1511 		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
1512 			ifp->if_ierrors++;
1513 			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
1514 			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1515 			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
1516 			continue;
1517 		}
1518 
1519 		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
1520 						vtophys(cur_rx->tl_ptr);
1521 		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
1522 		sc->tl_cdata.tl_rx_tail = cur_rx;
1523 
1524 		eh = mtod(m, struct ether_header *);
1525 		m->m_pkthdr.rcvif = ifp;
1526 
1527 		/*
1528 		 * Note: when the ThunderLAN chip is in 'capture all
1529 		 * frames' mode, it will receive its own transmissions.
1530 		 * We don't need to process our own transmissions,
1531 		 * so we drop them here and continue.
1532 		 */
1533 		/*if (ifp->if_flags & IFF_PROMISC && */
1534 		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
1535 		 					ETHER_ADDR_LEN)) {
1536 				m_freem(m);
1537 				continue;
1538 		}
1539 
1540 		/* Remove header from mbuf and pass it on. */
1541 		m->m_pkthdr.len = m->m_len =
1542 				total_len - sizeof(struct ether_header);
1543 		m->m_data += sizeof(struct ether_header);
1544 		ether_input(ifp, eh, m);
1545 	}
1546 
1547 	return(r);
1548 }
1549 
1550 /*
1551  * The RX-EOC condition hits when the ch_parm address hasn't been
1552  * initialized or the adapter reaches a list with a forward pointer
1553  * of 0 (which indicates the end of the chain). In our case, this means
1554  * the card has hit the end of the receive buffer chain and we need to
1555  * empty out the buffers and shift the pointer back to the beginning again.
1556  */
1557 static int tl_intvec_rxeoc(xsc, type)
1558 	void			*xsc;
1559 	u_int32_t		type;
1560 {
1561 	struct tl_softc		*sc;
1562 	int			r;
1563 	struct tl_chain_data	*cd;
1564 
1565 
1566 	sc = xsc;
1567 	cd = &sc->tl_cdata;
1568 
1569 	/* Flush out the receive queue and ack RXEOF interrupts. */
1570 	r = tl_intvec_rxeof(xsc, type);
1571 	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
1572 	r = 1;
1573 	cd->tl_rx_head = &cd->tl_rx_chain[0];
1574 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1575 	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
1576 	r |= (TL_CMD_GO|TL_CMD_RT);
1577 	return(r);
1578 }
1579 
1580 static int tl_intvec_txeof(xsc, type)
1581 	void			*xsc;
1582 	u_int32_t		type;
1583 {
1584 	struct tl_softc		*sc;
1585 	int			r = 0;
1586 	struct tl_chain		*cur_tx;
1587 
1588 	sc = xsc;
1589 
1590 	/*
1591 	 * Go through our tx list and free mbufs for those
1592 	 * frames that have been sent.
1593 	 */
1594 	while (sc->tl_cdata.tl_tx_head != NULL) {
1595 		cur_tx = sc->tl_cdata.tl_tx_head;
1596 		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
1597 			break;
1598 		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;
1599 
1600 		r++;
1601 		m_freem(cur_tx->tl_mbuf);
1602 		cur_tx->tl_mbuf = NULL;
1603 
1604 		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
1605 		sc->tl_cdata.tl_tx_free = cur_tx;
1606 		if (!cur_tx->tl_ptr->tlist_fptr)
1607 			break;
1608 	}
1609 
1610 	return(r);
1611 }
1612 
1613 /*
1614  * The transmit end of channel interrupt. The adapter triggers this
1615  * interrupt to tell us it hit the end of the current transmit list.
1616  *
1617  * A note about this: it's possible for a condition to arise where
1618  * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1619  * You have to avoid this since the chip expects things to go in a
1620  * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1621  * When the TXEOF handler is called, it will free all of the transmitted
1622  * frames and reset the tx_head pointer to NULL. However, a TXEOC
1623  * interrupt should be received and acknowledged before any more frames
1624  * are queued for transmission. If tl_start() is called after TXEOF
1625  * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1626  * it could attempt to issue a transmit command prematurely.
1627  *
1628  * To guard against this, tl_start() will only issue transmit commands
1629  * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1630  * can set this flag once tl_start() has cleared it.
1631  */
1632 static int tl_intvec_txeoc(xsc, type)
1633 	void			*xsc;
1634 	u_int32_t		type;
1635 {
1636 	struct tl_softc		*sc;
1637 	struct ifnet		*ifp;
1638 	u_int32_t		cmd;
1639 
1640 	sc = xsc;
1641 	ifp = &sc->arpcom.ac_if;
1642 
1643 	/* Clear the timeout timer. */
1644 	ifp->if_timer = 0;
1645 
1646 	if (sc->tl_cdata.tl_tx_head == NULL) {
1647 		ifp->if_flags &= ~IFF_OACTIVE;
1648 		sc->tl_cdata.tl_tx_tail = NULL;
1649 		sc->tl_txeoc = 1;
1650 	} else {
1651 		sc->tl_txeoc = 0;
1652 		/* First we have to ack the EOC interrupt. */
1653 		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
1654 		/* Then load the address of the next TX list. */
1655 		CSR_WRITE_4(sc, TL_CH_PARM,
1656 		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
1657 		/* Restart TX channel. */
1658 		cmd = CSR_READ_4(sc, TL_HOSTCMD);
1659 		cmd &= ~TL_CMD_RT;
1660 		cmd |= TL_CMD_GO|TL_CMD_INTSON;
1661 		CMD_PUT(sc, cmd);
1662 		return(0);
1663 	}
1664 
1665 	return(1);
1666 }
1667 
1668 static int tl_intvec_adchk(xsc, type)
1669 	void			*xsc;
1670 	u_int32_t		type;
1671 {
1672 	struct tl_softc		*sc;
1673 
1674 	sc = xsc;
1675 
1676 	if (type)
1677 		printf("tl%d: adapter check: %x\n", sc->tl_unit,
1678 			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));
1679 
1680 	tl_softreset(sc, 1);
1681 	tl_stop(sc);
1682 	tl_init(sc);
1683 	CMD_SET(sc, TL_CMD_INTSON);
1684 
1685 	return(0);
1686 }
1687 
1688 static int tl_intvec_netsts(xsc, type)
1689 	void			*xsc;
1690 	u_int32_t		type;
1691 {
1692 	struct tl_softc		*sc;
1693 	u_int16_t		netsts;
1694 
1695 	sc = xsc;
1696 
1697 	netsts = tl_dio_read16(sc, TL_NETSTS);
1698 	tl_dio_write16(sc, TL_NETSTS, netsts);
1699 
1700 	printf("tl%d: network status: %x\n", sc->tl_unit, netsts);
1701 
1702 	return(1);
1703 }
1704 
1705 static void tl_intr(xsc)
1706 	void			*xsc;
1707 {
1708 	struct tl_softc		*sc;
1709 	struct ifnet		*ifp;
1710 	int			r = 0;
1711 	u_int32_t		type = 0;
1712 	u_int16_t		ints = 0;
1713 	u_int8_t		ivec = 0;
1714 
1715 	sc = xsc;
1716 
1717 	/* Disable interrupts */
1718 	ints = CSR_READ_2(sc, TL_HOST_INT);
1719 	CSR_WRITE_2(sc, TL_HOST_INT, ints);
1720 	type = (ints << 16) & 0xFFFF0000;
1721 	ivec = (ints & TL_VEC_MASK) >> 5;
1722 	ints = (ints & TL_INT_MASK) >> 2;
1723 
1724 	ifp = &sc->arpcom.ac_if;
1725 
1726 	switch(ints) {
1727 	case (TL_INTR_INVALID):
1728 #ifdef DIAGNOSTIC
1729 		printf("tl%d: got an invalid interrupt!\n", sc->tl_unit);
1730 #endif
1731 		/* Re-enable interrupts but don't ack this one. */
1732 		CMD_PUT(sc, type);
1733 		r = 0;
1734 		break;
1735 	case (TL_INTR_TXEOF):
1736 		r = tl_intvec_txeof((void *)sc, type);
1737 		break;
1738 	case (TL_INTR_TXEOC):
1739 		r = tl_intvec_txeoc((void *)sc, type);
1740 		break;
1741 	case (TL_INTR_STATOFLOW):
1742 		tl_stats_update(sc);
1743 		r = 1;
1744 		break;
1745 	case (TL_INTR_RXEOF):
1746 		r = tl_intvec_rxeof((void *)sc, type);
1747 		break;
1748 	case (TL_INTR_DUMMY):
1749 		printf("tl%d: got a dummy interrupt\n", sc->tl_unit);
1750 		r = 1;
1751 		break;
1752 	case (TL_INTR_ADCHK):
1753 		if (ivec)
1754 			r = tl_intvec_adchk((void *)sc, type);
1755 		else
1756 			r = tl_intvec_netsts((void *)sc, type);
1757 		break;
1758 	case (TL_INTR_RXEOC):
1759 		r = tl_intvec_rxeoc((void *)sc, type);
1760 		break;
1761 	default:
1762 		printf("tl%d: bogus interrupt type\n", ifp->if_unit);
1763 		break;
1764 	}
1765 
1766 	/* Re-enable interrupts */
1767 	if (r) {
1768 		CMD_PUT(sc, TL_CMD_ACK | r | type);
1769 	}
1770 
1771 	if (ifp->if_snd.ifq_head != NULL)
1772 		tl_start(ifp);
1773 
1774 	return;
1775 }
1776 
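/*
 * Periodic statistics handler, rescheduled once a second via timeout().
 * Harvest the chip's statistics counters, fold them into the interface
 * counters, raise the TX threshold if we have been seeing underruns,
 * and give the MII state machine a tick when an external PHY is in use.
 */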
1777 static void tl_stats_update(xsc)
1778 	void			*xsc;
1779 {
1780 	struct tl_softc		*sc;
1781 	struct ifnet		*ifp;
1782 	struct tl_stats		tl_stats;
1783 	struct mii_data		*mii;
1784 	u_int32_t		*p;
1785 	int			s;
1786 
1787 	s = splimp();
1788 
1789 	bzero((char *)&tl_stats, sizeof(struct tl_stats));
1790 
1791 	sc = xsc;
1792 	ifp = &sc->arpcom.ac_if;
1793 
1794 	p = (u_int32_t *)&tl_stats;
1795 
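	/*
	 * Point the DIO address register at the TX good frames counter
	 * with auto-increment set, then read out all five 32-bit
	 * statistics words back to back through the data register.
	 */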
1796 	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
1797 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1798 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1799 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1800 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1801 	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
1802 
1803 	ifp->if_opackets += tl_tx_goodframes(tl_stats);
1804 	ifp->if_collisions += tl_stats.tl_tx_single_collision +
1805 				tl_stats.tl_tx_multi_collision;
1806 	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
1807 	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
1808 			    tl_rx_overrun(tl_stats);
1809 	ifp->if_oerrors += tl_tx_underrun(tl_stats);
1810 
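	/*
	 * If the transmitter underran, step the TX commit threshold up one
	 * notch (unless we are already waiting for the whole packet) so
	 * that more of each frame is buffered before transmission starts.
	 */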
1811 	if (tl_tx_underrun(tl_stats)) {
1812 		u_int8_t		tx_thresh;
1813 		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
1814 		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
1815 			tx_thresh >>= 4;
1816 			tx_thresh++;
1817 			printf("tl%d: tx underrun -- increasing "
1818 			    "tx threshold to %d bytes\n", sc->tl_unit,
1819 			    (64 * (tx_thresh * 4)));
1820 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
1821 			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
1822 		}
1823 	}
1824 
1825 	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);
1826 
1827 	if (!sc->tl_bitrate) {
1828 		mii = device_get_softc(sc->tl_miibus);
1829 		mii_tick(mii);
1830 	}
1831 
1832 	splx(s);
1833 
1834 	return;
1835 }
1836 
1837 /*
1838  * Encapsulate an mbuf chain in a list by coupling the mbuf data
1839  * pointers to the fragment pointers.
1840  */
1841 static int tl_encap(sc, c, m_head)
1842 	struct tl_softc		*sc;
1843 	struct tl_chain		*c;
1844 	struct mbuf		*m_head;
1845 {
1846 	int			frag = 0;
1847 	struct tl_frag		*f = NULL;
1848 	int			total_len;
1849 	struct mbuf		*m;
1850 
1851 	/*
1852  	 * Start packing the mbufs in this chain into
1853 	 * the fragment pointers. Stop when we run out
1854  	 * of fragments or hit the end of the mbuf chain.
1855 	 */
1856 	m = m_head;
1857 	total_len = 0;
1858 
1859 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1860 		if (m->m_len != 0) {
1861 			if (frag == TL_MAXFRAGS)
1862 				break;
1863 			total_len += m->m_len;
1864 			c->tl_ptr->tl_frag[frag].tlist_dadr =
1865 				vtophys(mtod(m, vm_offset_t));
1866 			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
1867 			frag++;
1868 		}
1869 	}
1870 
1871 	/*
1872 	 * Handle special cases.
1873 	 * Special case #1: we used up all 10 fragments, but
1874 	 * we have more mbufs left in the chain. Copy the
1875 	 * data into an mbuf cluster. Note that we don't
1876 	 * bother clearing the values in the other fragment
1877 	 * pointers/counters; it wouldn't gain us anything,
1878 	 * and would waste cycles.
1879 	 */
1880 	if (m != NULL) {
1881 		struct mbuf		*m_new = NULL;
1882 
1883 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1884 		if (m_new == NULL) {
1885 			printf("tl%d: no memory for tx list\n", sc->tl_unit);
1886 			return(1);
1887 		}
1888 		if (m_head->m_pkthdr.len > MHLEN) {
1889 			MCLGET(m_new, M_DONTWAIT);
1890 			if (!(m_new->m_flags & M_EXT)) {
1891 				m_freem(m_new);
1892 				printf("tl%d: no memory for tx list\n",
1893 				sc->tl_unit);
1894 				return(1);
1895 			}
1896 		}
1897 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1898 					mtod(m_new, caddr_t));
1899 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1900 		m_freem(m_head);
1901 		m_head = m_new;
1902 		f = &c->tl_ptr->tl_frag[0];
1903 		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
1904 		f->tlist_dcnt = total_len = m_new->m_len;
1905 		frag = 1;
1906 	}
1907 
1908 	/*
1909 	 * Special case #2: the frame is smaller than the minimum
1910 	 * frame size. We have to pad it to make the chip happy.
1911 	 */
1912 	if (total_len < TL_MIN_FRAMELEN) {
1913 		if (frag == TL_MAXFRAGS)
1914 			printf("tl%d: all frags filled but "
1915 				"frame still too small!\n", sc->tl_unit);
1916 		f = &c->tl_ptr->tl_frag[frag];
1917 		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
1918 		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
1919 		total_len += f->tlist_dcnt;
1920 		frag++;
1921 	}
1922 
1923 	c->tl_mbuf = m_head;
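	/*
	 * Finish off the list entry: remember the mbuf chain, flag the
	 * final fragment, record the total frame size, mark the list
	 * ready and leave the forward pointer empty for now.
	 */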
1924 	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
1925 	c->tl_ptr->tlist_frsize = total_len;
1926 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1927 	c->tl_ptr->tlist_fptr = 0;
1928 
1929 	return(0);
1930 }
1931 
1932 /*
1933  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1934  * to the mbuf data regions directly in the transmit lists. We also save a
1935  * copy of the pointers since the transmit list fragment pointers are
1936  * physical addresses.
1937  */
1938 static void tl_start(ifp)
1939 	struct ifnet		*ifp;
1940 {
1941 	struct tl_softc		*sc;
1942 	struct mbuf		*m_head = NULL;
1943 	u_int32_t		cmd;
1944 	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1945 
1946 	sc = ifp->if_softc;
1947 
1948 	/*
1949 	 * Check for an available queue slot. If there are none,
1950 	 * punt.
1951 	 */
1952 	if (sc->tl_cdata.tl_tx_free == NULL) {
1953 		ifp->if_flags |= IFF_OACTIVE;
1954 		return;
1955 	}
1956 
1957 	start_tx = sc->tl_cdata.tl_tx_free;
1958 
1959 	while(sc->tl_cdata.tl_tx_free != NULL) {
1960 		IF_DEQUEUE(&ifp->if_snd, m_head);
1961 		if (m_head == NULL)
1962 			break;
1963 
1964 		/* Pick a chain member off the free list. */
1965 		cur_tx = sc->tl_cdata.tl_tx_free;
1966 		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;
1967 
1968 		cur_tx->tl_next = NULL;
1969 
1970 		/* Pack the data into the list. */
1971 		tl_encap(sc, cur_tx, m_head);
1972 
1973 		/* Chain it together */
1974 		if (prev != NULL) {
1975 			prev->tl_next = cur_tx;
1976 			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
1977 		}
1978 		prev = cur_tx;
1979 
1980 		/*
1981 		 * If there's a BPF listener, bounce a copy of this frame
1982 		 * to him.
1983 		 */
1984 		if (ifp->if_bpf)
1985 			bpf_mtap(ifp, cur_tx->tl_mbuf);
1986 	}
1987 
1988 	/*
1989 	 * If there are no packets queued, bail.
1990 	 */
1991 	if (cur_tx == NULL)
1992 		return;
1993 
1994 	/*
1995 	 * That's all we can stands, we can't stands no more.
1996 	 * If there are no other transfers pending, then issue the
1997 	 * TX GO command to the adapter to start things moving.
1998 	 * Otherwise, just leave the data in the queue and let
1999 	 * the EOF/EOC interrupt handler send.
2000 	 */
2001 	if (sc->tl_cdata.tl_tx_head == NULL) {
2002 		sc->tl_cdata.tl_tx_head = start_tx;
2003 		sc->tl_cdata.tl_tx_tail = cur_tx;
2004 
2005 		if (sc->tl_txeoc) {
2006 			sc->tl_txeoc = 0;
2007 			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
2008 			cmd = CSR_READ_4(sc, TL_HOSTCMD);
2009 			cmd &= ~TL_CMD_RT;
2010 			cmd |= TL_CMD_GO|TL_CMD_INTSON;
2011 			CMD_PUT(sc, cmd);
2012 		}
2013 	} else {
2014 		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
2015 		sc->tl_cdata.tl_tx_tail = cur_tx;
2016 	}
2017 
2018 	/*
2019 	 * Set a timeout in case the chip goes out to lunch.
2020 	 */
2021 	ifp->if_timer = 5;
2022 
2023 	return;
2024 }
2025 
2026 static void tl_init(xsc)
2027 	void			*xsc;
2028 {
2029 	struct tl_softc		*sc = xsc;
2030 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2031 	int			s;
2032 	struct mii_data		*mii;
2033 
2034 	s = splimp();
2035 
2036 	ifp = &sc->arpcom.ac_if;
2037 
2038 	/*
2039 	 * Cancel pending I/O.
2040 	 */
2041 	tl_stop(sc);
2042 
2043 	/* Initialize TX FIFO threshold */
2044 	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
2045 	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);
2046 
2047 	/* Set PCI burst size */
2048 	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);
2049 
2050 	/*
2051 	 * Set 'capture all frames' bit for promiscuous mode.
2052 	 */
2053 	if (ifp->if_flags & IFF_PROMISC)
2054 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2055 	else
2056 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2057 
2058 	/*
2059 	 * Set capture broadcast bit to capture broadcast frames.
2060 	 */
2061 	if (ifp->if_flags & IFF_BROADCAST)
2062 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2063 	else
2064 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);
2065 
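	/* Limit received frames to the size of an mbuf cluster. */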
2066 	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);
2067 
2068 	/* Init our MAC address */
2069 	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);
2070 
2071 	/* Init multicast filter, if needed. */
2072 	tl_setmulti(sc);
2073 
2074 	/* Init circular RX list. */
2075 	if (tl_list_rx_init(sc) == ENOBUFS) {
2076 		printf("tl%d: initialization failed: no "
2077 			"memory for rx buffers\n", sc->tl_unit);
2078 		tl_stop(sc);
2079 		return;
2080 	}
2081 
2082 	/* Init TX pointers. */
2083 	tl_list_tx_init(sc);
2084 
2085 	/* Enable PCI interrupts. */
2086 	CMD_SET(sc, TL_CMD_INTSON);
2087 
2088 	/* Load the address of the rx list */
2089 	CMD_SET(sc, TL_CMD_RT);
2090 	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));
2091 
2092 	if (!sc->tl_bitrate) {
2093 		if (sc->tl_miibus != NULL) {
2094 			mii = device_get_softc(sc->tl_miibus);
2095 			mii_mediachg(mii);
2096 		}
2097 	}
2098 
2099 	/* Send the RX go command */
2100 	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);
2101 
2102 	ifp->if_flags |= IFF_RUNNING;
2103 	ifp->if_flags &= ~IFF_OACTIVE;
2104 
2105 	(void)splx(s);
2106 
2107 	/* Start the stats update counter */
2108 	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);
2109 
2110 	return;
2111 }
2112 
2113 /*
2114  * Set media options.
2115  */
2116 static int tl_ifmedia_upd(ifp)
2117 	struct ifnet		*ifp;
2118 {
2119 	struct tl_softc		*sc;
2120 	struct mii_data		*mii = NULL;
2121 
2122 	sc = ifp->if_softc;
2123 
2124 	if (sc->tl_bitrate)
2125 		tl_setmode(sc, sc->ifmedia.ifm_media);
2126 	else {
2127 		mii = device_get_softc(sc->tl_miibus);
2128 		mii_mediachg(mii);
2129 	}
2130 
2131 	return(0);
2132 }
2133 
2134 /*
2135  * Report current media status.
2136  */
2137 static void tl_ifmedia_sts(ifp, ifmr)
2138 	struct ifnet		*ifp;
2139 	struct ifmediareq	*ifmr;
2140 {
2141 	struct tl_softc		*sc;
2142 	struct mii_data		*mii;
2143 
2144 	sc = ifp->if_softc;
2145 
2146 	ifmr->ifm_active = IFM_ETHER;
2147 
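	/*
	 * In bitrate mode (no MII-managed PHY) the current media is
	 * reflected in the MTXD bits of the adapter commit register:
	 * MTXD1 selects the AUI port and MTXD3 selects half duplex.
	 * Otherwise just ask the MII layer.
	 */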
2148 	if (sc->tl_bitrate) {
2149 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
2150 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2151 		else
2152 			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2153 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
2154 			ifmr->ifm_active |= IFM_HDX;
2155 		else
2156 			ifmr->ifm_active |= IFM_FDX;
2157 		return;
2158 	} else {
2159 		mii = device_get_softc(sc->tl_miibus);
2160 		mii_pollstat(mii);
2161 		ifmr->ifm_active = mii->mii_media_active;
2162 		ifmr->ifm_status = mii->mii_media_status;
2163 	}
2164 
2165 	return;
2166 }
2167 
2168 static int tl_ioctl(ifp, command, data)
2169 	struct ifnet		*ifp;
2170 	u_long			command;
2171 	caddr_t			data;
2172 {
2173 	struct tl_softc		*sc = ifp->if_softc;
2174 	struct ifreq		*ifr = (struct ifreq *) data;
2175 	int			s, error = 0;
2176 
2177 	s = splimp();
2178 
2179 	switch(command) {
2180 	case SIOCSIFADDR:
2181 	case SIOCGIFADDR:
2182 	case SIOCSIFMTU:
2183 		error = ether_ioctl(ifp, command, data);
2184 		break;
2185 	case SIOCSIFFLAGS:
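		/*
		 * If only the PROMISC flag changed while the interface is
		 * running, just toggle the 'capture all frames' bit and
		 * reload the multicast filter; otherwise reinitialize or
		 * stop the chip according to the UP flag.
		 */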
2186 		if (ifp->if_flags & IFF_UP) {
2187 			if (ifp->if_flags & IFF_RUNNING &&
2188 			    ifp->if_flags & IFF_PROMISC &&
2189 			    !(sc->tl_if_flags & IFF_PROMISC)) {
2190 				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
2191 				tl_setmulti(sc);
2192 			} else if (ifp->if_flags & IFF_RUNNING &&
2193 			    !(ifp->if_flags & IFF_PROMISC) &&
2194 			    sc->tl_if_flags & IFF_PROMISC) {
2195 				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
2196 				tl_setmulti(sc);
2197 			} else
2198 				tl_init(sc);
2199 		} else {
2200 			if (ifp->if_flags & IFF_RUNNING) {
2201 				tl_stop(sc);
2202 			}
2203 		}
2204 		sc->tl_if_flags = ifp->if_flags;
2205 		error = 0;
2206 		break;
2207 	case SIOCADDMULTI:
2208 	case SIOCDELMULTI:
2209 		tl_setmulti(sc);
2210 		error = 0;
2211 		break;
2212 	case SIOCSIFMEDIA:
2213 	case SIOCGIFMEDIA:
2214 		if (sc->tl_bitrate)
2215 			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
2216 		else {
2217 			struct mii_data		*mii;
2218 			mii = device_get_softc(sc->tl_miibus);
2219 			error = ifmedia_ioctl(ifp, ifr,
2220 			    &mii->mii_media, command);
2221 		}
2222 		break;
2223 	default:
2224 		error = EINVAL;
2225 		break;
2226 	}
2227 
2228 	(void)splx(s);
2229 
2230 	return(error);
2231 }
2232 
2233 static void tl_watchdog(ifp)
2234 	struct ifnet		*ifp;
2235 {
2236 	struct tl_softc		*sc;
2237 
2238 	sc = ifp->if_softc;
2239 
2240 	printf("tl%d: device timeout\n", sc->tl_unit);
2241 
2242 	ifp->if_oerrors++;
2243 
2244 	tl_softreset(sc, 1);
2245 	tl_init(sc);
2246 
2247 	return;
2248 }
2249 
2250 /*
2251  * Stop the adapter and free any mbufs allocated to the
2252  * RX and TX lists.
2253  */
2254 static void tl_stop(sc)
2255 	struct tl_softc		*sc;
2256 {
2257 	int		i;
2258 	struct ifnet		*ifp;
2259 
2260 	ifp = &sc->arpcom.ac_if;
2261 
2262 	/* Stop the stats updater. */
2263 	untimeout(tl_stats_update, sc, sc->tl_stat_ch);
2264 
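	/*
	 * The RT bit in the host command register selects which channel
	 * the STOP command applies to: clear for transmit, set for receive.
	 */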
2265 	/* Stop the transmitter */
2266 	CMD_CLR(sc, TL_CMD_RT);
2267 	CMD_SET(sc, TL_CMD_STOP);
2268 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2269 
2270 	/* Stop the receiver */
2271 	CMD_SET(sc, TL_CMD_RT);
2272 	CMD_SET(sc, TL_CMD_STOP);
2273 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2274 
2275 	/*
2276 	 * Disable host interrupts.
2277 	 */
2278 	CMD_SET(sc, TL_CMD_INTSOFF);
2279 
2280 	/*
2281 	 * Clear list pointer.
2282 	 */
2283 	CSR_WRITE_4(sc, TL_CH_PARM, 0);
2284 
2285 	/*
2286 	 * Free the RX lists.
2287 	 */
2288 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
2289 		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
2290 			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
2291 			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
2292 		}
2293 	}
2294 	bzero((char *)&sc->tl_ldata->tl_rx_list,
2295 		sizeof(sc->tl_ldata->tl_rx_list));
2296 
2297 	/*
2298 	 * Free the TX list buffers.
2299 	 */
2300 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
2301 		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
2302 			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
2303 			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
2304 		}
2305 	}
2306 	bzero((char *)&sc->tl_ldata->tl_tx_list,
2307 		sizeof(sc->tl_ldata->tl_tx_list));
2308 
2309 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2310 
2311 	return;
2312 }
2313 
2314 /*
2315  * Stop all chip I/O so that the kernel's probe routines don't
2316  * get confused by errant DMAs when rebooting.
2317  */
2318 static void tl_shutdown(dev)
2319 	device_t		dev;
2320 {
2321 	struct tl_softc		*sc;
2322 
2323 	sc = device_get_softc(dev);
2324 
2325 	tl_stop(sc);
2326 
2327 	return;
2328 }
2329