xref: /openbsd/sys/dev/pci/if_tl.c (revision cecf84d4)
1 /*	$OpenBSD: if_tl.c,v 1.63 2015/04/30 07:51:07 mpi Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_tl.c,v 1.64 2001/02/06 10:11:48 phk Exp $
35  */
36 
37 /*
38  * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
39  * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
40  * the National Semiconductor DP83840A physical interface and the
41  * Microchip Technology 24Cxx series serial EEPROM.
42  *
43  * Written using the following four documents:
44  *
45  * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
46  * National Semiconductor DP83840A data sheet (www.national.com)
47  * Microchip Technology 24C02C data sheet (www.microchip.com)
48  * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
49  *
50  * Written by Bill Paul <wpaul@ctr.columbia.edu>
51  * Electrical Engineering Department
52  * Columbia University, New York City
53  */
54 
55 /*
56  * Some notes about the ThunderLAN:
57  *
58  * The ThunderLAN controller is a single chip containing PCI controller
59  * logic, approximately 3K of on-board SRAM, a LAN controller, and media
60  * independent interface (MII) bus. The MII allows the ThunderLAN chip to
61  * control up to 32 different physical interfaces (PHYs). The ThunderLAN
62  * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
63  * to act as a complete ethernet interface.
64  *
65  * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
66  * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
67  * in full or half duplex. Some of the Compaq Deskpro machines use a
68  * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
69  * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
70  * concert with the ThunderLAN's internal PHY to provide full 10/100
71  * support. This is cheaper than using a standalone external PHY for both
72  * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
73  * A serial EEPROM is also attached to the ThunderLAN chip to provide
74  * power-up default register settings and for storing the adapter's
75  * station address. Although not supported by this driver, the ThunderLAN
76  * chip can also be connected to token ring PHYs.
77  *
78  * The ThunderLAN has a set of registers which can be used to issue
79  * commands, acknowledge interrupts, and to manipulate other internal
80  * registers on its DIO bus. The primary registers can be accessed
81  * using either programmed I/O (inb/outb) or via PCI memory mapping,
82  * depending on how the card is configured during the PCI probing
83  * phase. It is even possible to have both PIO and memory mapped
84  * access turned on at the same time.
85  *
86  * Frame reception and transmission with the ThunderLAN chip is done
87  * using frame 'lists.' A list structure looks more or less like this:
88  *
89  * struct tl_frag {
90  *	u_int32_t		fragment_address;
91  *	u_int32_t		fragment_size;
92  * };
93  * struct tl_list {
94  *	u_int32_t		forward_pointer;
95  *	u_int16_t		cstat;
96  *	u_int16_t		frame_size;
97  *	struct tl_frag		fragments[10];
98  * };
99  *
100  * The forward pointer in the list header can be either a 0 or the address
101  * of another list, which allows several lists to be linked together. Each
102  * list contains up to 10 fragment descriptors. This means the chip allows
103  * ethernet frames to be broken up into up to 10 chunks for transfer to
104  * and from the SRAM. Note that the forward pointer and fragment buffer
105  * addresses are physical memory addresses, not virtual. Note also that
106  * a single ethernet frame can not span lists: if the host wants to
107  * transmit a frame and the frame data is split up over more than 10
 * buffers, the frame has to be collapsed before it can be transmitted.
109  *
110  * To receive frames, the driver sets up a number of lists and populates
111  * the fragment descriptors, then it sends an RX GO command to the chip.
112  * When a frame is received, the chip will DMA it into the memory regions
113  * specified by the fragment descriptors and then trigger an RX 'end of
114  * frame interrupt' when done. The driver may choose to use only one
 * fragment per list; this may result in slightly less efficient use
116  * of memory in exchange for improving performance.
117  *
118  * To transmit frames, the driver again sets up lists and fragment
119  * descriptors, only this time the buffers contain frame data that
120  * is to be DMA'ed into the chip instead of out of it. Once the chip
121  * has transferred the data into its on-board SRAM, it will trigger a
122  * TX 'end of frame' interrupt. It will also generate an 'end of channel'
123  * interrupt when it reaches the end of the list.
124  */
125 
126 /*
127  * Some notes about this driver:
128  *
129  * The ThunderLAN chip provides a couple of different ways to organize
130  * reception, transmission and interrupt handling. The simplest approach
131  * is to use one list each for transmission and reception. In this mode,
132  * the ThunderLAN will generate two interrupts for every received frame
133  * (one RX EOF and one RX EOC) and two for each transmitted frame (one
134  * TX EOF and one TX EOC). This may make the driver simpler but it hurts
135  * performance to have to handle so many interrupts.
136  *
137  * Initially I wanted to create a circular list of receive buffers so
138  * that the ThunderLAN chip would think there was an infinitely long
139  * receive channel and never deliver an RXEOC interrupt. However this
140  * doesn't work correctly under heavy load: while the manual says the
141  * chip will trigger an RXEOF interrupt each time a frame is copied into
142  * memory, you can't count on the chip waiting around for you to acknowledge
143  * the interrupt before it starts trying to DMA the next frame. The result
144  * is that the chip might traverse the entire circular list and then wrap
145  * around before you have a chance to do anything about it. Consequently,
146  * the receive list is terminated (with a 0 in the forward pointer in the
147  * last element). Each time an RXEOF interrupt arrives, the used list
148  * is shifted to the end of the list. This gives the appearance of an
149  * infinitely large RX chain so long as the driver doesn't fall behind
150  * the chip and allow all of the lists to be filled up.
151  *
152  * If all the lists are filled, the adapter will deliver an RX 'end of
153  * channel' interrupt when it hits the 0 forward pointer at the end of
154  * the chain. The RXEOC handler then cleans out the RX chain and resets
155  * the list head pointer in the ch_parm register and restarts the receiver.
156  *
157  * For frame transmission, it is possible to program the ThunderLAN's
158  * transmit interrupt threshold so that the chip can acknowledge multiple
159  * lists with only a single TX EOF interrupt. This allows the driver to
160  * queue several frames in one shot, and only have to handle a total
161  * two interrupts (one TX EOF and one TX EOC) no matter how many frames
162  * are transmitted. Frame transmission is done directly out of the
163  * mbufs passed to the tl_start() routine via the interface send queue.
164  * The driver simply sets up the fragment descriptors in the transmit
165  * lists to point to the mbuf data regions and sends a TX GO command.
166  *
167  * Note that since the RX and TX lists themselves are always used
 * only by the driver, they are malloc()ed once at driver initialization
169  * time and never free()ed.
170  *
171  * Also, in order to remain as platform independent as possible, this
172  * driver uses memory mapped register access to manipulate the card
173  * as opposed to programmed I/O. This avoids the use of the inb/outb
174  * (and related) instructions which are specific to the i386 platform.
175  *
176  * Using these techniques, this driver achieves very high performance
177  * by minimizing the amount of interrupts generated during large
178  * transfers and by completely avoiding buffer copies. Frame transfer
179  * to and from the ThunderLAN chip is performed entirely by the chip
180  * itself thereby reducing the load on the host CPU.
181  */
182 
183 #include "bpfilter.h"
184 
185 #include <sys/param.h>
186 #include <sys/systm.h>
187 #include <sys/sockio.h>
188 #include <sys/mbuf.h>
189 #include <sys/malloc.h>
190 #include <sys/kernel.h>
191 #include <sys/socket.h>
192 #include <sys/device.h>
193 #include <sys/timeout.h>
194 
195 #include <net/if.h>
196 
197 #include <netinet/in.h>
198 #include <netinet/if_ether.h>
199 
200 #include <net/if_dl.h>
201 #include <net/if_media.h>
202 
203 #if NBPFILTER > 0
204 #include <net/bpf.h>
205 #endif
206 
207 #include <uvm/uvm_extern.h>              /* for vtophys */
208 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
209 
210 #include <dev/mii/mii.h>
211 #include <dev/mii/miivar.h>
212 
213 #include <dev/pci/pcireg.h>
214 #include <dev/pci/pcivar.h>
215 #include <dev/pci/pcidevs.h>
216 
217 /*
218  * Default to using PIO register access mode to pacify certain
219  * laptop docking stations with built-in ThunderLAN chips that
220  * don't seem to handle memory mapped mode properly.
221  */
222 #define TL_USEIOSPACE
223 
224 #include <dev/pci/if_tlreg.h>
225 #include <dev/mii/tlphyvar.h>
226 
/*
 * Table of supported adapters: PCI vendor/product ID pairs plus the
 * media-quirk flags handed to the attached tlphy(4) PHY driver for
 * that model.  The all-zero entry terminates the list.
 */
const struct tl_products tl_prods[] = {
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N100TX, TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N10T, TLPHY_MEDIA_10_5 },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntNF3P, TLPHY_MEDIA_10_2 },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntPL100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DPNet100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DP4000, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P_BNC, TLPHY_MEDIA_10_2 },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P, TLPHY_MEDIA_10_5 },
	{ PCI_VENDOR_TI, PCI_PRODUCT_TI_TLAN, 0 },
	{ 0, 0, 0 }
};
239 
/* Autoconf glue. */
int tl_probe(struct device *, void *, void *);
void tl_attach(struct device *, struct device *, void *);
void tl_wait_up(void *);
/* Interrupt service vectors, one per chip interrupt type. */
int tl_intvec_rxeoc(void *, u_int32_t);
int tl_intvec_txeoc(void *, u_int32_t);
int tl_intvec_txeof(void *, u_int32_t);
int tl_intvec_rxeof(void *, u_int32_t);
int tl_intvec_adchk(void *, u_int32_t);
int tl_intvec_netsts(void *, u_int32_t);

/* RX buffer replenishment, statistics and TX encapsulation helpers. */
int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *);
void tl_stats_update(void *);
int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *);

/* ifnet entry points and top-level interrupt handler. */
int tl_intr(void *);
void tl_start(struct ifnet *);
int tl_ioctl(struct ifnet *, u_long, caddr_t);
void tl_init(void *);
void tl_stop(struct tl_softc *);
void tl_watchdog(struct ifnet *);
int tl_ifmedia_upd(struct ifnet *);
void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* Bit-banged serial EEPROM access. */
u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
u_int8_t tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *);
int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);

/* Bit-banged MII access and mii(4) bus callbacks. */
void tl_mii_sync(struct tl_softc *);
void tl_mii_send(struct tl_softc *, u_int32_t, int);
int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
int tl_miibus_readreg(struct device *, int, int);
void tl_miibus_writereg(struct device *, int, int, int);
void tl_miibus_statchg(struct device *);

/* Media setup, receive filter programming, reset and list setup. */
void tl_setmode(struct tl_softc *, int);
int tl_calchash(caddr_t);
void tl_iff(struct tl_softc *);
void tl_setfilt(struct tl_softc *, caddr_t, int);
void tl_softreset(struct tl_softc *, int);
void tl_hardreset(struct device *);
int tl_list_rx_init(struct tl_softc *);
int tl_list_tx_init(struct tl_softc *);

/* Accessors for registers behind the chip's internal DIO bus. */
u_int8_t tl_dio_read8(struct tl_softc *, int);
u_int16_t tl_dio_read16(struct tl_softc *, int);
u_int32_t tl_dio_read32(struct tl_softc *, int);
void tl_dio_write8(struct tl_softc *, int, int);
void tl_dio_write16(struct tl_softc *, int, int);
void tl_dio_write32(struct tl_softc *, int, int);
void tl_dio_setbit(struct tl_softc *, int, int);
void tl_dio_clrbit(struct tl_softc *, int, int);
void tl_dio_setbit16(struct tl_softc *, int, int);
void tl_dio_clrbit16(struct tl_softc *, int, int);
294 
295 u_int8_t
296 tl_dio_read8(struct tl_softc *sc, int reg)
297 {
298 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
299 	return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
300 }
301 
302 u_int16_t
303 tl_dio_read16(struct tl_softc *sc, int reg)
304 {
305 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
306 	return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
307 }
308 
309 u_int32_t
310 tl_dio_read32(struct tl_softc *sc, int reg)
311 {
312 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
313 	return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
314 }
315 
316 void
317 tl_dio_write8(struct tl_softc *sc, int reg, int val)
318 {
319 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
320 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
321 }
322 
323 void
324 tl_dio_write16(struct tl_softc *sc, int reg, int val)
325 {
326 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
327 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
328 }
329 
330 void
331 tl_dio_write32(struct tl_softc *sc, int reg, int val)
332 {
333 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
334 	CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
335 }
336 
337 void
338 tl_dio_setbit(struct tl_softc *sc, int reg, int bit)
339 {
340 	u_int8_t			f;
341 
342 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
343 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
344 	f |= bit;
345 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
346 }
347 
348 void
349 tl_dio_clrbit(struct tl_softc *sc, int reg, int bit)
350 {
351 	u_int8_t			f;
352 
353 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
354 	f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
355 	f &= ~bit;
356 	CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
357 }
358 
359 void
360 tl_dio_setbit16(struct tl_softc *sc, int reg, int bit)
361 {
362 	u_int16_t			f;
363 
364 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
365 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
366 	f |= bit;
367 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
368 }
369 
370 void
371 tl_dio_clrbit16(struct tl_softc *sc, int reg, int bit)
372 {
373 	u_int16_t			f;
374 
375 	CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
376 	f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
377 	f &= ~bit;
378 	CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
379 }
380 
381 /*
382  * Send an instruction or address to the EEPROM, check for ACK.
383  */
384 u_int8_t
385 tl_eeprom_putbyte(struct tl_softc *sc, int byte)
386 {
387 	int			i, ack = 0;
388 
389 	/*
390 	 * Make sure we're in TX mode.
391 	 */
392 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
393 
394 	/*
395 	 * Feed in each bit and strobe the clock.
396 	 */
397 	for (i = 0x80; i; i >>= 1) {
398 		if (byte & i)
399 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
400 		else
401 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
402 		DELAY(1);
403 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
404 		DELAY(1);
405 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
406 	}
407 
408 	/*
409 	 * Turn off TX mode.
410 	 */
411 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
412 
413 	/*
414 	 * Check for ack.
415 	 */
416 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
417 	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
418 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
419 
420 	return(ack);
421 }
422 
423 /*
424  * Read a byte of data stored in the EEPROM at address 'addr.'
425  */
426 u_int8_t
427 tl_eeprom_getbyte(struct tl_softc *sc, int addr, u_int8_t *dest)
428 {
429 	int			i;
430 	u_int8_t		byte = 0;
431 
432 	tl_dio_write8(sc, TL_NETSIO, 0);
433 
434 	EEPROM_START;
435 
436 	/*
437 	 * Send write control code to EEPROM.
438 	 */
439 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
440 		printf("%s: failed to send write command, status: %x\n",
441 			sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
442 		return(1);
443 	}
444 
445 	/*
446 	 * Send address of byte we want to read.
447 	 */
448 	if (tl_eeprom_putbyte(sc, addr)) {
449 		printf("%s: failed to send address, status: %x\n",
450 			sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
451 		return(1);
452 	}
453 
454 	EEPROM_STOP;
455 	EEPROM_START;
456 	/*
457 	 * Send read control code to EEPROM.
458 	 */
459 	if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
460 		printf("%s: failed to send write command, status: %x\n",
461 			sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
462 		return(1);
463 	}
464 
465 	/*
466 	 * Start reading bits from EEPROM.
467 	 */
468 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
469 	for (i = 0x80; i; i >>= 1) {
470 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
471 		DELAY(1);
472 		if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
473 			byte |= i;
474 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
475 		DELAY(1);
476 	}
477 
478 	EEPROM_STOP;
479 
480 	/*
481 	 * No ACK generated for read, so just return byte.
482 	 */
483 
484 	*dest = byte;
485 
486 	return(0);
487 }
488 
489 /*
490  * Read a sequence of bytes from the EEPROM.
491  */
492 int
493 tl_read_eeprom(struct tl_softc *sc, caddr_t dest, int off, int cnt)
494 {
495 	int			err = 0, i;
496 	u_int8_t		byte = 0;
497 
498 	for (i = 0; i < cnt; i++) {
499 		err = tl_eeprom_getbyte(sc, off + i, &byte);
500 		if (err)
501 			break;
502 		*(dest + i) = byte;
503 	}
504 
505 	return(err ? 1 : 0);
506 }
507 
508 void
509 tl_mii_sync(struct tl_softc *sc)
510 {
511 	int			i;
512 
513 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
514 
515 	for (i = 0; i < 32; i++) {
516 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
517 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
518 	}
519 }
520 
521 void
522 tl_mii_send(struct tl_softc *sc, u_int32_t bits, int cnt)
523 {
524 	int			i;
525 
526 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
527 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
528 		if (bits & i)
529 			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
530 		else
531 			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
532 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
533 	}
534 }
535 
/*
 * Bit-bang a complete MII read frame and collect the 16-bit result in
 * frame->mii_data.  Returns 0 on success, 1 if the addressed PHY did
 * not drive the ACK bit low.  Runs at splnet() with the chip's MII
 * interrupt masked so the hand-clocked sequence is not disturbed.
 */
int
tl_mii_readreg(struct tl_softc *sc, struct tl_mii_frame *frame)
{
	int			i, ack, s;
	int			minten = 0;

	s = splnet();

	tl_mii_sync(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn off MII interrupt by forcing MINTEN low; remember its
	 * previous state so it can be restored on the way out.
	 */
	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
	if (minten)
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);

	/*
	 * Turn on data xmit.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/*
	 * Send command/address info.
	 */
	tl_mii_send(sc, frame->mii_stdelim, 2);
	tl_mii_send(sc, frame->mii_opcode, 2);
	tl_mii_send(sc, frame->mii_phyaddr, 5);
	tl_mii_send(sc, frame->mii_regaddr, 5);

	/*
	 * Turn off xmit so the PHY can drive the turnaround/data bits.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Idle bit */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Check for ack: the PHY pulls MDATA low to acknowledge. */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;

	/* Complete the cycle */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHYs in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
		if (!ack) {
			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
				frame->mii_data |= i;
		}
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	}

fail:

	/* One trailing clock cycle to finish the frame. */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Reenable interrupts */
	if (minten)
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
626 
627 int
628 tl_mii_writereg(struct tl_softc *sc, struct tl_mii_frame *frame)
629 {
630 	int			s;
631 	int			minten;
632 
633 	tl_mii_sync(sc);
634 
635 	s = splnet();
636 	/*
637 	 * Set up frame for TX.
638 	 */
639 
640 	frame->mii_stdelim = TL_MII_STARTDELIM;
641 	frame->mii_opcode = TL_MII_WRITEOP;
642 	frame->mii_turnaround = TL_MII_TURNAROUND;
643 
644 	/*
645 	 * Turn off MII interrupt by forcing MINTEN low.
646 	 */
647 	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
648 	if (minten)
649 		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
650 
651 	/*
652  	 * Turn on data output.
653 	 */
654 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
655 
656 	tl_mii_send(sc, frame->mii_stdelim, 2);
657 	tl_mii_send(sc, frame->mii_opcode, 2);
658 	tl_mii_send(sc, frame->mii_phyaddr, 5);
659 	tl_mii_send(sc, frame->mii_regaddr, 5);
660 	tl_mii_send(sc, frame->mii_turnaround, 2);
661 	tl_mii_send(sc, frame->mii_data, 16);
662 
663 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
664 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
665 
666 	/*
667 	 * Turn off xmit.
668 	 */
669 	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
670 
671 	/* Reenable interrupts */
672 	if (minten)
673 		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
674 
675 	splx(s);
676 
677 	return(0);
678 }
679 
680 int
681 tl_miibus_readreg(struct device *dev, int phy, int reg)
682 {
683 	struct tl_softc *sc = (struct tl_softc *)dev;
684 	struct tl_mii_frame	frame;
685 
686 	bzero(&frame, sizeof(frame));
687 
688 	frame.mii_phyaddr = phy;
689 	frame.mii_regaddr = reg;
690 	tl_mii_readreg(sc, &frame);
691 
692 	return(frame.mii_data);
693 }
694 
695 void
696 tl_miibus_writereg(struct device *dev, int phy, int reg, int data)
697 {
698 	struct tl_softc *sc = (struct tl_softc *)dev;
699 	struct tl_mii_frame	frame;
700 
701 	bzero(&frame, sizeof(frame));
702 
703 	frame.mii_phyaddr = phy;
704 	frame.mii_regaddr = reg;
705 	frame.mii_data = data;
706 
707 	tl_mii_writereg(sc, &frame);
708 }
709 
710 void
711 tl_miibus_statchg(struct device *dev)
712 {
713 	struct tl_softc *sc = (struct tl_softc *)dev;
714 
715 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
716 		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
717 	else
718 		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
719 }
720 
721 /*
722  * Set modes for bitrate devices.
723  */
724 void
725 tl_setmode(struct tl_softc *sc, int media)
726 {
727 	if (IFM_SUBTYPE(media) == IFM_10_5)
728 		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
729 	if (IFM_SUBTYPE(media) == IFM_10_T) {
730 		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
731 		if ((media & IFM_GMASK) == IFM_FDX) {
732 			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
733 			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
734 		} else {
735 			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
736 			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
737 		}
738 	}
739 }
740 
741 /*
742  * Calculate the hash of a MAC address for programming the multicast hash
743  * table.  This hash is simply the address split into 6-bit chunks
744  * XOR'd, e.g.
745  * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
746  * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
747  * Bytes 0-2 and 3-5 are symmetrical, so are folded together.  Then
748  * the folded 24-bit value is split into 6-bit portions and XOR'd.
749  */
750 int
751 tl_calchash(caddr_t addr)
752 {
753 	int			t;
754 
755 	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
756 		(addr[2] ^ addr[5]);
757 	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
758 }
759 
760 /*
761  * The ThunderLAN has a perfect MAC address filter in addition to
762  * the multicast hash filter. The perfect filter can be programmed
763  * with up to four MAC addresses. The first one is always used to
764  * hold the station address, which leaves us free to use the other
765  * three for multicast addresses.
766  */
767 void
768 tl_setfilt(struct tl_softc *sc, caddr_t addr, int slot)
769 {
770 	int			i;
771 	u_int16_t		regaddr;
772 
773 	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
774 
775 	for (i = 0; i < ETHER_ADDR_LEN; i++)
776 		tl_dio_write8(sc, regaddr + i, *(addr + i));
777 }
778 
779 /*
780  * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
781  * linked list. This is fine, except addresses are added from the head
782  * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
783  * group to always be in the perfect filter, but as more groups are added,
784  * the 224.0.0.1 entry (which is always added first) gets pushed down
785  * the list and ends up at the tail. So after 3 or 4 multicast groups
786  * are added, the all-hosts entry gets pushed out of the perfect filter
787  * and into the hash table.
788  *
789  * Because the multicast list is a doubly-linked list as opposed to a
790  * circular queue, we don't have the ability to just grab the tail of
791  * the list and traverse it backwards. Instead, we have to traverse
792  * the list once to find the tail, then traverse it again backwards to
793  * update the multicast filter.
794  */
795 void
796 tl_iff(struct tl_softc *sc)
797 {
798 	struct ifnet		*ifp = &sc->arpcom.ac_if;
799 	struct arpcom		*ac = &sc->arpcom;
800 	struct ether_multistep step;
801 	struct ether_multi *enm;
802 	u_int32_t		hashes[2];
803 	int			h = 0;
804 
805 	tl_dio_clrbit(sc, TL_NETCMD, (TL_CMD_CAF | TL_CMD_NOBRX));
806 	bzero(hashes, sizeof(hashes));
807 	ifp->if_flags &= ~IFF_ALLMULTI;
808 
809 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
810 		ifp->if_flags |= IFF_ALLMULTI;
811 		if (ifp->if_flags & IFF_PROMISC)
812 			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
813 		else
814 			hashes[0] = hashes[1] = 0xffffffff;
815 	} else {
816 		ETHER_FIRST_MULTI(step, ac, enm);
817 		while (enm != NULL) {
818 			h = tl_calchash(enm->enm_addrlo);
819 
820 			if (h < 32)
821 				hashes[0] |= (1 << h);
822 			else
823 				hashes[1] |= (1 << (h - 32));
824 
825 			ETHER_NEXT_MULTI(step, enm);
826 		}
827 	}
828 
829 	tl_dio_write32(sc, TL_HASH1, hashes[0]);
830 	tl_dio_write32(sc, TL_HASH2, hashes[1]);
831 }
832 
833 /*
834  * This routine is recommended by the ThunderLAN manual to insure that
835  * the internal PHY is powered up correctly. It also recommends a one
836  * second pause at the end to 'wait for the clocks to start' but in my
837  * experience this isn't necessary.
838  */
839 void
840 tl_hardreset(struct device *dev)
841 {
842 	struct tl_softc		*sc = (struct tl_softc *)dev;
843 	int			i;
844 	u_int16_t		flags;
845 
846 	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;
847 
848 	for (i =0 ; i < MII_NPHY; i++)
849 		tl_miibus_writereg(dev, i, MII_BMCR, flags);
850 
851 	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
852 	tl_mii_sync(sc);
853 	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);
854 
855 	DELAY(5000);
856 }
857 
858 void
859 tl_softreset(struct tl_softc *sc, int internal)
860 {
861         u_int32_t               cmd, dummy, i;
862 
863         /* Assert the adapter reset bit. */
864 	CMD_SET(sc, TL_CMD_ADRST);
865         /* Turn off interrupts */
866 	CMD_SET(sc, TL_CMD_INTSOFF);
867 
868 	/* First, clear the stats registers. */
869 	for (i = 0; i < 5; i++)
870 		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);
871 
872         /* Clear Areg and Hash registers */
873 	for (i = 0; i < 8; i++)
874 		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);
875 
876         /*
877 	 * Set up Netconfig register. Enable one channel and
878 	 * one fragment mode.
879 	 */
880 	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
881 	if (internal && !sc->tl_bitrate) {
882 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
883 	} else {
884 		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
885 	}
886 
887 	/* Handle cards with bitrate devices. */
888 	if (sc->tl_bitrate)
889 		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);
890 
891 	/*
892 	 * Load adapter irq pacing timer and tx threshold.
893 	 * We make the transmit threshold 1 initially but we may
894 	 * change that later.
895 	 */
896 	cmd = CSR_READ_4(sc, TL_HOSTCMD);
897 	cmd |= TL_CMD_NES;
898 	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
899 	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
900 	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));
901 
902         /* Unreset the MII */
903 	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);
904 
905 	/* Take the adapter out of reset */
906 	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);
907 
908 	/* Wait for things to settle down a little. */
909 	DELAY(500);
910 }
911 
912 /*
913  * Initialize the transmit lists.
914  */
915 int
916 tl_list_tx_init(struct tl_softc *sc)
917 {
918 	struct tl_chain_data	*cd;
919 	struct tl_list_data	*ld;
920 	int			i;
921 
922 	cd = &sc->tl_cdata;
923 	ld = sc->tl_ldata;
924 	for (i = 0; i < TL_TX_LIST_CNT; i++) {
925 		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
926 		if (i == (TL_TX_LIST_CNT - 1))
927 			cd->tl_tx_chain[i].tl_next = NULL;
928 		else
929 			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
930 	}
931 
932 	cd->tl_tx_free = &cd->tl_tx_chain[0];
933 	cd->tl_tx_tail = cd->tl_tx_head = NULL;
934 	sc->tl_txeoc = 1;
935 
936 	return(0);
937 }
938 
939 /*
940  * Initialize the RX lists and allocate mbufs for them.
941  */
942 int
943 tl_list_rx_init(struct tl_softc *sc)
944 {
945 	struct tl_chain_data	*cd;
946 	struct tl_list_data	*ld;
947 	int			i;
948 
949 	cd = &sc->tl_cdata;
950 	ld = sc->tl_ldata;
951 
952 	for (i = 0; i < TL_RX_LIST_CNT; i++) {
953 		cd->tl_rx_chain[i].tl_ptr =
954 			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
955 		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
956 			return(ENOBUFS);
957 		if (i == (TL_RX_LIST_CNT - 1)) {
958 			cd->tl_rx_chain[i].tl_next = NULL;
959 			ld->tl_rx_list[i].tlist_fptr = 0;
960 		} else {
961 			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
962 			ld->tl_rx_list[i].tlist_fptr =
963 					VTOPHYS(&ld->tl_rx_list[i + 1]);
964 		}
965 	}
966 
967 	cd->tl_rx_head = &cd->tl_rx_chain[0];
968 	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
969 
970 	return(0);
971 }
972 
973 int
974 tl_newbuf(struct tl_softc *sc, struct tl_chain_onefrag *c)
975 {
976 	struct mbuf		*m_new = NULL;
977 
978 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
979 	if (m_new == NULL) {
980 		return(ENOBUFS);
981 	}
982 
983 	MCLGET(m_new, M_DONTWAIT);
984 	if (!(m_new->m_flags & M_EXT)) {
985 		m_freem(m_new);
986 		return(ENOBUFS);
987 	}
988 
989 #ifdef __alpha__
990 	m_new->m_data += 2;
991 #endif
992 
993 	c->tl_mbuf = m_new;
994 	c->tl_next = NULL;
995 	c->tl_ptr->tlist_frsize = MCLBYTES;
996 	c->tl_ptr->tlist_fptr = 0;
997 	c->tl_ptr->tl_frag.tlist_dadr = VTOPHYS(mtod(m_new, caddr_t));
998 	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
999 	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
1000 
1001 	return(0);
1002 }
1003 /*
1004  * Interrupt handler for RX 'end of frame' condition (EOF). This
1005  * tells us that a full ethernet frame has been captured and we need
1006  * to handle it.
1007  *
1008  * Reception is done using 'lists' which consist of a header and a
1009  * series of 10 data count/data address pairs that point to buffers.
1010  * Initially you're supposed to create a list, populate it with pointers
1011  * to buffers, then load the physical address of the list into the
1012  * ch_parm register. The adapter is then supposed to DMA the received
1013  * frame into the buffers for you.
1014  *
1015  * To make things as fast as possible, we have the chip DMA directly
1016  * into mbufs. This saves us from having to do a buffer copy: we can
1017  * just hand the mbufs directly to the network stack. Once the frame
1018  * has been sent on its way, the 'list' structure is assigned a new
 * buffer and moved to the end of the RX chain. As long as we stay
1020  * ahead of the chip, it will always think it has an endless receive
1021  * channel.
1022  *
1023  * If we happen to fall behind and the chip manages to fill up all of
1024  * the buffers, it will generate an end of channel interrupt and wait
1025  * for us to empty the chain and restart the receiver.
1026  */
int
tl_intvec_rxeof(void *xsc, u_int32_t type)
{
	struct tl_softc		*sc;
	int			r = 0, total_len = 0;
	struct ether_header	*eh;
	struct mbuf		*m;
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct ifnet		*ifp;
	struct tl_chain_onefrag	*cur_rx;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Drain completed frames; r counts them for the interrupt ack. */
	while(sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		/* Stop at the first descriptor the chip hasn't completed. */
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			/*
			 * No replacement mbuf available: recycle the old
			 * one in place and drop the received frame.
			 */
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		/* Re-link the refreshed descriptor at the tail of the ring. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
						VTOPHYS(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		eh = mtod(m, struct ether_header *);

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions,
		 * so we drop them here and continue.
		 */
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
		 					ETHER_ADDR_LEN)) {
				m_freem(m);
				continue;
		}

		m->m_pkthdr.len = m->m_len = total_len;
		ml_enqueue(&ml, m);
	}

	/* Pass the whole batch up to the network stack at once. */
	if_input(ifp, &ml);

	return(r);
}
1086 
1087 /*
1088  * The RX-EOC condition hits when the ch_parm address hasn't been
1089  * initialized or the adapter reached a list with a forward pointer
1090  * of 0 (which indicates the end of the chain). In our case, this means
1091  * the card has hit the end of the receive buffer chain and we need to
1092  * empty out the buffers and shift the pointer back to the beginning again.
1093  */
int
tl_intvec_rxeoc(void *xsc, u_int32_t type)
{
	struct tl_softc		*sc;
	int			r;
	struct tl_chain_data	*cd;

	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	/* Rewind the software ring to the start... */
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	/* ...and point the chip's channel parameter at it again. */
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(sc->tl_cdata.tl_rx_head->tl_ptr));
	/* Have the caller restart the RX channel when it acks us. */
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}
1114 
/*
 * TX end-of-frame: reclaim descriptors for frames the chip has
 * finished sending.  Returns the number reclaimed (the ack count).
 */
int
tl_intvec_txeof(void *xsc, u_int32_t type)
{
	struct tl_softc		*sc;
	int			r = 0;
	struct tl_chain		*cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		/* Stop at the first frame the chip hasn't completed. */
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		/* Return the descriptor to the free list. */
		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
		/* A zero forward pointer marks the end of this chain. */
		if (!cur_tx->tl_ptr->tlist_fptr)
			break;
	}

	return(r);
}
1146 
1147 /*
1148  * The transmit end of channel interrupt. The adapter triggers this
1149  * interrupt to tell us it hit the end of the current transmit list.
1150  *
1151  * A note about this: it's possible for a condition to arise where
1152  * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1153  * You have to avoid this since the chip expects things to go in a
1154  * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1155  * When the TXEOF handler is called, it will free all of the transmitted
1156  * frames and reset the tx_head pointer to NULL. However, a TXEOC
1157  * interrupt should be received and acknowledged before any more frames
 * are queued for transmission. If tl_start() is called after TXEOF
1159  * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1160  * it could attempt to issue a transmit command prematurely.
1161  *
1162  * To guard against this, tl_start() will only issue transmit commands
1163  * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1164  * can set this flag once tl_start() has cleared it.
1165  */
int
tl_intvec_txeoc(void *xsc, u_int32_t type)
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		cmd;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		/* Nothing left to send: allow tl_start() to issue TX GO. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		/* More frames were queued while the chip was busy. */
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
		/* Then load the address of the next TX list. */
		CSR_WRITE_4(sc, TL_CH_PARM,
		    VTOPHYS(sc->tl_cdata.tl_tx_head->tl_ptr));
		/* Restart TX channel. */
		cmd = CSR_READ_4(sc, TL_HOSTCMD);
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		CMD_PUT(sc, cmd);
		/* Already acked above: tell tl_intr() not to ack again. */
		return(0);
	}

	return(1);
}
1200 
/*
 * Adapter check interrupt: the chip reported an internal fault.
 * Log the failure code from TL_CH_PARM and reinitialize the adapter.
 */
int
tl_intvec_adchk(void *xsc, u_int32_t type)
{
	struct tl_softc		*sc;

	sc = xsc;

	if (type)
		printf("%s: adapter check: %x\n", sc->sc_dev.dv_xname,
			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));

	/* Full reset and re-init; interrupts back on afterwards. */
	tl_softreset(sc, 1);
	tl_stop(sc);
	tl_init(sc);
	CMD_SET(sc, TL_CMD_INTSON);

	/* Nothing for the caller to ack. */
	return(0);
}
1219 
1220 int
1221 tl_intvec_netsts(void *xsc, u_int32_t type)
1222 {
1223 	struct tl_softc		*sc;
1224 	u_int16_t		netsts;
1225 
1226 	sc = xsc;
1227 
1228 	netsts = tl_dio_read16(sc, TL_NETSTS);
1229 	tl_dio_write16(sc, TL_NETSTS, netsts);
1230 
1231 	printf("%s: network status: %x\n", sc->sc_dev.dv_xname, netsts);
1232 
1233 	return(1);
1234 }
1235 
/*
 * Main interrupt handler: decode the host interrupt register,
 * dispatch to the per-type service routine, ack the interrupt with
 * the count the routine returned, and kick the transmitter if
 * packets are waiting.
 */
int
tl_intr(void *xsc)
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	int			r = 0;
	u_int32_t		type = 0;
	u_int16_t		ints = 0;
	u_int8_t		ivec = 0;

	sc = xsc;

	/* Disable interrupts */
	ints = CSR_READ_2(sc, TL_HOST_INT);
	CSR_WRITE_2(sc, TL_HOST_INT, ints);
	/*
	 * Split the register value: the raw bits (shifted high) are
	 * reused verbatim in the ack command, ivec is the interrupt
	 * vector and ints the interrupt type.
	 */
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;

	ifp = &sc->arpcom.ac_if;

	/* Each handler returns its ack count; 0 means "don't ack". */
	switch(ints) {
	case (TL_INTR_INVALID):
		/* Re-enable interrupts but don't ack this one. */
		CMD_PUT(sc, type);
		r = 0;
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof((void *)sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc((void *)sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		tl_stats_update(sc);
		r = 1;
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof((void *)sc, type);
		break;
	case (TL_INTR_DUMMY):
		printf("%s: got a dummy interrupt\n", sc->sc_dev.dv_xname);
		r = 1;
		break;
	case (TL_INTR_ADCHK):
		/*
		 * This type covers both adapter check and network
		 * status interrupts; the vector distinguishes them.
		 */
		if (ivec)
			r = tl_intvec_adchk((void *)sc, type);
		else
			r = tl_intvec_netsts((void *)sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc((void *)sc, type);
		break;
	default:
		printf("%s: bogus interrupt type\n", sc->sc_dev.dv_xname);
		break;
	}

	/* Re-enable interrupts */
	if (r) {
		CMD_PUT(sc, TL_CMD_ACK | r | type);
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		tl_start(ifp);

	return r;
}
1304 
/*
 * Harvest the chip's statistics registers into the interface
 * counters, bump the TX threshold on underruns, and reschedule
 * ourselves to run again in one second.  Also invoked from the
 * statistics-overflow interrupt path.
 */
void
tl_stats_update(void *xsc)
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	struct tl_stats		tl_stats;
	u_int32_t		*p;
	int			s;

	s = splnet();

	bzero(&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	p = (u_int32_t *)&tl_stats;

	/*
	 * Read five consecutive 32-bit statistics words into the
	 * tl_stats struct; TL_DIO_ADDR_INC makes the DIO address
	 * auto-increment after each access.
	 */
	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);

	ifp->if_opackets += tl_tx_goodframes(tl_stats);
	ifp->if_collisions += tl_stats.tl_tx_single_collision +
				tl_stats.tl_tx_multi_collision;
	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
			    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	if (tl_tx_underrun(tl_stats)) {
		u_int8_t	tx_thresh;
		/* Raise the TX FIFO threshold one step, capped at
		 * whole-packet, to avoid further underruns. */
		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
			tx_thresh >>= 4;
			tx_thresh++;
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
		}
	}

	/* Run again in one second. */
	timeout_add_sec(&sc->tl_stats_tmo, 1);

	if (!sc->tl_bitrate)
		mii_tick(&sc->sc_mii);

	splx(s);
}
1356 
1357 /*
1358  * Encapsulate an mbuf chain in a list by coupling the mbuf data
1359  * pointers to the fragment pointers.
1360  */
int
tl_encap(struct tl_softc *sc, struct tl_chain *c, struct mbuf *m_head)
{
	int			frag = 0;
	struct tl_frag		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == TL_MAXFRAGS)
				break;
			total_len+= m->m_len;
			c->tl_ptr->tl_frag[frag].tlist_dadr =
				VTOPHYS(mtod(m, vaddr_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(1);
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		/* The coalesced copy replaces the original chain. */
		m_freem(m_head);
		m_head = m_new;
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = VTOPHYS(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 * NOTE(review): if the loop above filled all TL_MAXFRAGS
	 * slots with mbufs totalling under TL_MIN_FRAMELEN, this
	 * would index one past tl_frag[]; presumably that cannot
	 * occur in practice -- worth confirming.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		f->tlist_dadr = VTOPHYS(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	/* Flag the final fragment and finish the list header. */
	c->tl_mbuf = m_head;
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}
1442 
1443 /*
1444  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1445  * to the mbuf data regions directly in the transmit lists. We also save a
1446  * copy of the pointers since the transmit list fragment pointers are
1447  * physical addresses.
1448  */
void
tl_start(struct ifnet *ifp)
{
	struct tl_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		cmd;
	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->tl_cdata.tl_tx_free;

	/* Dequeue packets and link them into a chain of TX lists. */
	while(sc->tl_cdata.tl_tx_free != NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/* Pack the data into the list. */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = VTOPHYS(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->tl_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		/* Only start the channel once TXEOC has been seen
		 * (see the tl_txeoc protocol notes above tl_intvec_txeoc). */
		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		/* Transmission in progress: append to the active chain. */
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 10;
}
1537 
/*
 * Stop, reconfigure and (re)start the adapter: program thresholds,
 * the MAC address and RX filter, rebuild the RX/TX rings, select the
 * media, and start the receiver.
 */
void
tl_init(void *xsc)
{
	struct tl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
        int			s;

	s = splnet();

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	/* Largest frame the chip should receive: one cluster. */
	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);

	/* Program promiscuous mode and multicast filters. */
	tl_iff(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
			sc->sc_dev.dv_xname);
		tl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(&sc->tl_ldata->tl_rx_list[0]));

	/* Media selection: MII if PHYs exist, bitrate mode otherwise. */
	if (!sc->tl_bitrate)
		mii_mediachg(&sc->sc_mii);
	else
		tl_ifmedia_upd(ifp);

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	splx(s);

	/* Start the stats update counter */
	timeout_set(&sc->tl_stats_tmo, tl_stats_update, sc);
	timeout_add_sec(&sc->tl_stats_tmo, 1);
	/* Mark the interface RUNNING after a two second delay. */
	timeout_set(&sc->tl_wait_tmo, tl_wait_up, sc);
	timeout_add_sec(&sc->tl_wait_tmo, 2);
}
1602 
1603 /*
1604  * Set media options.
1605  */
1606 int
1607 tl_ifmedia_upd(struct ifnet *ifp)
1608 {
1609 	struct tl_softc *sc = ifp->if_softc;
1610 
1611 	if (sc->tl_bitrate)
1612 		tl_setmode(sc, sc->ifmedia.ifm_media);
1613 	else
1614 		mii_mediachg(&sc->sc_mii);
1615 
1616 	return(0);
1617 }
1618 
1619 /*
1620  * Report current media status.
1621  */
1622 void
1623 tl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1624 {
1625 	struct tl_softc		*sc;
1626 	struct mii_data		*mii;
1627 
1628 	sc = ifp->if_softc;
1629 	mii = &sc->sc_mii;
1630 
1631 	ifmr->ifm_active = IFM_ETHER;
1632 	if (sc->tl_bitrate) {
1633 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
1634 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
1635 		else
1636 			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1637 		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
1638 			ifmr->ifm_active |= IFM_HDX;
1639 		else
1640 			ifmr->ifm_active |= IFM_FDX;
1641 		return;
1642 	} else {
1643 		mii_pollstat(mii);
1644 		ifmr->ifm_active = mii->mii_media_active;
1645 		ifmr->ifm_status = mii->mii_media_status;
1646 	}
1647 }
1648 
int
tl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct tl_softc		*sc = ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *) data;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Bring the interface up and (for inet) prime ARP. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			tl_init(sc);
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: just reprogram the RX filter. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				tl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				tl_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Bitrate devices use their own ifmedia, not the MII's. */
		if (sc->tl_bitrate)
			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/* ENETRESET (from above or ether_ioctl): reload the filter. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			tl_iff(sc);
		error = 0;
	}

	splx(s);
	return(error);
}
1702 
1703 void
1704 tl_watchdog(struct ifnet *ifp)
1705 {
1706 	struct tl_softc		*sc;
1707 
1708 	sc = ifp->if_softc;
1709 
1710 	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1711 
1712 	ifp->if_oerrors++;
1713 
1714 	tl_softreset(sc, 1);
1715 	tl_init(sc);
1716 }
1717 
1718 /*
1719  * Stop the adapter and free any mbufs allocated to the
1720  * RX and TX lists.
1721  */
void
tl_stop(struct tl_softc *sc)
{
	int			i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Stop the stats updater. */
	timeout_del(&sc->tl_stats_tmo);
	timeout_del(&sc->tl_wait_tmo);

	/* Stop the transmitter */
	CMD_CLR(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/* Stop the receiver */
	CMD_SET(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Disable host interrupts.
	 */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * Clear list pointer.
	 */
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Free the RX lists.
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
		}
	}
	/* Clear the RX descriptors themselves as well. */
	bzero(&sc->tl_ldata->tl_rx_list, sizeof(sc->tl_ldata->tl_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero(&sc->tl_ldata->tl_tx_list, sizeof(sc->tl_ldata->tl_tx_list));

	/* Mark the interface down and idle. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
1778 
1779 int
1780 tl_probe(struct device *parent, void *match, void *aux)
1781 {
1782 	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1783 
1784 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI) {
1785 		if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TI_TLAN)
1786 			return 1;
1787 		return 0;
1788 	}
1789 
1790 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ) {
1791 		switch (PCI_PRODUCT(pa->pa_id)) {
1792 		case PCI_PRODUCT_COMPAQ_N100TX:
1793 		case PCI_PRODUCT_COMPAQ_N10T:
1794 		case PCI_PRODUCT_COMPAQ_IntNF3P:
1795 		case PCI_PRODUCT_COMPAQ_DPNet100TX:
1796 		case PCI_PRODUCT_COMPAQ_IntPL100TX:
1797 		case PCI_PRODUCT_COMPAQ_DP4000:
1798 		case PCI_PRODUCT_COMPAQ_N10T2:
1799 		case PCI_PRODUCT_COMPAQ_N10_TX_UTP:
1800 		case PCI_PRODUCT_COMPAQ_NF3P:
1801 		case PCI_PRODUCT_COMPAQ_NF3P_BNC:
1802 			return 1;
1803 		}
1804 		return 0;
1805 	}
1806 
1807 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) {
1808 		switch (PCI_PRODUCT(pa->pa_id)) {
1809 		case PCI_PRODUCT_OLICOM_OC2183:
1810 		case PCI_PRODUCT_OLICOM_OC2325:
1811 		case PCI_PRODUCT_OLICOM_OC2326:
1812 			return 1;
1813 		}
1814 		return 0;
1815 	}
1816 
1817 	return 0;
1818 }
1819 
1820 void
1821 tl_attach(struct device *parent, struct device *self, void *aux)
1822 {
1823 	struct tl_softc *sc = (struct tl_softc *)self;
1824 	struct pci_attach_args *pa = aux;
1825 	pci_chipset_tag_t pc = pa->pa_pc;
1826 	pci_intr_handle_t ih;
1827 	const char *intrstr = NULL;
1828 	struct ifnet *ifp = &sc->arpcom.ac_if;
1829 	bus_size_t iosize;
1830 	u_int32_t command;
1831 	int i, rseg;
1832 	bus_dma_segment_t seg;
1833 	bus_dmamap_t dmamap;
1834 	caddr_t kva;
1835 
1836 	/*
1837 	 * Map control/status registers.
1838 	 */
1839 
1840 #ifdef TL_USEIOSPACE
1841 	if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
1842 	    &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) {
1843 		if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_IO, 0,
1844 		    &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) {
1845 			printf(": can't map i/o space\n");
1846 			return;
1847 		}
1848 	}
1849 #else
1850 	if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
1851 	    &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){
1852 		if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_MEM, 0,
1853 		    &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){
1854 			printf(": can't map mem space\n");
1855 			return;
1856 		}
1857 	}
1858 #endif
1859 
1860 	/*
1861 	 * Manual wants the PCI latency timer jacked up to 0xff
1862 	 */
1863 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER);
1864 	command |= 0x0000ff00;
1865 	pci_conf_write(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER, command);
1866 
1867 	/*
1868 	 * Allocate our interrupt.
1869 	 */
1870 	if (pci_intr_map(pa, &ih)) {
1871 		printf(": couldn't map interrupt\n");
1872 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1873 		return;
1874 	}
1875 	intrstr = pci_intr_string(pc, ih);
1876 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, tl_intr, sc,
1877 	    self->dv_xname);
1878 	if (sc->sc_ih == NULL) {
1879 		printf(": could not establish interrupt");
1880 		if (intrstr != NULL)
1881 			printf(" at %s", intrstr);
1882 		printf("\n");
1883 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1884 		return;
1885 	}
1886 	printf(": %s", intrstr);
1887 
1888 	sc->sc_dmat = pa->pa_dmat;
1889 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct tl_list_data),
1890 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
1891 		printf("%s: can't alloc list\n", sc->sc_dev.dv_xname);
1892 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1893 		return;
1894 	}
1895 	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct tl_list_data),
1896 	    &kva, BUS_DMA_NOWAIT)) {
1897 		printf("%s: can't map dma buffers (%zd bytes)\n",
1898 		    sc->sc_dev.dv_xname, sizeof(struct tl_list_data));
1899 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1900 		return;
1901 	}
1902 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct tl_list_data), 1,
1903 	    sizeof(struct tl_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
1904 		printf("%s: can't create dma map\n", sc->sc_dev.dv_xname);
1905 		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data));
1906 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1907 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1908 		return;
1909 	}
1910 	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva,
1911 	    sizeof(struct tl_list_data), NULL, BUS_DMA_NOWAIT)) {
1912 		printf("%s: can't load dma map\n", sc->sc_dev.dv_xname);
1913 		bus_dmamap_destroy(sc->sc_dmat, dmamap);
1914 		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data));
1915 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1916 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1917 		return;
1918 	}
1919 	sc->tl_ldata = (struct tl_list_data *)kva;
1920 
1921 	for (sc->tl_product = tl_prods; sc->tl_product->tp_vend;
1922 	     sc->tl_product++) {
1923 		if (sc->tl_product->tp_vend == PCI_VENDOR(pa->pa_id) &&
1924 		    sc->tl_product->tp_prod == PCI_PRODUCT(pa->pa_id))
1925 			break;
1926 	}
1927 
1928 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ ||
1929 	    PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI)
1930 		sc->tl_eeaddr = TL_EEPROM_EADDR;
1931 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM)
1932 		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
1933 
1934 	/*
1935 	 * Reset adapter.
1936 	 */
1937 	tl_softreset(sc, 1);
1938 	tl_hardreset(self);
1939 	DELAY(1000000);
1940 	tl_softreset(sc, 1);
1941 
1942 	/*
1943 	 * Get station address from the EEPROM.
1944 	 */
1945 	if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1946 	    sc->tl_eeaddr, ETHER_ADDR_LEN)) {
1947 		printf("\n%s: failed to read station address\n",
1948 		    sc->sc_dev.dv_xname);
1949 		bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
1950 		return;
1951 	}
1952 
1953 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) {
1954 		for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
1955 			u_int16_t *p;
1956 
1957 			p = (u_int16_t *)&sc->arpcom.ac_enaddr[i];
1958 			*p = ntohs(*p);
1959 		}
1960 	}
1961 
1962 	printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
1963 
1964 	ifp = &sc->arpcom.ac_if;
1965 	ifp->if_softc = sc;
1966 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1967 	ifp->if_ioctl = tl_ioctl;
1968 	ifp->if_start = tl_start;
1969 	ifp->if_watchdog = tl_watchdog;
1970 	IFQ_SET_MAXLEN(&ifp->if_snd, TL_TX_LIST_CNT - 1);
1971 	IFQ_SET_READY(&ifp->if_snd);
1972 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1973 
1974 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1975 
1976 	/*
1977 	 * Reset adapter (again).
1978 	 */
1979 	tl_softreset(sc, 1);
1980 	tl_hardreset(self);
1981 	DELAY(1000000);
1982 	tl_softreset(sc, 1);
1983 
1984 	/*
1985 	 * Do MII setup. If no PHYs are found, then this is a
1986 	 * bitrate ThunderLAN chip that only supports 10baseT
1987 	 * and AUI/BNC.
1988 	 */
1989 	sc->sc_mii.mii_ifp = ifp;
1990 	sc->sc_mii.mii_readreg = tl_miibus_readreg;
1991 	sc->sc_mii.mii_writereg = tl_miibus_writereg;
1992 	sc->sc_mii.mii_statchg = tl_miibus_statchg;
1993 	ifmedia_init(&sc->sc_mii.mii_media, 0, tl_ifmedia_upd, tl_ifmedia_sts);
1994 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
1995 	    0);
1996 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1997 		struct ifmedia *ifm;
1998 		sc->tl_bitrate = 1;
1999 		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
2000 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
2001 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
2002 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2003 		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
2004 		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
2005 		/* Reset again, this time setting bitrate mode. */
2006 		tl_softreset(sc, 1);
2007 		ifm = &sc->ifmedia;
2008 		ifm->ifm_media = ifm->ifm_cur->ifm_media;
2009 		tl_ifmedia_upd(ifp);
2010 	} else
2011 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2012 
2013 	/*
2014 	 * Attach us everywhere.
2015 	 */
2016 	if_attach(ifp);
2017 	ether_ifattach(ifp);
2018 }
2019 
2020 void
2021 tl_wait_up(void *xsc)
2022 {
2023 	struct tl_softc *sc = xsc;
2024 	struct ifnet *ifp = &sc->arpcom.ac_if;
2025 
2026 	ifp->if_flags |= IFF_RUNNING;
2027 	ifp->if_flags &= ~IFF_OACTIVE;
2028 }
2029 
/* Autoconf glue: match/attach entry points and driver class. */
struct cfattach tl_ca = {
	sizeof(struct tl_softc), tl_probe, tl_attach
};

struct cfdriver tl_cd = {
	NULL, "tl", DV_IFNET
};
2037