1 /*	$OpenBSD: xl.c,v 1.141 2024/11/05 18:58:59 miod Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
35  */
36 
37 /*
38  * 3Com 3c90x Etherlink XL PCI NIC driver
39  *
40  * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
41  * bus-master chips (3c90x cards and embedded controllers) including
42  * the following:
43  *
44  * 3Com 3c900-TPO	10Mbps/RJ-45
45  * 3Com 3c900-COMBO	10Mbps/RJ-45,AUI,BNC
46  * 3Com 3c905-TX	10/100Mbps/RJ-45
47  * 3Com 3c905-T4	10/100Mbps/RJ-45
48  * 3Com 3c900B-TPO	10Mbps/RJ-45
49  * 3Com 3c900B-COMBO	10Mbps/RJ-45,AUI,BNC
50  * 3Com 3c900B-TPC	10Mbps/RJ-45,BNC
51  * 3Com 3c900B-FL	10Mbps/Fiber-optic
52  * 3Com 3c905B-COMBO	10/100Mbps/RJ-45,AUI,BNC
53  * 3Com 3c905B-TX	10/100Mbps/RJ-45
54  * 3Com 3c905B-FL/FX	10/100Mbps/Fiber-optic
55  * 3Com 3c905C-TX	10/100Mbps/RJ-45 (Tornado ASIC)
56  * 3Com 3c980-TX	10/100Mbps server adapter (Hurricane ASIC)
57  * 3Com 3c980C-TX	10/100Mbps server adapter (Tornado ASIC)
58  * 3Com 3cSOHO100-TX	10/100Mbps/RJ-45 (Hurricane ASIC)
59  * 3Com 3c450-TX	10/100Mbps/RJ-45 (Tornado ASIC)
60  * 3Com 3c555		10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
61  * 3Com 3c556		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62  * 3Com 3c556B		10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
63  * 3Com 3c575TX		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64  * 3Com 3c575B		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65  * 3Com 3c575C		10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66  * 3Com 3cxfem656	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67  * 3Com 3cxfem656b	10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
68  * 3Com 3cxfem656c	10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
69  * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
70  * Dell on-board 3c920 10/100Mbps/RJ-45
71  * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
72  * Dell Latitude laptop docking station embedded 3c905-TX
73  *
74  * Written by Bill Paul <wpaul@ctr.columbia.edu>
75  * Electrical Engineering Department
76  * Columbia University, New York City
77  */
78 
79 /*
80  * The 3c90x series chips use a bus-master DMA interface for transferring
81  * packets to and from the controller chip. Some of the "vortex" cards
82  * (3c59x) also supported a bus master mode, however for those chips
83  * you could only DMA packets to/from a contiguous memory buffer. For
84  * transmission this would mean copying the contents of the queued mbuf
85  * chain into an mbuf cluster and then DMAing the cluster. This extra
86  * copy would sort of defeat the purpose of the bus master support for
87  * any packet that doesn't fit into a single mbuf.
88  *
89  * By contrast, the 3c90x cards support a fragment-based bus master
90  * mode where mbuf chains can be encapsulated using TX descriptors.
91  * This is similar to other PCI chips such as the Texas Instruments
92  * ThunderLAN and the Intel 82557/82558.
93  *
94  * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
95  * bus master chips because they maintain the old PIO interface for
96  * backwards compatibility, but starting with the 3c905B and the
97  * "cyclone" chips, the compatibility interface has been dropped.
98  * Since using bus master DMA is a big win, we use this driver even
99  * for the PCI "boomerang" chips, which also work with the "vortex"
100  * driver, in order to obtain better performance.
101  */
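/*
 * A rough sketch of the DMA descriptor layout this driver programs
 * (the authoritative definitions live in xlreg.h): each descriptor
 * carries a link to the next descriptor, a status word and one or
 * more fragment address/length pairs, so an mbuf chain can be handed
 * to the chip without copying. Field order here is illustrative:
 *
 *	struct xl_frag { u_int32_t xl_addr, xl_len; };
 *	struct xl_list { u_int32_t xl_next, xl_status;
 *			 struct xl_frag xl_frag[XL_MAXFRAGS]; };
 */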
102 
103 #include "bpfilter.h"
104 
105 #include <sys/param.h>
106 #include <sys/systm.h>
107 #include <sys/mbuf.h>
108 #include <sys/socket.h>
109 #include <sys/ioctl.h>
110 #include <sys/errno.h>
111 #include <sys/malloc.h>
112 #include <sys/kernel.h>
113 #include <sys/device.h>
114 
115 #include <net/if.h>
116 #include <net/if_media.h>
117 
118 #include <netinet/in.h>
119 #include <netinet/if_ether.h>
120 
121 #include <dev/mii/miivar.h>
122 
123 #include <machine/bus.h>
124 
125 #if NBPFILTER > 0
126 #include <net/bpf.h>
127 #endif
128 
129 #include <dev/ic/xlreg.h>
130 
131 /*
132  * TX Checksumming is disabled by default for two reasons:
133  * - TX Checksumming will occasionally produce corrupt packets
134  * - TX Checksumming seems to reduce performance
135  *
136  * Only 905B/C cards were reported to have this problem; it is possible
137  * that later chips _may_ be immune.
138  */
139 #define	XL905B_TXCSUM_BROKEN	1
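/*
 * The gate above is honored in xl_encap() below: with the define
 * removed, the driver would request IP/TCP/UDP transmit checksums
 * from the 905B based on each mbuf's csum_flags.
 */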
140 
141 int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
142 void xl_stats_update(void *);
143 int xl_encap(struct xl_softc *, struct xl_chain *,
144     struct mbuf *);
145 void xl_rxeof(struct xl_softc *);
146 void xl_txeof(struct xl_softc *);
147 void xl_txeof_90xB(struct xl_softc *);
148 void xl_txeoc(struct xl_softc *);
149 int xl_intr(void *);
150 void xl_start(struct ifnet *);
151 void xl_start_90xB(struct ifnet *);
152 int xl_ioctl(struct ifnet *, u_long, caddr_t);
153 void xl_freetxrx(struct xl_softc *);
154 void xl_watchdog(struct ifnet *);
155 int xl_ifmedia_upd(struct ifnet *);
156 void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157 
158 int xl_eeprom_wait(struct xl_softc *);
159 int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
160 void xl_mii_sync(struct xl_softc *);
161 void xl_mii_send(struct xl_softc *, u_int32_t, int);
162 int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
163 int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
164 
165 void xl_setcfg(struct xl_softc *);
166 void xl_setmode(struct xl_softc *, uint64_t);
167 void xl_iff(struct xl_softc *);
168 void xl_iff_90x(struct xl_softc *);
169 void xl_iff_905b(struct xl_softc *);
170 int xl_list_rx_init(struct xl_softc *);
171 void xl_fill_rx_ring(struct xl_softc *);
172 int xl_list_tx_init(struct xl_softc *);
173 int xl_list_tx_init_90xB(struct xl_softc *);
174 void xl_wait(struct xl_softc *);
175 void xl_mediacheck(struct xl_softc *);
176 void xl_choose_xcvr(struct xl_softc *, int);
177 
178 int xl_miibus_readreg(struct device *, int, int);
179 void xl_miibus_writereg(struct device *, int, int, int);
180 void xl_miibus_statchg(struct device *);
181 #ifndef SMALL_KERNEL
182 int xl_wol(struct ifnet *, int);
183 void xl_wol_power(struct xl_softc *);
184 #endif
185 
186 int
187 xl_activate(struct device *self, int act)
188 {
189 	struct xl_softc *sc = (struct xl_softc *)self;
190 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
191 
192 	switch (act) {
193 	case DVACT_SUSPEND:
194 		if (ifp->if_flags & IFF_RUNNING)
195 			xl_stop(sc);
196 		break;
197 	case DVACT_RESUME:
198 		if (ifp->if_flags & IFF_UP)
199 			xl_init(sc);
200 		break;
201 	case DVACT_POWERDOWN:
202 #ifndef SMALL_KERNEL
203 		xl_wol_power(sc);
204 #endif
205 		break;
206 	}
207 	return (0);
208 }
209 
210 /*
211  * Murphy's law says that it's possible the chip can wedge and
212  * the 'command in progress' bit may never clear. Hence, we wait
213  * only a finite amount of time to avoid getting caught in an
214  * infinite loop. Normally this delay routine would be a macro,
215  * but it isn't called during normal operation so we can afford
216  * to make it a function.
217  */
218 void
219 xl_wait(struct xl_softc *sc)
220 {
221 	int	i;
222 
223 	for (i = 0; i < XL_TIMEOUT; i++) {
224 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
225 			break;
226 	}
227 
228 	if (i == XL_TIMEOUT)
229 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
230 }
231 
232 /*
233  * MII access routines are provided for adapters with external
234  * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
235  * autoneg logic that's faked up to look like a PHY (3c905B-TX).
236  * Note: if you don't perform the MDIO operations just right,
237  * it's possible to end up with code that works correctly with
238  * some chips/CPUs/processor speeds/bus speeds/etc but not
239  * with others.
240  */
241 #define MII_SET(x)					\
242 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
243 		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
244 
245 #define MII_CLR(x)					\
246 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
247 		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
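/*
 * The routines below bit-bang a standard IEEE 802.3 clause 22 MII
 * frame through the PHY management window: preamble/sync, a 2-bit
 * start delimiter, a 2-bit opcode, 5-bit PHY and register addresses,
 * a 2-bit turnaround and 16 data bits.
 */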
248 
249 /*
250  * Sync the PHYs by setting data bit and strobing the clock 32 times.
251  */
252 void
253 xl_mii_sync(struct xl_softc *sc)
254 {
255 	int	i;
256 
257 	XL_SEL_WIN(4);
258 	MII_SET(XL_MII_DIR|XL_MII_DATA);
259 
260 	for (i = 0; i < 32; i++) {
261 		MII_SET(XL_MII_CLK);
262 		MII_SET(XL_MII_DATA);
263 		MII_SET(XL_MII_DATA);
264 		MII_CLR(XL_MII_CLK);
265 		MII_SET(XL_MII_DATA);
266 		MII_SET(XL_MII_DATA);
267 	}
268 }
269 
270 /*
271  * Clock a series of bits through the MII.
272  */
273 void
274 xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
275 {
276 	int	i;
277 
278 	XL_SEL_WIN(4);
279 	MII_CLR(XL_MII_CLK);
280 
281 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
282 		if (bits & i) {
283 			MII_SET(XL_MII_DATA);
284 		} else {
285 			MII_CLR(XL_MII_DATA);
286 		}
287 		MII_CLR(XL_MII_CLK);
288 		MII_SET(XL_MII_CLK);
289 	}
290 }
291 
292 /*
293  * Read a PHY register through the MII.
294  */
295 int
296 xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
297 {
298 	int	i, ack, s;
299 
300 	s = splnet();
301 
302 	/*
303 	 * Set up frame for RX.
304 	 */
305 	frame->mii_stdelim = XL_MII_STARTDELIM;
306 	frame->mii_opcode = XL_MII_READOP;
307 	frame->mii_turnaround = 0;
308 	frame->mii_data = 0;
309 
310 	/*
311 	 * Select register window 4.
312 	 */
313 
314 	XL_SEL_WIN(4);
315 
316 	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
317 	/*
318  	 * Turn on data xmit.
319 	 */
320 	MII_SET(XL_MII_DIR);
321 
322 	xl_mii_sync(sc);
323 
324 	/*
325 	 * Send command/address info.
326 	 */
327 	xl_mii_send(sc, frame->mii_stdelim, 2);
328 	xl_mii_send(sc, frame->mii_opcode, 2);
329 	xl_mii_send(sc, frame->mii_phyaddr, 5);
330 	xl_mii_send(sc, frame->mii_regaddr, 5);
331 
332 	/* Idle bit */
333 	MII_CLR((XL_MII_CLK|XL_MII_DATA));
334 	MII_SET(XL_MII_CLK);
335 
336 	/* Turn off xmit. */
337 	MII_CLR(XL_MII_DIR);
338 
339 	/* Check for ack */
340 	MII_CLR(XL_MII_CLK);
341 	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
342 	MII_SET(XL_MII_CLK);
343 
344 	/*
345 	 * Now try reading data bits. If the ack failed, we still
346 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
347 	 */
348 	if (ack) {
349 		for(i = 0; i < 16; i++) {
350 			MII_CLR(XL_MII_CLK);
351 			MII_SET(XL_MII_CLK);
352 		}
353 		goto fail;
354 	}
355 
356 	for (i = 0x8000; i; i >>= 1) {
357 		MII_CLR(XL_MII_CLK);
358 		if (!ack) {
359 			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
360 				frame->mii_data |= i;
361 		}
362 		MII_SET(XL_MII_CLK);
363 	}
364 
365 fail:
366 
367 	MII_CLR(XL_MII_CLK);
368 	MII_SET(XL_MII_CLK);
369 
370 	splx(s);
371 
372 	if (ack)
373 		return (1);
374 	return (0);
375 }
376 
377 /*
378  * Write to a PHY register through the MII.
379  */
380 int
381 xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
382 {
383 	int	s;
384 
385 	s = splnet();
386 
387 	/*
388 	 * Set up frame for TX.
389 	 */
390 
391 	frame->mii_stdelim = XL_MII_STARTDELIM;
392 	frame->mii_opcode = XL_MII_WRITEOP;
393 	frame->mii_turnaround = XL_MII_TURNAROUND;
394 
395 	/*
396 	 * Select register window 4.
397 	 */
398 	XL_SEL_WIN(4);
399 
400 	/*
401  	 * Turn on data output.
402 	 */
403 	MII_SET(XL_MII_DIR);
404 
405 	xl_mii_sync(sc);
406 
407 	xl_mii_send(sc, frame->mii_stdelim, 2);
408 	xl_mii_send(sc, frame->mii_opcode, 2);
409 	xl_mii_send(sc, frame->mii_phyaddr, 5);
410 	xl_mii_send(sc, frame->mii_regaddr, 5);
411 	xl_mii_send(sc, frame->mii_turnaround, 2);
412 	xl_mii_send(sc, frame->mii_data, 16);
413 
414 	/* Idle bit. */
415 	MII_SET(XL_MII_CLK);
416 	MII_CLR(XL_MII_CLK);
417 
418 	/*
419 	 * Turn off xmit.
420 	 */
421 	MII_CLR(XL_MII_DIR);
422 
423 	splx(s);
424 
425 	return (0);
426 }
427 
428 int
429 xl_miibus_readreg(struct device *self, int phy, int reg)
430 {
431 	struct xl_softc *sc = (struct xl_softc *)self;
432 	struct xl_mii_frame	frame;
433 
434 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
435 		return (0);
436 
437 	bzero(&frame, sizeof(frame));
438 
439 	frame.mii_phyaddr = phy;
440 	frame.mii_regaddr = reg;
441 	xl_mii_readreg(sc, &frame);
442 
443 	return (frame.mii_data);
444 }
445 
446 void
447 xl_miibus_writereg(struct device *self, int phy, int reg, int data)
448 {
449 	struct xl_softc *sc = (struct xl_softc *)self;
450 	struct xl_mii_frame	frame;
451 
452 	if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
453 		return;
454 
455 	bzero(&frame, sizeof(frame));
456 
457 	frame.mii_phyaddr = phy;
458 	frame.mii_regaddr = reg;
459 	frame.mii_data = data;
460 
461 	xl_mii_writereg(sc, &frame);
462 }
463 
464 void
465 xl_miibus_statchg(struct device *self)
466 {
467 	struct xl_softc *sc = (struct xl_softc *)self;
468 
469 	xl_setcfg(sc);
470 
471 	/* Set ASIC's duplex mode to match the PHY. */
472 	XL_SEL_WIN(3);
473 	if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
474 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
475 	else
476 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
477 		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
478 }
479 
480 /*
481  * The EEPROM is slow: give it time to come ready after issuing
482  * it a command.
483  */
484 int
485 xl_eeprom_wait(struct xl_softc *sc)
486 {
487 	int	i;
488 
489 	for (i = 0; i < 100; i++) {
490 		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
491 			DELAY(162);
492 		else
493 			break;
494 	}
495 
496 	if (i == 100) {
497 		printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname);
498 		return (1);
499 	}
500 
501 	return (0);
502 }
503 
504 /*
505  * Read a sequence of words from the EEPROM. Note that ethernet address
506  * data is stored in the EEPROM in network byte order.
507  */
508 int
509 xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
510 {
511 	int		err = 0, i;
512 	u_int16_t	word = 0, *ptr;
513 #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
514 #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
515 	/* WARNING! DANGER!
516 	 * It's easy to accidentally overwrite the rom content!
517 	 * Note: the 3c575 uses 8bit EEPROM offsets.
518 	 */
519 	XL_SEL_WIN(0);
520 
521 	if (xl_eeprom_wait(sc))
522 		return (1);
523 
524 	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
525 		off += 0x30;
526 
527 	for (i = 0; i < cnt; i++) {
528 		if (sc->xl_flags & XL_FLAG_8BITROM)
529 			CSR_WRITE_2(sc, XL_W0_EE_CMD,
530 			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
531 		else
532 			CSR_WRITE_2(sc, XL_W0_EE_CMD,
533 			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
534 		err = xl_eeprom_wait(sc);
535 		if (err)
536 			break;
537 		word = CSR_READ_2(sc, XL_W0_EE_DATA);
538 		ptr = (u_int16_t *)(dest + (i * 2));
539 		if (swap)
540 			*ptr = ntohs(word);
541 		else
542 			*ptr = word;
543 	}
544 
545 	return (err ? 1 : 0);
546 }
547 
548 void
549 xl_iff(struct xl_softc *sc)
550 {
551 	if (sc->xl_type == XL_TYPE_905B)
552 		xl_iff_905b(sc);
553 	else
554 		xl_iff_90x(sc);
555 }
556 
557 /*
558  * NICs older than the 3c905B have only one multicast option, which
559  * is to enable reception of all multicast frames.
560  */
561 void
562 xl_iff_90x(struct xl_softc *sc)
563 {
564 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
565 	struct arpcom	*ac = &sc->sc_arpcom;
566 	u_int8_t	rxfilt;
567 
568 	XL_SEL_WIN(5);
569 
570 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
571 	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
572 	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL);
573 	ifp->if_flags &= ~IFF_ALLMULTI;
574 
575 	/*
576 	 * Always accept broadcast frames.
577 	 * Always accept frames destined to our station address.
578 	 */
579 	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;
580 
581 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
582 		ifp->if_flags |= IFF_ALLMULTI;
583 		if (ifp->if_flags & IFF_PROMISC)
584 			rxfilt |= XL_RXFILTER_ALLFRAMES;
585 		else
586 			rxfilt |= XL_RXFILTER_ALLMULTI;
587 	}
588 
589 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);
590 
591 	XL_SEL_WIN(7);
592 }
593 
594 /*
595  * 3c905B adapters have a hash filter that we can program.
596  */
597 void
598 xl_iff_905b(struct xl_softc *sc)
599 {
600 	struct ifnet	*ifp = &sc->sc_arpcom.ac_if;
601 	struct arpcom	*ac = &sc->sc_arpcom;
602 	int		h = 0, i;
603 	struct ether_multi *enm;
604 	struct ether_multistep step;
605 	u_int8_t	rxfilt;
606 
607 	XL_SEL_WIN(5);
608 
609 	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
610 	rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI |
611 	    XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL |
612 	    XL_RXFILTER_MULTIHASH);
613 	ifp->if_flags &= ~IFF_ALLMULTI;
614 
615 	/*
616 	 * Always accept broadcast frames.
617 	 * Always accept frames destined to our station address.
618 	 */
619 	rxfilt |= XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL;
620 
621 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
622 		ifp->if_flags |= IFF_ALLMULTI;
623 		if (ifp->if_flags & IFF_PROMISC)
624 			rxfilt |= XL_RXFILTER_ALLFRAMES;
625 		else
626 			rxfilt |= XL_RXFILTER_ALLMULTI;
627 	} else {
628 		rxfilt |= XL_RXFILTER_MULTIHASH;
629 
630 		/* first, zot all the existing hash bits */
631 		for (i = 0; i < XL_HASHFILT_SIZE; i++)
632 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
633 
634 		/* now program new ones */
635 		ETHER_FIRST_MULTI(step, ac, enm);
636 		while (enm != NULL) {
637 			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
638 			    0x000000FF;
639 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH |
640 			    XL_HASH_SET | h);
641 
642 			ETHER_NEXT_MULTI(step, enm);
643 		}
644 	}
645 
646 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT | rxfilt);
647 
648 	XL_SEL_WIN(7);
649 }
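/*
 * A minimal sketch of the hash computation used above, assuming the
 * generic ether_crc32_be() helper: the low byte of the big-endian
 * CRC32 of the group address selects the filter bin that
 * XL_CMD_RX_SET_HASH|XL_HASH_SET turns on.
 */
#ifdef notdef
u_int8_t
xl_calc_hash(const u_int8_t *enaddr)	/* hypothetical helper */
{
	return (ether_crc32_be(enaddr, ETHER_ADDR_LEN) & 0xFF);
}
#endif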
650 
651 void
652 xl_setcfg(struct xl_softc *sc)
653 {
654 	u_int32_t icfg;
655 
656 	XL_SEL_WIN(3);
657 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
658 	icfg &= ~XL_ICFG_CONNECTOR_MASK;
659 	if (sc->xl_media & XL_MEDIAOPT_MII ||
660 		sc->xl_media & XL_MEDIAOPT_BT4)
661 		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
662 	if (sc->xl_media & XL_MEDIAOPT_BTX)
663 		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
664 
665 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
666 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
667 }
668 
669 void
670 xl_setmode(struct xl_softc *sc, uint64_t media)
671 {
672 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
673 	u_int32_t icfg;
674 	u_int16_t mediastat;
675 
676 	XL_SEL_WIN(4);
677 	mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
678 	XL_SEL_WIN(3);
679 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
680 
681 	if (sc->xl_media & XL_MEDIAOPT_BT) {
682 		if (IFM_SUBTYPE(media) == IFM_10_T) {
683 			ifp->if_baudrate = IF_Mbps(10);
684 			sc->xl_xcvr = XL_XCVR_10BT;
685 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
686 			icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
687 			mediastat |= XL_MEDIASTAT_LINKBEAT|
688 					XL_MEDIASTAT_JABGUARD;
689 			mediastat &= ~XL_MEDIASTAT_SQEENB;
690 		}
691 	}
692 
693 	if (sc->xl_media & XL_MEDIAOPT_BFX) {
694 		if (IFM_SUBTYPE(media) == IFM_100_FX) {
695 			ifp->if_baudrate = IF_Mbps(100);
696 			sc->xl_xcvr = XL_XCVR_100BFX;
697 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
698 			icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
699 			mediastat |= XL_MEDIASTAT_LINKBEAT;
700 			mediastat &= ~XL_MEDIASTAT_SQEENB;
701 		}
702 	}
703 
704 	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
705 		if (IFM_SUBTYPE(media) == IFM_10_5) {
706 			ifp->if_baudrate = IF_Mbps(10);
707 			sc->xl_xcvr = XL_XCVR_AUI;
708 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
709 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
710 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
711 					XL_MEDIASTAT_JABGUARD);
712 			mediastat |= XL_MEDIASTAT_SQEENB;
713 		}
714 		if (IFM_SUBTYPE(media) == IFM_10_FL) {
715 			ifp->if_baudrate = IF_Mbps(10);
716 			sc->xl_xcvr = XL_XCVR_AUI;
717 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
718 			icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
719 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
720 					XL_MEDIASTAT_JABGUARD);
721 			mediastat |= XL_MEDIASTAT_SQEENB;
722 		}
723 	}
724 
725 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
726 		if (IFM_SUBTYPE(media) == IFM_10_2) {
727 			ifp->if_baudrate = IF_Mbps(10);
728 			sc->xl_xcvr = XL_XCVR_COAX;
729 			icfg &= ~XL_ICFG_CONNECTOR_MASK;
730 			icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
731 			mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
732 					XL_MEDIASTAT_JABGUARD|
733 					XL_MEDIASTAT_SQEENB);
734 		}
735 	}
736 
737 	if ((media & IFM_GMASK) == IFM_FDX ||
738 			IFM_SUBTYPE(media) == IFM_100_FX) {
739 		XL_SEL_WIN(3);
740 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
741 	} else {
742 		XL_SEL_WIN(3);
743 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
744 			(CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
745 	}
746 
747 	if (IFM_SUBTYPE(media) == IFM_10_2)
748 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
749 	else
750 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
751 	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
752 	XL_SEL_WIN(4);
753 	CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
754 	DELAY(800);
755 	XL_SEL_WIN(7);
756 }
757 
758 void
759 xl_reset(struct xl_softc *sc)
760 {
761 	int	i;
762 
763 	XL_SEL_WIN(0);
764 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
765 		    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
766 		     XL_RESETOPT_DISADVFD:0));
767 
768 	/*
769 	 * Pause briefly after issuing the reset command before trying
770 	 * to access any other registers. With my 3c575C cardbus card,
771 	 * failing to do this results in the system locking up while
772 	 * trying to poll the command busy bit in the status register.
773 	 */
774 	DELAY(100000);
775 
776 	for (i = 0; i < XL_TIMEOUT; i++) {
777 		DELAY(10);
778 		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
779 			break;
780 	}
781 
782 	if (i == XL_TIMEOUT)
783 		printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname);
784 
785 	/* Note: the RX reset takes an absurd amount of time
786 	 * on newer versions of the Tornado chips such as those
787 	 * on the 3c905CX and newer 3c908C cards. We wait an
788 	 * extra amount of time so that xl_wait() doesn't complain
789 	 * and annoy the users.
790 	 */
791 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
792 	DELAY(100000);
793 	xl_wait(sc);
794 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
795 	xl_wait(sc);
796 
797 	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
798 	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
799 		XL_SEL_WIN(2);
800 		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
801 		    XL_W2_RESET_OPTIONS)
802 		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
803 		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
804 		    );
805 	}
806 
807 	/* Wait a little while for the chip to get its brains in order. */
808 	DELAY(100000);
809 }
810 
811 /*
812  * This routine is a kludge to work around possible hardware faults
813  * or manufacturing defects that can cause the media options register
814  * (or reset options register, as it's called for the first generation
815  * 3c90x adapters) to return an incorrect result. I have encountered
816  * one Dell Latitude laptop docking station with an integrated 3c905-TX
817  * which doesn't have any of the 'mediaopt' bits set. This screws up
818  * the attach routine pretty badly because it doesn't know what media
819  * to look for. If we find ourselves in this predicament, this routine
820  * will try to guess the media options values and warn the user of a
821  * possible manufacturing defect with his adapter/system/whatever.
822  */
823 void
824 xl_mediacheck(struct xl_softc *sc)
825 {
826 	/*
827 	 * If some of the media options bits are set, assume they are
828 	 * correct. If not, try to figure it out down below.
829 	 * XXX I should check for 10baseFL, but I don't have an adapter
830 	 * to test with.
831 	 */
832 	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
833 		/*
834 	 	 * Check the XCVR value. If it's not in the normal range
835 	 	 * of values, we need to fake it up here.
836 	 	 */
837 		if (sc->xl_xcvr <= XL_XCVR_AUTO)
838 			return;
839 		else {
840 			printf("%s: bogus xcvr value "
841 			"in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr);
842 			printf("%s: choosing new default based "
843 				"on card type\n", sc->sc_dev.dv_xname);
844 		}
845 	} else {
846 		if (sc->xl_type == XL_TYPE_905B &&
847 		    sc->xl_media & XL_MEDIAOPT_10FL)
848 			return;
849 		printf("%s: WARNING: no media options bits set in "
850 			"the media options register!!\n", sc->sc_dev.dv_xname);
851 		printf("%s: this could be a manufacturing defect in "
852 			"your adapter or system\n", sc->sc_dev.dv_xname);
853 		printf("%s: attempting to guess media type; you "
854 			"should probably consult your vendor\n", sc->sc_dev.dv_xname);
855 	}
856 
857 	xl_choose_xcvr(sc, 1);
858 }
859 
860 void
861 xl_choose_xcvr(struct xl_softc *sc, int verbose)
862 {
863 	u_int16_t devid;
864 
865 	/*
866 	 * Read the device ID from the EEPROM.
867 	 * This is what's loaded into the PCI device ID register, so it has
868 	 * to be correct otherwise we wouldn't have gotten this far.
869 	 */
870 	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);
871 
872 	switch(devid) {
873 	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
874 	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
875 		sc->xl_media = XL_MEDIAOPT_BT;
876 		sc->xl_xcvr = XL_XCVR_10BT;
877 		if (verbose)
878 			printf("%s: guessing 10BaseT transceiver\n",
879 			    sc->sc_dev.dv_xname);
880 		break;
881 	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
882 	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
883 		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
884 		sc->xl_xcvr = XL_XCVR_10BT;
885 		if (verbose)
886 			printf("%s: guessing COMBO (AUI/BNC/TP)\n",
887 			    sc->sc_dev.dv_xname);
888 		break;
889 	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
890 		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
891 		sc->xl_xcvr = XL_XCVR_10BT;
892 		if (verbose)
893 			printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname);
894 		break;
895 	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
896 		sc->xl_media = XL_MEDIAOPT_10FL;
897 		sc->xl_xcvr = XL_XCVR_AUI;
898 		if (verbose)
899 			printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname);
900 		break;
901 	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
902 	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
903 	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
904 	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
905 	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
906 	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
907 	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
908 	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
909 	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
910 	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
911 	case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */
912 		sc->xl_media = XL_MEDIAOPT_MII;
913 		sc->xl_xcvr = XL_XCVR_MII;
914 		if (verbose)
915 			printf("%s: guessing MII\n", sc->sc_dev.dv_xname);
916 		break;
917 	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
918 	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
919 		sc->xl_media = XL_MEDIAOPT_BT4;
920 		sc->xl_xcvr = XL_XCVR_MII;
921 		if (verbose)
922 			printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname);
923 		break;
924 	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
925 	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
926 	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
927 	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
928 	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
929 	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
930 		sc->xl_media = XL_MEDIAOPT_BTX;
931 		sc->xl_xcvr = XL_XCVR_AUTO;
932 		if (verbose)
933 			printf("%s: guessing 10/100 internal\n",
934 			    sc->sc_dev.dv_xname);
935 		break;
936 	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
937 		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
938 		sc->xl_xcvr = XL_XCVR_AUTO;
939 		if (verbose)
940 			printf("%s: guessing 10/100 plus BNC/AUI\n",
941 			    sc->sc_dev.dv_xname);
942 		break;
943 	default:
944 		printf("%s: unknown device ID: %x -- "
945 			"defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid);
946 		sc->xl_media = XL_MEDIAOPT_BT;
947 		break;
948 	}
949 }
950 
951 /*
952  * Initialize the transmit descriptors.
953  */
954 int
955 xl_list_tx_init(struct xl_softc *sc)
956 {
957 	struct xl_chain_data	*cd;
958 	struct xl_list_data	*ld;
959 	int			i;
960 
961 	cd = &sc->xl_cdata;
962 	ld = sc->xl_ldata;
963 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
964 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
965 		if (i == (XL_TX_LIST_CNT - 1))
966 			cd->xl_tx_chain[i].xl_next = NULL;
967 		else
968 			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
969 	}
970 
971 	cd->xl_tx_free = &cd->xl_tx_chain[0];
972 	cd->xl_tx_tail = cd->xl_tx_head = NULL;
973 
974 	return (0);
975 }
976 
977 /*
978  * Initialize the transmit descriptors.
979  */
980 int
981 xl_list_tx_init_90xB(struct xl_softc *sc)
982 {
983 	struct xl_chain_data	*cd;
984 	struct xl_list_data	*ld;
985 	int			i, next, prev;
986 
987 	cd = &sc->xl_cdata;
988 	ld = sc->xl_ldata;
989 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
990 		if (i == (XL_TX_LIST_CNT - 1))
991 			next = 0;
992 		else
993 			next = i + 1;
994 		if (i == 0)
995 			prev = XL_TX_LIST_CNT - 1;
996 		else
997 			prev = i - 1;
998 		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
999 		cd->xl_tx_chain[i].xl_phys =
1000 		    sc->sc_listmap->dm_segs[0].ds_addr +
1001 		    offsetof(struct xl_list_data, xl_tx_list[i]);
1002 		cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next];
1003 		cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev];
1004 	}
1005 
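	/*
	 * Mark slot 0 as a lone empty descriptor and start producing
	 * and consuming at slot 1, presumably so the 90xB download
	 * engine always sees a valid, empty list head before any
	 * frames are queued.
	 */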
1006 	bzero(ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
1007 	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
1008 
1009 	cd->xl_tx_prod = 1;
1010 	cd->xl_tx_cons = 1;
1011 	cd->xl_tx_cnt = 0;
1012 
1013 	return (0);
1014 }
1015 
1016 /*
1017  * Initialize the RX descriptors and allocate mbufs for them. Note that
1018  * we arrange the descriptors in a closed ring, so that the last descriptor
1019  * points back to the first.
1020  */
1021 int
1022 xl_list_rx_init(struct xl_softc *sc)
1023 {
1024 	struct xl_chain_data	*cd;
1025 	struct xl_list_data	*ld;
1026 	int			i, n;
1027 	bus_addr_t		next;
1028 
1029 	cd = &sc->xl_cdata;
1030 	ld = sc->xl_ldata;
1031 
1032 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
1033 		cd->xl_rx_chain[i].xl_ptr =
1034 			(struct xl_list_onefrag *)&ld->xl_rx_list[i];
1035 		if (i == (XL_RX_LIST_CNT - 1))
1036 			n = 0;
1037 		else
1038 			n = i + 1;
1039 		cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n];
1040 		next = sc->sc_listmap->dm_segs[0].ds_addr +
1041 		       offsetof(struct xl_list_data, xl_rx_list[n]);
1042 		ld->xl_rx_list[i].xl_next = htole32(next);
1043 	}
1044 
1045 	cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0];
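	/*
	 * Hand at most XL_RX_LIST_CNT - 1 slots to the rxr accounting
	 * (one slot stays free, presumably so the producer cannot lap
	 * the consumer) and keep at least 2 of them filled.
	 */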
1046 	if_rxr_init(&cd->xl_rx_ring, 2, XL_RX_LIST_CNT - 1);
1047 	xl_fill_rx_ring(sc);
1048 	return (0);
1049 }
1050 
1051 void
1052 xl_fill_rx_ring(struct xl_softc *sc)
1053 {
1054 	struct xl_chain_data    *cd;
1055 	u_int			slots;
1056 
1057 	cd = &sc->xl_cdata;
1058 
1059 	for (slots = if_rxr_get(&cd->xl_rx_ring, XL_RX_LIST_CNT);
1060 	     slots > 0; slots--) {
1061 		if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS)
1062 			break;
1063 		cd->xl_rx_prod = cd->xl_rx_prod->xl_next;
1064 	}
1065 	if_rxr_put(&cd->xl_rx_ring, slots);
1066 }
1067 
1068 /*
1069  * Initialize an RX descriptor and attach an MBUF cluster.
1070  */
1071 int
1072 xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
1073 {
1074 	struct mbuf	*m_new = NULL;
1075 	bus_dmamap_t	map;
1076 
1077 	m_new = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1078 	if (!m_new)
1079 		return (ENOBUFS);
1080 
1081 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1082 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
1083 	    mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) {
1084 		m_freem(m_new);
1085 		return (ENOBUFS);
1086 	}
1087 
1088 	/* sync the old map, and unload it (if necessary) */
1089 	if (c->map->dm_nsegs != 0) {
1090 		bus_dmamap_sync(sc->sc_dmat, c->map,
1091 		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1092 		bus_dmamap_unload(sc->sc_dmat, c->map);
1093 	}
1094 
1095 	map = c->map;
1096 	c->map = sc->sc_rx_sparemap;
1097 	sc->sc_rx_sparemap = map;
1098 
1099 	/* Force longword alignment for packet payload. */
1100 	m_adj(m_new, ETHER_ALIGN);
1101 
1102 	bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1103 	    BUS_DMASYNC_PREREAD);
1104 
1105 	c->xl_mbuf = m_new;
1106 	c->xl_ptr->xl_frag.xl_addr =
1107 	    htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
1108 	c->xl_ptr->xl_frag.xl_len =
1109 	    htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
1110 	c->xl_ptr->xl_status = htole32(0);
1111 
1112 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1113 	    ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list),
1114 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1115 
1116 	return (0);
1117 }
1118 
1119 /*
1120  * A frame has been uploaded: pass the resulting mbuf chain up to
1121  * the higher level protocols.
1122  */
1123 void
1124 xl_rxeof(struct xl_softc *sc)
1125 {
1126 	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
1127 	struct mbuf		*m;
1128 	struct ifnet		*ifp;
1129 	struct xl_chain_onefrag	*cur_rx;
1130 	int			total_len = 0;
1131 	u_int32_t		rxstat;
1132 	u_int16_t		sumflags = 0;
1133 
1134 	ifp = &sc->sc_arpcom.ac_if;
1135 
1136 again:
1137 
1138 	while (if_rxr_inuse(&sc->xl_cdata.xl_rx_ring) > 0) {
1139 		cur_rx = sc->xl_cdata.xl_rx_cons;
1140 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1141 		    ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva),
1142 		    sizeof(struct xl_list),
1143 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1144 		if ((rxstat = letoh32(sc->xl_cdata.xl_rx_cons->xl_ptr->xl_status)) == 0)
1145 			break;
1146 		m = cur_rx->xl_mbuf;
1147 		cur_rx->xl_mbuf = NULL;
1148 		sc->xl_cdata.xl_rx_cons = cur_rx->xl_next;
1149 		if_rxr_put(&sc->xl_cdata.xl_rx_ring, 1);
1150 		total_len = rxstat & XL_RXSTAT_LENMASK;
1151 
1152 		/*
1153 		 * Since we have told the chip to allow large frames,
1154 		 * we need to trap giant frame errors in software. We allow
1155 		 * a little more than the normal frame size to account for
1156 		 * frames with VLAN tags.
1157 		 */
1158 		if (total_len > XL_MAX_FRAMELEN)
1159 			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
1160 
1161 		/*
1162 		 * If an error occurs, update stats, clear the
1163 		 * status word and leave the mbuf cluster in place:
1164 		 * it should simply get re-used next time this descriptor
1165 	 	 * comes up in the ring.
1166 		 */
1167 		if (rxstat & XL_RXSTAT_UP_ERROR) {
1168 			ifp->if_ierrors++;
1169 			cur_rx->xl_ptr->xl_status = htole32(0);
1170 			m_freem(m);
1171 			continue;
1172 		}
1173 
1174 		/*
1175 		 * If the error bit was not set, the upload complete
1176 		 * bit should be set which means we have a valid packet.
1177 		 * If not, something truly strange has happened.
1178 		 */
1179 		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1180 			printf("%s: bad receive status -- "
1181 			    "packet dropped\n", sc->sc_dev.dv_xname);
1182 			ifp->if_ierrors++;
1183 			cur_rx->xl_ptr->xl_status = htole32(0);
1184 			m_freem(m);
1185 			continue;
1186 		}
1187 
1188 		m->m_pkthdr.len = m->m_len = total_len;
1189 
1190 		if (sc->xl_type == XL_TYPE_905B) {
1191 			if (!(rxstat & XL_RXSTAT_IPCKERR) &&
1192 			    (rxstat & XL_RXSTAT_IPCKOK))
1193 				sumflags |= M_IPV4_CSUM_IN_OK;
1194 
1195 			if (!(rxstat & XL_RXSTAT_TCPCKERR) &&
1196 			    (rxstat & XL_RXSTAT_TCPCKOK))
1197 				sumflags |= M_TCP_CSUM_IN_OK;
1198 
1199 			if (!(rxstat & XL_RXSTAT_UDPCKERR) &&
1200 			    (rxstat & XL_RXSTAT_UDPCKOK))
1201 				sumflags |= M_UDP_CSUM_IN_OK;
1202 
1203 			m->m_pkthdr.csum_flags = sumflags;
1204 		}
1205 
1206 		ml_enqueue(&ml, m);
1207 	}
1208 
1209 	if (ifiq_input(&ifp->if_rcv, &ml))
1210 		if_rxr_livelocked(&sc->xl_cdata.xl_rx_ring);
1211 
1212 	xl_fill_rx_ring(sc);
1213 
1214 	/*
1215 	 * Handle the 'end of channel' condition. When the upload
1216 	 * engine hits the end of the RX ring, it will stall. This
1217 	 * is our cue to flush the RX ring, reload the uplist pointer
1218 	 * register and unstall the engine.
1219 	 * XXX This is actually a little goofy. With the ThunderLAN
1220 	 * chip, you get an interrupt when the receiver hits the end
1221 	 * of the receive ring, which tells you exactly when you
1222 	 * need to reload the ring pointer. Here we have to
1223 	 * fake it. I'm mad at myself for not being clever enough
1224 	 * to avoid the use of a goto here.
1225 	 */
1226 	if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1227 		CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1228 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1229 		xl_wait(sc);
1230 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1231 		xl_fill_rx_ring(sc);
1232 		goto again;
1233 	}
1234 }
1235 
1236 /*
1237  * A frame was downloaded to the chip. It's safe for us to clean up
1238  * the list buffers.
1239  */
1240 void
1241 xl_txeof(struct xl_softc *sc)
1242 {
1243 	struct xl_chain		*cur_tx;
1244 	struct ifnet		*ifp;
1245 
1246 	ifp = &sc->sc_arpcom.ac_if;
1247 
1248 	/*
1249 	 * Go through our tx list and free mbufs for those
1250 	 * frames that have been uploaded. Note: the 3c905B
1251 	 * sets a special bit in the status word to let us
1252 	 * know that a frame has been downloaded, but the
1253 	 * original 3c900/3c905 adapters don't do that.
1254 	 * Consequently, we have to use a different test if
1255 	 * xl_type != XL_TYPE_905B.
1256 	 */
1257 	while (sc->xl_cdata.xl_tx_head != NULL) {
1258 		cur_tx = sc->xl_cdata.xl_tx_head;
1259 
1260 		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1261 		    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva),
1262 		    sizeof(struct xl_list),
1263 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1264 
1265 		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
1266 			break;
1267 
1268 		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
1269 		if (cur_tx->map->dm_nsegs != 0) {
1270 			bus_dmamap_t map = cur_tx->map;
1271 
1272 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1273 			    BUS_DMASYNC_POSTWRITE);
1274 			bus_dmamap_unload(sc->sc_dmat, map);
1275 		}
1276 		if (cur_tx->xl_mbuf != NULL) {
1277 			m_freem(cur_tx->xl_mbuf);
1278 			cur_tx->xl_mbuf = NULL;
1279 		}
1280 		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
1281 		sc->xl_cdata.xl_tx_free = cur_tx;
1282 	}
1283 
1284 	if (sc->xl_cdata.xl_tx_head == NULL) {
1285 		ifq_clr_oactive(&ifp->if_snd);
1286 		/* Clear the timeout timer. */
1287 		ifp->if_timer = 0;
1288 		sc->xl_cdata.xl_tx_tail = NULL;
1289 	} else {
1290 		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
1291 			!CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
1292 			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1293 			    sc->sc_listmap->dm_segs[0].ds_addr +
1294 			    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1295 			    sc->sc_listkva));
1296 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1297 		}
1298 	}
1299 }
1300 
1301 void
1302 xl_txeof_90xB(struct xl_softc *sc)
1303 {
1304 	struct xl_chain *cur_tx = NULL;
1305 	struct ifnet *ifp;
1306 	int idx;
1307 
1308 	ifp = &sc->sc_arpcom.ac_if;
1309 
1310 	idx = sc->xl_cdata.xl_tx_cons;
1311 	while (idx != sc->xl_cdata.xl_tx_prod) {
1312 
1313 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1314 
1315 		if ((cur_tx->xl_ptr->xl_status &
1316 		    htole32(XL_TXSTAT_DL_COMPLETE)) == 0)
1317 			break;
1318 
1319 		if (cur_tx->xl_mbuf != NULL) {
1320 			m_freem(cur_tx->xl_mbuf);
1321 			cur_tx->xl_mbuf = NULL;
1322 		}
1323 
1324 		if (cur_tx->map->dm_nsegs != 0) {
1325 			bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
1326 			    0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1327 			bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
1328 		}
1329 
1330 		sc->xl_cdata.xl_tx_cnt--;
1331 		XL_INC(idx, XL_TX_LIST_CNT);
1332 	}
1333 
1334 	sc->xl_cdata.xl_tx_cons = idx;
1335 
1336 	if (cur_tx != NULL)
1337 		ifq_clr_oactive(&ifp->if_snd);
1338 	if (sc->xl_cdata.xl_tx_cnt == 0)
1339 		ifp->if_timer = 0;
1340 }
1341 
1342 /*
1343  * TX 'end of channel' interrupt handler. Actually, we should
1344  * only get a 'TX complete' interrupt if there's a transmit error,
1345  * so this is really a TX error handler.
1346  */
1347 void
1348 xl_txeoc(struct xl_softc *sc)
1349 {
1350 	u_int8_t	txstat;
1351 
1352 	while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
1353 		if (txstat & XL_TXSTATUS_UNDERRUN ||
1354 			txstat & XL_TXSTATUS_JABBER ||
1355 			txstat & XL_TXSTATUS_RECLAIM) {
1356 			if (txstat != 0x90) {
1357 				printf("%s: transmission error: %x\n",
1358 				    sc->sc_dev.dv_xname, txstat);
1359 			}
1360 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1361 			xl_wait(sc);
1362 			if (sc->xl_type == XL_TYPE_905B) {
1363 				if (sc->xl_cdata.xl_tx_cnt) {
1364 					int i;
1365 					struct xl_chain *c;
1366 
1367 					i = sc->xl_cdata.xl_tx_cons;
1368 					c = &sc->xl_cdata.xl_tx_chain[i];
1369 					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1370 					    c->xl_phys);
1371 					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
1372 				}
1373 			} else {
1374 				if (sc->xl_cdata.xl_tx_head != NULL)
1375 					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1376 					    sc->sc_listmap->dm_segs[0].ds_addr +
1377 					    ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1378 					    sc->sc_listkva));
1379 			}
1380 			/*
1381 			 * Remember to set this for the
1382 			 * first generation 3c90X chips.
1383 			 */
1384 			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1385 			if (txstat & XL_TXSTATUS_UNDERRUN &&
1386 			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
1387 				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
1388 #ifdef notdef
1389 				printf("%s: tx underrun, increasing tx start"
1390 				    " threshold to %d\n", sc->sc_dev.dv_xname,
1391 				    sc->xl_tx_thresh);
1392 #endif
1393 			}
1394 			CSR_WRITE_2(sc, XL_COMMAND,
1395 			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1396 			if (sc->xl_type == XL_TYPE_905B) {
1397 				CSR_WRITE_2(sc, XL_COMMAND,
1398 				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1399 			}
1400 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1401 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1402 		} else {
1403 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1404 			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1405 		}
1406 		/*
1407 		 * Write an arbitrary byte to the TX_STATUS register
1408 	 	 * to clear this interrupt/error and advance to the next.
1409 		 */
1410 		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
1411 	}
1412 }
1413 
1414 int
1415 xl_intr(void *arg)
1416 {
1417 	struct xl_softc		*sc;
1418 	struct ifnet		*ifp;
1419 	u_int16_t		status;
1420 	int			claimed = 0;
1421 
1422 	sc = arg;
1423 	ifp = &sc->sc_arpcom.ac_if;
1424 
1425 	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {
1426 
1427 		claimed = 1;
1428 
1429 		CSR_WRITE_2(sc, XL_COMMAND,
1430 		    XL_CMD_INTR_ACK|(status & XL_INTRS));
1431 
1432 		if (sc->intr_ack)
1433 			(*sc->intr_ack)(sc);
1434 
1435 		if (!(ifp->if_flags & IFF_RUNNING))
1436 			return (claimed);
1437 
1438 		if (status & XL_STAT_UP_COMPLETE)
1439 			xl_rxeof(sc);
1440 
1441 		if (status & XL_STAT_DOWN_COMPLETE) {
1442 			if (sc->xl_type == XL_TYPE_905B)
1443 				xl_txeof_90xB(sc);
1444 			else
1445 				xl_txeof(sc);
1446 		}
1447 
1448 		if (status & XL_STAT_TX_COMPLETE) {
1449 			ifp->if_oerrors++;
1450 			xl_txeoc(sc);
1451 		}
1452 
1453 		if (status & XL_STAT_ADFAIL)
1454 			xl_init(sc);
1455 
1456 		if (status & XL_STAT_STATSOFLOW) {
1457 			sc->xl_stats_no_timeout = 1;
1458 			xl_stats_update(sc);
1459 			sc->xl_stats_no_timeout = 0;
1460 		}
1461 	}
1462 
1463 	if (!ifq_empty(&ifp->if_snd))
1464 		(*ifp->if_start)(ifp);
1465 
1466 	return (claimed);
1467 }
1468 
1469 void
1470 xl_stats_update(void *xsc)
1471 {
1472 	struct xl_softc		*sc;
1473 	struct ifnet		*ifp;
1474 	struct xl_stats		xl_stats;
1475 	u_int8_t		*p;
1476 	int			i;
1477 	struct mii_data		*mii = NULL;
1478 
1479 	bzero(&xl_stats, sizeof(struct xl_stats));
1480 
1481 	sc = xsc;
1482 	ifp = &sc->sc_arpcom.ac_if;
1483 	if (sc->xl_hasmii)
1484 		mii = &sc->sc_mii;
1485 
1486 	p = (u_int8_t *)&xl_stats;
1487 
1488 	/* Read all the stats registers. */
1489 	XL_SEL_WIN(6);
1490 
1491 	for (i = 0; i < 16; i++)
1492 		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);
1493 
1494 	ifp->if_ierrors += xl_stats.xl_rx_overrun;
1495 
1496 	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
1497 				xl_stats.xl_tx_single_collision +
1498 				xl_stats.xl_tx_late_collision;
1499 
1500 	/*
1501 	 * Boomerang and cyclone chips have an extra stats counter
1502 	 * in window 4 (BadSSD). We have to read this too in order
1503 	 * to clear out all the stats registers and avoid a statsoflow
1504 	 * interrupt.
1505 	 */
1506 	XL_SEL_WIN(4);
1507 	CSR_READ_1(sc, XL_W4_BADSSD);
1508 
1509 	if (mii != NULL && (!sc->xl_stats_no_timeout))
1510 		mii_tick(mii);
1511 
1512 	XL_SEL_WIN(7);
1513 
1514 	if (!sc->xl_stats_no_timeout)
1515 		timeout_add_sec(&sc->xl_stsup_tmo, 1);
1516 }
1517 
1518 /*
1519  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1520  * pointers to the fragment pointers.
1521  */
1522 int
1523 xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
1524 {
1525 	int		error, frag, total_len;
1526 	u_int32_t	status;
1527 	bus_dmamap_t	map;
1528 
1529 	map = sc->sc_tx_sparemap;
1530 
1531 reload:
1532 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
1533 	    m_head, BUS_DMA_NOWAIT);
1534 
1535 	if (error && error != EFBIG) {
1536 		m_freem(m_head);
1537 		return (1);
1538 	}
1539 
1540 	/*
1541  	 * Start packing the mbufs in this chain into
1542 	 * the fragment pointers. Stop when we run out
1543  	 * of fragments or hit the end of the mbuf chain.
1544 	 */
1545 	for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
1546 		if (frag == XL_MAXFRAGS)
1547 			break;
1548 		total_len += map->dm_segs[frag].ds_len;
1549 		c->xl_ptr->xl_frag[frag].xl_addr =
1550 		    htole32(map->dm_segs[frag].ds_addr);
1551 		c->xl_ptr->xl_frag[frag].xl_len =
1552 		    htole32(map->dm_segs[frag].ds_len);
1553 	}
1554 
1555 	/*
1556 	 * Handle special case: we used up all 63 fragments,
1557 	 * but we have more mbufs left in the chain. Copy the
1558 	 * data into an mbuf cluster. Note that we don't
1559 	 * bother clearing the values in the other fragment
1560 	 * pointers/counters; it wouldn't gain us anything,
1561 	 * and would waste cycles.
1562 	 */
1563 	if (error) {
1564 		struct mbuf	*m_new = NULL;
1565 
1566 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1567 		if (m_new == NULL) {
1568 			m_freem(m_head);
1569 			return (1);
1570 		}
1571 		if (m_head->m_pkthdr.len > MHLEN) {
1572 			MCLGET(m_new, M_DONTWAIT);
1573 			if (!(m_new->m_flags & M_EXT)) {
1574 				m_freem(m_new);
1575 				m_freem(m_head);
1576 				return (1);
1577 			}
1578 		}
1579 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1580 		    mtod(m_new, caddr_t));
1581 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1582 		m_freem(m_head);
1583 		m_head = m_new;
1584 		goto reload;
1585 	}
1586 
1587 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1588 	    BUS_DMASYNC_PREWRITE);
1589 
1590 	if (c->map->dm_nsegs != 0) {
1591 		bus_dmamap_sync(sc->sc_dmat, c->map,
1592 		    0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1593 		bus_dmamap_unload(sc->sc_dmat, c->map);
1594 	}
1595 
1596 	c->xl_mbuf = m_head;
1597 	sc->sc_tx_sparemap = c->map;
1598 	c->map = map;
1599 	c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG);
1600 	c->xl_ptr->xl_status = htole32(total_len);
1601 	c->xl_ptr->xl_next = 0;
1602 
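	/*
	 * On the original 3c90x the download status word carries the
	 * total frame length (set above); on the 905B it carries
	 * request bits instead, so it is rewritten below.
	 */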
1603 	if (sc->xl_type == XL_TYPE_905B) {
1604 		status = XL_TXSTAT_RND_DEFEAT;
1605 
1606 #ifndef XL905B_TXCSUM_BROKEN
1607 		if (m_head->m_pkthdr.csum_flags) {
1608 			if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1609 				status |= XL_TXSTAT_IPCKSUM;
1610 			if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1611 				status |= XL_TXSTAT_TCPCKSUM;
1612 			if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1613 				status |= XL_TXSTAT_UDPCKSUM;
1614 		}
1615 #endif
1616 		c->xl_ptr->xl_status = htole32(status);
1617 	}
1618 
1619 	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1620 	    offsetof(struct xl_list_data, xl_tx_list[0]),
1621 	    sizeof(struct xl_list) * XL_TX_LIST_CNT,
1622 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1623 
1624 	return (0);
1625 }
1626 
1627 /*
1628  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1629  * to the mbuf data regions directly in the transmit lists. We also save a
1630  * copy of the pointers since the transmit list fragment pointers are
1631  * physical addresses.
1632  */
1633 void
1634 xl_start(struct ifnet *ifp)
1635 {
1636 	struct xl_softc		*sc;
1637 	struct mbuf		*m_head = NULL;
1638 	struct xl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;
1639 	struct xl_chain		*prev_tx;
1640 	int			error;
1641 
1642 	sc = ifp->if_softc;
1643 
1644 	/*
1645 	 * Check for an available queue slot. If there are none,
1646 	 * punt.
1647 	 */
1648 	if (sc->xl_cdata.xl_tx_free == NULL) {
1649 		xl_txeoc(sc);
1650 		xl_txeof(sc);
1651 		if (sc->xl_cdata.xl_tx_free == NULL) {
1652 			ifq_set_oactive(&ifp->if_snd);
1653 			return;
1654 		}
1655 	}
1656 
1657 	start_tx = sc->xl_cdata.xl_tx_free;
1658 
1659 	while (sc->xl_cdata.xl_tx_free != NULL) {
1660 		m_head = ifq_dequeue(&ifp->if_snd);
1661 		if (m_head == NULL)
1662 			break;
1663 
1664 		/* Pick a descriptor off the free list. */
1665 		prev_tx = cur_tx;
1666 		cur_tx = sc->xl_cdata.xl_tx_free;
1667 
1668 		/* Pack the data into the descriptor. */
1669 		error = xl_encap(sc, cur_tx, m_head);
1670 		if (error) {
1671 			cur_tx = prev_tx;
1672 			continue;
1673 		}
1674 
1675 		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
1676 		cur_tx->xl_next = NULL;
1677 
1678 		/* Chain it together. */
1679 		if (prev != NULL) {
1680 			prev->xl_next = cur_tx;
1681 			prev->xl_ptr->xl_next =
1682 			    sc->sc_listmap->dm_segs[0].ds_addr +
1683 			    ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva);
1684 
1685 		}
1686 		prev = cur_tx;
1687 
1688 #if NBPFILTER > 0
1689 		/*
1690 		 * If there's a BPF listener, bounce a copy of this frame
1691 		 * to him.
1692 		 */
1693 		if (ifp->if_bpf)
1694 			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
1695 			    BPF_DIRECTION_OUT);
1696 #endif
1697 	}
1698 
1699 	/*
1700 	 * If there are no packets queued, bail.
1701 	 */
1702 	if (cur_tx == NULL)
1703 		return;
1704 
1705 	/*
1706 	 * Place the request for the upload interrupt
1707 	 * in the last descriptor in the chain. This way, if
1708 	 * we're chaining several packets at once, we'll only
1709 	 * get an interrupt once for the whole chain rather than
1710 	 * once for each packet.
1711 	 */
1712 	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
1713 
1714 	/*
1715 	 * Queue the packets. If the TX channel is clear, update
1716 	 * the downlist pointer register.
1717 	 */
1718 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
1719 	xl_wait(sc);
1720 
1721 	if (sc->xl_cdata.xl_tx_head != NULL) {
1722 		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
1723 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
1724 		    sc->sc_listmap->dm_segs[0].ds_addr +
1725 		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva);
1726 		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
1727 		    htole32(~XL_TXSTAT_DL_INTR);
1728 		sc->xl_cdata.xl_tx_tail = cur_tx;
1729 	} else {
1730 		sc->xl_cdata.xl_tx_head = start_tx;
1731 		sc->xl_cdata.xl_tx_tail = cur_tx;
1732 	}
1733 	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
1734 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1735 		    sc->sc_listmap->dm_segs[0].ds_addr +
1736 		    ((caddr_t)start_tx->xl_ptr - sc->sc_listkva));
1737 
1738 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1739 
1740 	XL_SEL_WIN(7);
1741 
1742 	/*
1743 	 * Set a timeout in case the chip goes out to lunch.
1744 	 */
1745 	ifp->if_timer = 5;
1746 
1747 	/*
1748 	 * XXX Under certain conditions, usually on slower machines
1749 	 * where interrupts may be dropped, it's possible for the
1750 	 * adapter to chew up all the buffers in the receive ring
1751 	 * and stall, without us being able to do anything about it.
1752 	 * To guard against this, we need to make a pass over the
1753 	 * RX queue to make sure there aren't any packets pending.
1754 	 * Doing it here means we can flush the receive ring at the
1755 	 * same time the chip is DMAing the transmit descriptors we
1756 	 * just gave it.
1757  	 *
1758 	 * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
1759 	 * nature of their chips in all their marketing literature;
1760 	 * we may as well take advantage of it. :)
1761 	 */
1762 	xl_rxeof(sc);
1763 }
1764 
1765 void
1766 xl_start_90xB(struct ifnet *ifp)
1767 {
1768 	struct xl_softc	*sc;
1769 	struct mbuf	*m_head = NULL;
1770 	struct xl_chain	*prev = NULL, *cur_tx = NULL, *start_tx;
1771 	struct xl_chain	*prev_tx;
1772 	int		error, idx;
1773 
1774 	sc = ifp->if_softc;
1775 
1776 	if (ifq_is_oactive(&ifp->if_snd))
1777 		return;
1778 
1779 	idx = sc->xl_cdata.xl_tx_prod;
1780 	start_tx = &sc->xl_cdata.xl_tx_chain[idx];
1781 
1782 	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {
1783 
1784 		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
1785 			ifq_set_oactive(&ifp->if_snd);
1786 			break;
1787 		}
1788 
1789 		m_head = ifq_dequeue(&ifp->if_snd);
1790 		if (m_head == NULL)
1791 			break;
1792 
1793 		prev_tx = cur_tx;
1794 		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];
1795 
1796 		/* Pack the data into the descriptor. */
1797 		error = xl_encap(sc, cur_tx, m_head);
1798 		if (error) {
1799 			cur_tx = prev_tx;
1800 			continue;
1801 		}
1802 
1803 		/* Chain it together. */
1804 		if (prev != NULL)
1805 			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
1806 		prev = cur_tx;
1807 
1808 #if NBPFILTER > 0
1809 		/*
1810 		 * If there's a BPF listener, bounce a copy of this frame
1811 		 * to it.
1812 		 */
1813 		if (ifp->if_bpf)
1814 			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
1815 			    BPF_DIRECTION_OUT);
1816 #endif
1817 
1818 		XL_INC(idx, XL_TX_LIST_CNT);
1819 		sc->xl_cdata.xl_tx_cnt++;
1820 	}
1821 
1822 	/*
1823 	 * If there are no packets queued, bail.
1824 	 */
1825 	if (cur_tx == NULL)
1826 		return;
1827 
1828 	/*
1829 	 * Place the request for the upload interrupt
1830 	 * in the last descriptor in the chain. This way, if
1831 	 * we're chaining several packets at once, we'll only
1832 	 * get an interrupt once for the whole chain rather than
1833 	 * once for each packet.
1834 	 */
1835 	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
1836 
1837 	/* Start transmission */
1838 	sc->xl_cdata.xl_tx_prod = idx;
1839 	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);
1840 
1841 	/*
1842 	 * Set a timeout in case the chip goes out to lunch.
1843 	 */
1844 	ifp->if_timer = 5;
1845 }
1846 
1847 void
1848 xl_init(void *xsc)
1849 {
1850 	struct xl_softc		*sc = xsc;
1851 	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
1852 	int			s, i;
1853 	struct mii_data		*mii = NULL;
1854 
1855 	s = splnet();
1856 
1857 	/*
1858 	 * Cancel pending I/O and free all RX/TX buffers.
1859 	 */
1860 	xl_stop(sc);
1861 
1862 	/* Reset the chip to a known state. */
1863 	xl_reset(sc);
1864 
1865 	if (sc->xl_hasmii)
1866 		mii = &sc->sc_mii;
1867 
1868 	if (mii == NULL) {
1869 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
1870 		xl_wait(sc);
1871 	}
1872 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1873 	xl_wait(sc);
1874 	DELAY(10000);
1875 
1876 	/* Init our MAC address */
1877 	XL_SEL_WIN(2);
1878 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1879 		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
1880 		    sc->sc_arpcom.ac_enaddr[i]);
1881 	}
1882 
1883 	/* Clear the station mask. */
1884 	for (i = 0; i < 3; i++)
1885 		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
1886 #ifdef notdef
1887 	/* Reset TX and RX. */
1888 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
1889 	xl_wait(sc);
1890 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1891 	xl_wait(sc);
1892 #endif
1893 	/* Init circular RX list. */
1894 	if (xl_list_rx_init(sc) == ENOBUFS) {
1895 		printf("%s: initialization failed: no "
1896 		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
1897 		xl_stop(sc);
1898 		splx(s);
1899 		return;
1900 	}
1901 
1902 	/* Init TX descriptors. */
1903 	if (sc->xl_type == XL_TYPE_905B)
1904 		xl_list_tx_init_90xB(sc);
1905 	else
1906 		xl_list_tx_init(sc);
1907 
1908 	/*
1909 	 * Set the TX freethresh value.
1910 	 * Note that this has no effect on 3c905B "cyclone"
1911 	 * cards but is required for 3c900/3c905 "boomerang"
1912 	 * cards in order to enable the download engine.
1913 	 */
1914 	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
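	/*
	 * The free threshold register counts in 256 byte units, hence
	 * the shift: with XL_PACKET_SIZE at its usual value of 1540,
	 * 1540 >> 8 == 6 programs a 6 * 256 = 1536 byte threshold,
	 * roughly one full-sized frame.
	 */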
1915 
1916 	/* Set the TX start threshold for best performance. */
1917 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1918 
1919 	/*
1920 	 * If this is a 3c905B, also set the tx reclaim threshold.
1921 	 * This helps cut down on the number of tx reclaim errors
1922 	 * that could happen on a busy network. The chip multiplies
1923 	 * the register value by 16 to obtain the actual threshold
1924 	 * in bytes, so we divide by 16 when setting the value here.
1925 	 * The existing threshold value can be examined by reading
1926 	 * the register at offset 9 in window 5.
1927 	 */
1928 	if (sc->xl_type == XL_TYPE_905B) {
1929 		CSR_WRITE_2(sc, XL_COMMAND,
1930 		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1931 	}
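	/*
	 * Worked through with XL_PACKET_SIZE at its usual value of
	 * 1540: 1540 >> 4 == 96 goes into the register, and the chip's
	 * multiply-by-16 turns that back into a 1536 byte reclaim
	 * threshold, again about one full-sized frame.
	 */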
1932 
1933 	/* Program promiscuous mode and multicast filters. */
1934 	xl_iff(sc);
1935 
1936 	/*
1937 	 * Load the address of the RX list. We have to
1938 	 * stall the upload engine before we can manipulate
1939 	 * the uplist pointer register, then unstall it when
1940 	 * we're finished. We also have to wait for the
1941 	 * stall command to complete before proceeding.
1942 	 * Note that we have to do this after any RX resets
1943 	 * have completed since the uplist register is cleared
1944 	 * by a reset.
1945 	 */
1946 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1947 	xl_wait(sc);
1948 	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr +
1949 	    offsetof(struct xl_list_data, xl_rx_list[0]));
1950 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1951 	xl_wait(sc);
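	/*
	 * The value written above is a bus address: the base of the DMA
	 * segment backing the descriptor memory plus the offset of the
	 * RX ring within struct xl_list_data.  Any later update of
	 * XL_UPLIST_PTR needs the same bracket; as a minimal sketch,
	 * with paddr standing for the ring's bus address:
	 *
	 *	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	 *	xl_wait(sc);
	 *	CSR_WRITE_4(sc, XL_UPLIST_PTR, paddr);
	 *	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	 *	xl_wait(sc);
	 */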
1952 
1953 	if (sc->xl_type == XL_TYPE_905B) {
1954 		/* Set polling interval */
1955 		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
1956 		/* Load the address of the TX list */
1957 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
1958 		xl_wait(sc);
1959 		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1960 		    sc->sc_listmap->dm_segs[0].ds_addr +
1961 		    offsetof(struct xl_list_data, xl_tx_list[0]));
1962 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1963 		xl_wait(sc);
1964 	}
1965 
1966 	/*
1967 	 * If the coax transceiver is on, make sure to enable
1968 	 * the DC-DC converter.
1969  	 */
1970 	XL_SEL_WIN(3);
1971 	if (sc->xl_xcvr == XL_XCVR_COAX)
1972 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
1973 	else
1974 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
1975 
1976 	/*
1977 	 * Increase packet size to allow reception of 802.1q or ISL packets.
1978 	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
1979 	 * control register. For 3c90xB/C chips, use the RX packet size
1980 	 * register.
1981 	 */
1982 
1983 	if (sc->xl_type == XL_TYPE_905B)
1984 		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
1985 	else {
1986 		u_int8_t macctl;
1987 		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
1988 		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
1989 		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
1990 	}
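	/*
	 * For scale: a maximum untagged Ethernet frame is 1514 bytes
	 * before the FCS and an 802.1q tag adds 4 more, so
	 * XL_PACKET_SIZE (nominally 1540) leaves headroom either way.
	 */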
1991 
1992 	/* Clear out the stats counters. */
1993 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
1994 	sc->xl_stats_no_timeout = 1;
1995 	xl_stats_update(sc);
1996 	sc->xl_stats_no_timeout = 0;
1997 	XL_SEL_WIN(4);
1998 	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
1999 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);
2000 
2001 	/*
2002 	 * Enable interrupts.
2003 	 */
2004 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
2005 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
2006 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
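	/*
	 * Two masks are programmed here: STAT_ENB selects which events
	 * may be latched into the status register at all, while
	 * INTR_ENB selects which latched events actually assert the
	 * interrupt line.  The INTR_ACK|0xFF beforehand clears any
	 * events still latched from before the reset.
	 */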
2007 
2008 	if (sc->intr_ack)
2009 		(*sc->intr_ack)(sc);
2010 
2011 	/* Set the RX early threshold */
2012 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >> 2));
2013 	CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);
2014 
2015 	/* Enable receiver and transmitter. */
2016 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
2017 	xl_wait(sc);
2018 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2019 	xl_wait(sc);
2020 
2021 	/* Restore state of BMCR */
2022 	if (mii != NULL)
2023 		mii_mediachg(mii);
2024 
2025 	/* Select window 7 for normal operations. */
2026 	XL_SEL_WIN(7);
2027 
2028 	ifp->if_flags |= IFF_RUNNING;
2029 	ifq_clr_oactive(&ifp->if_snd);
2030 
2031 	splx(s);
2032 
2033 	timeout_add_sec(&sc->xl_stsup_tmo, 1);
2034 }
2035 
2036 /*
2037  * Set media options.
2038  */
2039 int
2040 xl_ifmedia_upd(struct ifnet *ifp)
2041 {
2042 	struct xl_softc		*sc;
2043 	struct ifmedia		*ifm = NULL;
2044 	struct mii_data		*mii = NULL;
2045 
2046 	sc = ifp->if_softc;
2047 
2048 	if (sc->xl_hasmii)
2049 		mii = &sc->sc_mii;
2050 	if (mii == NULL)
2051 		ifm = &sc->ifmedia;
2052 	else
2053 		ifm = &mii->mii_media;
2054 
2055 	switch(IFM_SUBTYPE(ifm->ifm_media)) {
2056 	case IFM_100_FX:
2057 	case IFM_10_FL:
2058 	case IFM_10_2:
2059 	case IFM_10_5:
2060 		xl_setmode(sc, ifm->ifm_media);
2061 		return (0);
2063 	default:
2064 		break;
2065 	}
2066 
2067 	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX ||
2068 	    sc->xl_media & XL_MEDIAOPT_BT4) {
2069 		xl_init(sc);
2070 	} else {
2071 		xl_setmode(sc, ifm->ifm_media);
2072 	}
2073 
2074 	return (0);
2075 }
2076 
2077 /*
2078  * Report current media status.
2079  */
2080 void
2081 xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2082 {
2083 	struct xl_softc		*sc;
2084 	u_int32_t		icfg;
2085 	u_int16_t		status = 0;
2086 	struct mii_data		*mii = NULL;
2087 
2088 	sc = ifp->if_softc;
2089 	if (sc->xl_hasmii != 0)
2090 		mii = &sc->sc_mii;
2091 
2092 	XL_SEL_WIN(4);
2093 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2094 
2095 	XL_SEL_WIN(3);
2096 	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2097 	icfg >>= XL_ICFG_CONNECTOR_BITS;
2098 
2099 	ifmr->ifm_active = IFM_ETHER;
2100 	ifmr->ifm_status = IFM_AVALID;
2101 
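	/*
	 * The test below is intentionally inverted: this driver treats
	 * XL_MEDIASTAT_CARRIER as a no-carrier indication (see the same
	 * check in xl_watchdog()), so the link is reported active when
	 * the bit is clear.
	 */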
2102 	if ((status & XL_MEDIASTAT_CARRIER) == 0)
2103 		ifmr->ifm_status |= IFM_ACTIVE;
2104 
2105 	switch(icfg) {
2106 	case XL_XCVR_10BT:
2107 		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2108 		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2109 			ifmr->ifm_active |= IFM_FDX;
2110 		else
2111 			ifmr->ifm_active |= IFM_HDX;
2112 		break;
2113 	case XL_XCVR_AUI:
2114 		if (sc->xl_type == XL_TYPE_905B &&
2115 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2116 			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
2117 			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2118 				ifmr->ifm_active |= IFM_FDX;
2119 			else
2120 				ifmr->ifm_active |= IFM_HDX;
2121 		} else
2122 			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2123 		break;
2124 	case XL_XCVR_COAX:
2125 		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2126 		break;
2127 	/*
2128 	 * XXX MII and BTX/AUTO should be separate cases.
2129 	 */
2130 
2131 	case XL_XCVR_100BTX:
2132 	case XL_XCVR_AUTO:
2133 	case XL_XCVR_MII:
2134 		if (mii != NULL) {
2135 			mii_pollstat(mii);
2136 			ifmr->ifm_active = mii->mii_media_active;
2137 			ifmr->ifm_status = mii->mii_media_status;
2138 		}
2139 		break;
2140 	case XL_XCVR_100BFX:
2141 		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2142 		break;
2143 	default:
2144 		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg);
2145 		break;
2146 	}
2147 }
2148 
2149 int
2150 xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2151 {
2152 	struct xl_softc *sc = ifp->if_softc;
2153 	struct ifreq *ifr = (struct ifreq *)data;
2154 	int s, error = 0;
2155 	struct mii_data *mii = NULL;
2156 
2157 	s = splnet();
2158 
2159 	switch(command) {
2160 	case SIOCSIFADDR:
2161 		ifp->if_flags |= IFF_UP;
2162 		if (!(ifp->if_flags & IFF_RUNNING))
2163 			xl_init(sc);
2164 		break;
2165 
2166 	case SIOCSIFFLAGS:
2167 		if (ifp->if_flags & IFF_UP) {
2168 			if (ifp->if_flags & IFF_RUNNING)
2169 				error = ENETRESET;
2170 			else
2171 				xl_init(sc);
2172 		} else {
2173 			if (ifp->if_flags & IFF_RUNNING)
2174 				xl_stop(sc);
2175 		}
2176 		break;
2177 
2178 	case SIOCGIFMEDIA:
2179 	case SIOCSIFMEDIA:
2180 		if (sc->xl_hasmii != 0)
2181 			mii = &sc->sc_mii;
2182 		if (mii == NULL)
2183 			error = ifmedia_ioctl(ifp, ifr,
2184 			    &sc->ifmedia, command);
2185 		else
2186 			error = ifmedia_ioctl(ifp, ifr,
2187 			    &mii->mii_media, command);
2188 		break;
2189 
2190 	case SIOCGIFRXR:
2191 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
2192 		    NULL, MCLBYTES, &sc->xl_cdata.xl_rx_ring);
2193 		break;
2194 
2195 	default:
2196 		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2197 	}
2198 
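	/*
	 * ENETRESET from the cases above means the running interface
	 * only needs its RX filter reprogrammed, not a full reinit, so
	 * xl_iff() suffices.
	 */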
2199 	if (error == ENETRESET) {
2200 		if (ifp->if_flags & IFF_RUNNING)
2201 			xl_iff(sc);
2202 		error = 0;
2203 	}
2204 
2205 	splx(s);
2206 	return (error);
2207 }
2208 
2209 void
2210 xl_watchdog(struct ifnet *ifp)
2211 {
2212 	struct xl_softc		*sc;
2213 	u_int16_t		status = 0;
2214 
2215 	sc = ifp->if_softc;
2216 
2217 	ifp->if_oerrors++;
2218 	XL_SEL_WIN(4);
2219 	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2220 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2221 
2222 	if (status & XL_MEDIASTAT_CARRIER)
2223 		printf("%s: no carrier - transceiver cable problem?\n",
2224 		    sc->sc_dev.dv_xname);
2225 	xl_txeoc(sc);
2226 	xl_txeof(sc);
2227 	xl_rxeof(sc);
2228 	xl_init(sc);
2229 
2230 	if (!ifq_empty(&ifp->if_snd))
2231 		(*ifp->if_start)(ifp);
2232 }
2233 
2234 void
2235 xl_freetxrx(struct xl_softc *sc)
2236 {
2237 	bus_dmamap_t	map;
2238 	int		i;
2239 
2240 	/*
2241 	 * Free data in the RX lists.
2242 	 */
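	/*
	 * BUS_DMASYNC_POSTREAD is the matching sync for buffers the
	 * device DMAed into (RX); the TX loop below uses POSTWRITE for
	 * buffers the device read from.  Either must happen before the
	 * map is unloaded.
	 */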
2243 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2244 		if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
2245 			map = sc->xl_cdata.xl_rx_chain[i].map;
2246 
2247 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2248 			    BUS_DMASYNC_POSTREAD);
2249 			bus_dmamap_unload(sc->sc_dmat, map);
2250 		}
2251 		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2252 			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2253 			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2254 		}
2255 	}
2256 	bzero(&sc->xl_ldata->xl_rx_list, sizeof(sc->xl_ldata->xl_rx_list));
2257 	/*
2258 	 * Free the TX list buffers.
2259 	 */
2260 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2261 		if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
2262 			map = sc->xl_cdata.xl_tx_chain[i].map;
2263 
2264 			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2265 			    BUS_DMASYNC_POSTWRITE);
2266 			bus_dmamap_unload(sc->sc_dmat, map);
2267 		}
2268 		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2269 			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2270 			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2271 		}
2272 	}
2273 	bzero(&sc->xl_ldata->xl_tx_list, sizeof(sc->xl_ldata->xl_tx_list));
2274 }
2275 
2276 /*
2277  * Stop the adapter and free any mbufs allocated to the
2278  * RX and TX lists.
2279  */
2280 void
2281 xl_stop(struct xl_softc *sc)
2282 {
2283 	struct ifnet *ifp;
2284 
2285 	/* Stop the stats updater. */
2286 	timeout_del(&sc->xl_stsup_tmo);
2287 
2288 	ifp = &sc->sc_arpcom.ac_if;
2289 
2290 	ifp->if_flags &= ~IFF_RUNNING;
2291 	ifq_clr_oactive(&ifp->if_snd);
2292 	ifp->if_timer = 0;
2293 
2294 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
2295 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2296 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2297 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
2298 	xl_wait(sc);
2299 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
2300 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2301 	DELAY(800);
2302 
2303 #ifdef foo
2304 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2305 	xl_wait(sc);
2306 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2307 	xl_wait(sc);
2308 #endif
2309 
2310 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
2311 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
2312 	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
2313 
2314 	if (sc->intr_ack)
2315 		(*sc->intr_ack)(sc);
2316 
2317 	xl_freetxrx(sc);
2318 }
2319 
2320 #ifndef SMALL_KERNEL
2321 void
2322 xl_wol_power(struct xl_softc *sc)
2323 {
2324 	/* Re-enable RX and call upper layer WOL power routine
2325 	 * if WOL is enabled. */
2326 	if ((sc->xl_flags & XL_FLAG_WOL) && sc->wol_power) {
2327 		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
2328 		sc->wol_power(sc->wol_power_arg);
2329 	}
2330 }
2331 #endif
2332 
2333 void
2334 xl_attach(struct xl_softc *sc)
2335 {
2336 	u_int8_t		enaddr[ETHER_ADDR_LEN];
2337 	u_int16_t		xcvr[2];
2338 	struct ifnet		*ifp = &sc->sc_arpcom.ac_if;
2339 	int			i;
2340 	uint64_t		media = IFM_ETHER|IFM_100_TX|IFM_FDX;
2341 	struct ifmedia		*ifm;
2342 
2343 	i = splnet();
2344 	xl_reset(sc);
2345 	splx(i);
2346 
2347 	/*
2348 	 * Get station address from the EEPROM.
2349 	 */
2350 	if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
2351 		printf("\n%s: failed to read station address\n",
2352 		    sc->sc_dev.dv_xname);
2353 		return;
2354 	}
2355 	memcpy(&sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
2356 
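	/*
	 * Canonical four-step bus_dma(9) setup for the descriptor
	 * lists: allocate the memory (bus_dmamem_alloc), map it into
	 * kernel VA as sc_listkva (bus_dmamem_map), create a DMA map
	 * (bus_dmamap_create) and load it (bus_dmamap_load), after
	 * which dm_segs[0].ds_addr is the bus address the chip sees.
	 */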
2357 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data),
2358 	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
2359 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
2360 		printf(": can't alloc list mem\n");
2361 		return;
2362 	}
2363 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
2364 	    sizeof(struct xl_list_data), &sc->sc_listkva,
2365 	    BUS_DMA_NOWAIT) != 0) {
2366 		printf(": can't map list mem\n");
2367 		return;
2368 	}
2369 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1,
2370 	    sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT,
2371 	    &sc->sc_listmap) != 0) {
2372 		printf(": can't alloc list map\n");
2373 		return;
2374 	}
2375 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
2376 	    sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
2377 		printf(": can't load list map\n");
2378 		return;
2379 	}
2380 	sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva;
2381 
2382 	for (i = 0; i < XL_RX_LIST_CNT; i++) {
2383 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
2384 		    0, BUS_DMA_NOWAIT,
2385 		    &sc->xl_cdata.xl_rx_chain[i].map) != 0) {
2386 			printf(": can't create rx map\n");
2387 			return;
2388 		}
2389 	}
2390 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
2391 	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
2392 		printf(": can't create rx spare map\n");
2393 		return;
2394 	}
2395 
2396 	for (i = 0; i < XL_TX_LIST_CNT; i++) {
2397 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
2398 		    XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
2399 		    &sc->xl_cdata.xl_tx_chain[i].map) != 0) {
2400 			printf(": can't create tx map\n");
2401 			return;
2402 		}
2403 	}
2404 	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3,
2405 	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
2406 		printf(": can't create tx spare map\n");
2407 		return;
2408 	}
2409 
2410 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
2411 
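	/*
	 * Offset 12 in window 2 is the chip's reset options register;
	 * the 0x0010 and 0x4000 bits invert the LED and MII power
	 * polarity respectively, which some CardBus models need (hence
	 * the XL_FLAG_INVERT_* quirk flags).
	 */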
2412 	if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
2413 		u_int16_t n;
2414 
2415 		XL_SEL_WIN(2);
2416 		n = CSR_READ_2(sc, 12);
2417 
2418 		if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
2419 			n |= 0x0010;
2420 
2421 		if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
2422 			n |= 0x4000;
2423 
2424 		CSR_WRITE_2(sc, 12, n);
2425 	}
2426 
2427 	/*
2428 	 * Figure out the card type. 3c905B adapters have the
2429 	 * 'supportsNoTxLength' bit set in the capabilities
2430 	 * word in the EEPROM.
2431 	 * Note: my 3c575C cardbus card lies. It returns a value
2432 	 * of 0x1578 for its capabilities word, which is somewhat
2433 	 * nonsensical. Another way to distinguish a 3c90x chip
2434 	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
2435 	 * bit. This will only be set for 3c90x boomerang chips.
2436 	 */
2437 	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
2438 	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
2439 	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
2440 		sc->xl_type = XL_TYPE_905B;
2441 	else
2442 		sc->xl_type = XL_TYPE_90X;
2443 
2444 	/* Set the TX start threshold for best performance. */
2445 	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
2446 
2447 	timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);
2448 
2449 	ifp->if_softc = sc;
2450 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2451 	ifp->if_ioctl = xl_ioctl;
2452 	if (sc->xl_type == XL_TYPE_905B)
2453 		ifp->if_start = xl_start_90xB;
2454 	else
2455 		ifp->if_start = xl_start;
2456 	ifp->if_watchdog = xl_watchdog;
2457 	ifp->if_baudrate = 10000000;
2458 	ifq_init_maxlen(&ifp->if_snd, XL_TX_LIST_CNT - 1);
2459 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
2460 
2461 	ifp->if_capabilities = IFCAP_VLAN_MTU;
2462 
2463 #ifndef XL905B_TXCSUM_BROKEN
2464 	ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
2465 				IFCAP_CSUM_UDPv4;
2466 #endif
2467 
2468 	XL_SEL_WIN(3);
2469 	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
2470 
2471 	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
2472 	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
2473 	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
2474 	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;
2475 
2476 	xl_mediacheck(sc);
2477 
2478 	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2479 	    || sc->xl_media & XL_MEDIAOPT_BT4) {
2480 		ifmedia_init(&sc->sc_mii.mii_media, 0,
2481 		    xl_ifmedia_upd, xl_ifmedia_sts);
2482 		sc->xl_hasmii = 1;
2483 		sc->sc_mii.mii_ifp = ifp;
2484 		sc->sc_mii.mii_readreg = xl_miibus_readreg;
2485 		sc->sc_mii.mii_writereg = xl_miibus_writereg;
2486 		sc->sc_mii.mii_statchg = xl_miibus_statchg;
2487 		xl_setcfg(sc);
2488 		mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
2489 		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
2490 
2491 		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2492 			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
2493 			    0, NULL);
2494 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2495 		} else {
2497 			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2498 		}
2499 		ifm = &sc->sc_mii.mii_media;
2500 	} else {
2502 		ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
2503 		sc->xl_hasmii = 0;
2504 		ifm = &sc->ifmedia;
2505 	}
2506 
2507 	/*
2508 	 * Sanity check. If the user has selected "auto" and this isn't
2509 	 * a 10/100 card of some kind, we need to force the transceiver
2510 	 * type to something sane.
2511 	 */
2512 	if (sc->xl_xcvr == XL_XCVR_AUTO)
2513 		xl_choose_xcvr(sc, 0);
2514 
2515 	if (sc->xl_media & XL_MEDIAOPT_BT) {
2516 		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
2517 		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
2518 		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2519 			ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2520 	}
2521 
2522 	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
2523 		/*
2524 		 * Check for a 10baseFL board in disguise.
2525 		 */
2526 		if (sc->xl_type == XL_TYPE_905B &&
2527 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2528 			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
2529 			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
2530 			    0, NULL);
2531 			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
2532 				ifmedia_add(ifm,
2533 				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
2534 		} else {
2535 			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
2536 		}
2537 	}
2538 
2539 	if (sc->xl_media & XL_MEDIAOPT_BNC) {
2540 		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
2541 	}
2542 
2543 	if (sc->xl_media & XL_MEDIAOPT_BFX) {
2544 		ifp->if_baudrate = 100000000;
2545 		ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
2546 	}
2547 
2548 	/* Choose a default media. */
2549 	switch(sc->xl_xcvr) {
2550 	case XL_XCVR_10BT:
2551 		media = IFM_ETHER|IFM_10_T;
2552 		xl_setmode(sc, media);
2553 		break;
2554 	case XL_XCVR_AUI:
2555 		if (sc->xl_type == XL_TYPE_905B &&
2556 		    sc->xl_media == XL_MEDIAOPT_10FL) {
2557 			media = IFM_ETHER|IFM_10_FL;
2558 			xl_setmode(sc, media);
2559 		} else {
2560 			media = IFM_ETHER|IFM_10_5;
2561 			xl_setmode(sc, media);
2562 		}
2563 		break;
2564 	case XL_XCVR_COAX:
2565 		media = IFM_ETHER|IFM_10_2;
2566 		xl_setmode(sc, media);
2567 		break;
2568 	case XL_XCVR_AUTO:
2569 	case XL_XCVR_100BTX:
2570 	case XL_XCVR_MII:
2571 		/* Chosen by miibus */
2572 		break;
2573 	case XL_XCVR_100BFX:
2574 		media = IFM_ETHER|IFM_100_FX;
2575 		xl_setmode(sc, media);
2576 		break;
2577 	default:
2578 		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname,
2579 		    sc->xl_xcvr);
2580 		/*
2581 		 * This will probably be wrong, but it prevents
2582 		 * the ifmedia code from panicking.
2583 		 */
2584 		media = IFM_ETHER | IFM_10_T;
2585 		break;
2586 	}
2587 
2588 	if (sc->xl_hasmii == 0)
2589 		ifmedia_set(&sc->ifmedia, media);
2590 
2591 	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
2592 		XL_SEL_WIN(0);
2593 		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
2594 	}
2595 
2596 #ifndef SMALL_KERNEL
2597 	/* Check availability of WOL. */
2598 	if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0) {
2599 		ifp->if_capabilities |= IFCAP_WOL;
2600 		ifp->if_wol = xl_wol;
2601 		xl_wol(ifp, 0);
2602 	}
2603 #endif
2604 
2605 	/*
2606 	 * Call MI attach routines.
2607 	 */
2608 	if_attach(ifp);
2609 	ether_ifattach(ifp);
2610 }
2611 
2612 int
2613 xl_detach(struct xl_softc *sc)
2614 {
2615 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2617 
2618 	/* Unhook our tick handler. */
2619 	timeout_del(&sc->xl_stsup_tmo);
2620 
2621 	xl_freetxrx(sc);
2622 
2623 	/* Detach all PHYs */
2624 	if (sc->xl_hasmii)
2625 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2626 
2627 	/* Delete all remaining media. */
2628 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2629 
2630 	ether_ifdetach(ifp);
2631 	if_detach(ifp);
2632 
2633 	return (0);
2634 }
2635 
2636 #ifndef SMALL_KERNEL
2637 int
2638 xl_wol(struct ifnet *ifp, int enable)
2639 {
2640 	struct xl_softc		*sc = ifp->if_softc;
2641 
2642 	XL_SEL_WIN(7);
2643 	if (enable) {
2644 		if (!(ifp->if_flags & IFF_RUNNING))
2645 			xl_init(sc);
2646 		CSR_WRITE_2(sc, XL_W7_BM_PME, XL_BM_PME_MAGIC);
2647 		sc->xl_flags |= XL_FLAG_WOL;
2648 	} else {
2649 		CSR_WRITE_2(sc, XL_W7_BM_PME, 0);
2650 		sc->xl_flags &= ~XL_FLAG_WOL;
2651 	}
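	/*
	 * Wake-on-LAN needs a running receiver to match incoming Magic
	 * Packets, hence the xl_init() above when the interface is
	 * down; xl_wol_power() re-enables RX at power-down for the
	 * same reason.
	 */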
2652 	return (0);
2653 }
2654 #endif
2655 
2656 struct cfdriver xl_cd = {
2657 	NULL, "xl", DV_IFNET
2658 };
2659