1 /*	$OpenBSD: if_ste.c,v 1.70 2023/11/10 15:51:24 bluhm Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
34  */
35 
36 #include "bpfilter.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/socket.h>
42 #include <sys/ioctl.h>
43 #include <sys/errno.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/timeout.h>
47 
48 #include <net/if.h>
49 
50 #include <netinet/in.h>
51 #include <netinet/if_ether.h>
52 
53 #include <net/if_media.h>
54 
55 #if NBPFILTER > 0
56 #include <net/bpf.h>
57 #endif
58 
59 #include <uvm/uvm_extern.h>              /* for vtophys */
60 
61 #include <sys/device.h>
62 
63 #include <dev/mii/mii.h>
64 #include <dev/mii/miivar.h>
65 
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcivar.h>
68 #include <dev/pci/pcidevs.h>
69 
70 #define STE_USEIOSPACE
71 
72 #include <dev/pci/if_stereg.h>
73 
74 int	ste_probe(struct device *, void *, void *);
75 void	ste_attach(struct device *, struct device *, void *);
76 int	ste_intr(void *);
77 void	ste_init(void *);
78 void	ste_rxeoc(struct ste_softc *);
79 void	ste_rxeof(struct ste_softc *);
80 void	ste_txeoc(struct ste_softc *);
81 void	ste_txeof(struct ste_softc *);
82 void	ste_stats_update(void *);
83 void	ste_stop(struct ste_softc *);
84 void	ste_reset(struct ste_softc *);
85 int	ste_ioctl(struct ifnet *, u_long, caddr_t);
86 int	ste_encap(struct ste_softc *, struct ste_chain *,
87 	    struct mbuf *);
88 void	ste_start(struct ifnet *);
89 void	ste_watchdog(struct ifnet *);
90 int	ste_newbuf(struct ste_softc *,
91 	    struct ste_chain_onefrag *,
92 	    struct mbuf *);
93 int	ste_ifmedia_upd(struct ifnet *);
94 void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
95 
96 void	ste_mii_sync(struct ste_softc *);
97 void	ste_mii_send(struct ste_softc *, u_int32_t, int);
98 int	ste_mii_readreg(struct ste_softc *,
99 	    struct ste_mii_frame *);
100 int	ste_mii_writereg(struct ste_softc *,
101 	    struct ste_mii_frame *);
102 int	ste_miibus_readreg(struct device *, int, int);
103 void	ste_miibus_writereg(struct device *, int, int, int);
104 void	ste_miibus_statchg(struct device *);
105 
106 int	ste_eeprom_wait(struct ste_softc *);
107 int	ste_read_eeprom(struct ste_softc *, caddr_t, int,
108 	    int, int);
109 void	ste_wait(struct ste_softc *);
110 void	ste_iff(struct ste_softc *);
111 int	ste_init_rx_list(struct ste_softc *);
112 void	ste_init_tx_list(struct ste_softc *);
113 
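/*
 * Read-modify-write helpers for 32-, 16- and 8-bit registers.  For
 * example, enabling the transmitter later in this file expands as
 *
 *	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
 *	-> CSR_WRITE_2(sc, STE_MACCTL1,
 *	       CSR_READ_2(sc, STE_MACCTL1) | STE_MACCTL1_TX_ENABLE);
 */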
114 #define STE_SETBIT4(sc, reg, x)				\
115 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
116 
117 #define STE_CLRBIT4(sc, reg, x)				\
118 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
119 
120 #define STE_SETBIT2(sc, reg, x)				\
121 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)
122 
123 #define STE_CLRBIT2(sc, reg, x)				\
124 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)
125 
126 #define STE_SETBIT1(sc, reg, x)				\
127 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)
128 
129 #define STE_CLRBIT1(sc, reg, x)				\
130 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)
131 
132 
133 #define MII_SET(x)		STE_SETBIT1(sc, STE_PHYCTL, x)
134 #define MII_CLR(x)		STE_CLRBIT1(sc, STE_PHYCTL, x)
135 
136 const struct pci_matchid ste_devices[] = {
137 	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DFE550TX },
138 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_1 },
139 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_2 }
140 };
141 
142 const struct cfattach ste_ca = {
143 	sizeof(struct ste_softc), ste_probe, ste_attach
144 };
145 
146 struct cfdriver ste_cd = {
147 	NULL, "ste", DV_IFNET
148 };
149 
150 /*
151  * Sync the PHYs by setting data bit and strobing the clock 32 times.
152  */
153 void
154 ste_mii_sync(struct ste_softc *sc)
155 {
156 	int		i;
157 
158 	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
159 
160 	for (i = 0; i < 32; i++) {
161 		MII_SET(STE_PHYCTL_MCLK);
162 		DELAY(1);
163 		MII_CLR(STE_PHYCTL_MCLK);
164 		DELAY(1);
165 	}
166 }
167 
168 /*
169  * Clock a series of bits through the MII.
170  */
171 void
172 ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
173 {
174 	int		i;
175 
176 	MII_CLR(STE_PHYCTL_MCLK);
177 
178 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
179 		if (bits & i) {
180 			MII_SET(STE_PHYCTL_MDATA);
181 		} else {
182 			MII_CLR(STE_PHYCTL_MDATA);
183 		}
184 		DELAY(1);
185 		MII_CLR(STE_PHYCTL_MCLK);
186 		DELAY(1);
187 		MII_SET(STE_PHYCTL_MCLK);
188 	}
189 }
190 
191 /*
192  * Read a PHY register through the MII.
193  */
194 int
195 ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
196 {
197 	int		ack, i, s;
198 
199 	s = splnet();
200 
201 	/*
202 	 * Set up frame for RX.
203 	 */
204 	frame->mii_stdelim = STE_MII_STARTDELIM;
205 	frame->mii_opcode = STE_MII_READOP;
206 	frame->mii_turnaround = 0;
207 	frame->mii_data = 0;
208 
209 	CSR_WRITE_2(sc, STE_PHYCTL, 0);
210 	/*
211  	 * Turn on data xmit.
212 	 */
213 	MII_SET(STE_PHYCTL_MDIR);
214 
215 	ste_mii_sync(sc);
216 
217 	/*
218 	 * Send command/address info.
219 	 */
220 	ste_mii_send(sc, frame->mii_stdelim, 2);
221 	ste_mii_send(sc, frame->mii_opcode, 2);
222 	ste_mii_send(sc, frame->mii_phyaddr, 5);
223 	ste_mii_send(sc, frame->mii_regaddr, 5);
224 
225 	/* Turn off xmit. */
226 	MII_CLR(STE_PHYCTL_MDIR);
227 
228 	/* Idle bit */
229 	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
230 	DELAY(1);
231 	MII_SET(STE_PHYCTL_MCLK);
232 	DELAY(1);
233 
234 	/* Check for ack */
235 	MII_CLR(STE_PHYCTL_MCLK);
236 	DELAY(1);
237 	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
238 	MII_SET(STE_PHYCTL_MCLK);
239 	DELAY(1);
240 
241 	/*
242 	 * Now try reading data bits. If the ack failed, we still
243 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
244 	 */
245 	if (ack) {
246 		for (i = 0; i < 16; i++) {
247 			MII_CLR(STE_PHYCTL_MCLK);
248 			DELAY(1);
249 			MII_SET(STE_PHYCTL_MCLK);
250 			DELAY(1);
251 		}
252 		goto fail;
253 	}
254 
255 	for (i = 0x8000; i; i >>= 1) {
256 		MII_CLR(STE_PHYCTL_MCLK);
257 		DELAY(1);
258 		if (!ack) {
259 			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
260 				frame->mii_data |= i;
261 			DELAY(1);
262 		}
263 		MII_SET(STE_PHYCTL_MCLK);
264 		DELAY(1);
265 	}
266 
267 fail:
268 
269 	MII_CLR(STE_PHYCTL_MCLK);
270 	DELAY(1);
271 	MII_SET(STE_PHYCTL_MCLK);
272 	DELAY(1);
273 
274 	splx(s);
275 
276 	if (ack)
277 		return(1);
278 	return(0);
279 }
280 
281 /*
282  * Write to a PHY register through the MII.
283  */
284 int
285 ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
286 {
287 	int		s;
288 
289 	s = splnet();
290 	/*
291 	 * Set up frame for TX.
292 	 */
293 
294 	frame->mii_stdelim = STE_MII_STARTDELIM;
295 	frame->mii_opcode = STE_MII_WRITEOP;
296 	frame->mii_turnaround = STE_MII_TURNAROUND;
297 
298 	/*
299  	 * Turn on data output.
300 	 */
301 	MII_SET(STE_PHYCTL_MDIR);
302 
303 	ste_mii_sync(sc);
304 
305 	ste_mii_send(sc, frame->mii_stdelim, 2);
306 	ste_mii_send(sc, frame->mii_opcode, 2);
307 	ste_mii_send(sc, frame->mii_phyaddr, 5);
308 	ste_mii_send(sc, frame->mii_regaddr, 5);
309 	ste_mii_send(sc, frame->mii_turnaround, 2);
310 	ste_mii_send(sc, frame->mii_data, 16);
311 
312 	/* Idle bit. */
313 	MII_SET(STE_PHYCTL_MCLK);
314 	DELAY(1);
315 	MII_CLR(STE_PHYCTL_MCLK);
316 	DELAY(1);
317 
318 	/*
319 	 * Turn off xmit.
320 	 */
321 	MII_CLR(STE_PHYCTL_MDIR);
322 
323 	splx(s);
324 
325 	return(0);
326 }
327 
328 int
329 ste_miibus_readreg(struct device *self, int phy, int reg)
330 {
331 	struct ste_softc	*sc = (struct ste_softc *)self;
332 	struct ste_mii_frame	frame;
333 
334 	if (sc->ste_one_phy && phy != 0)
335 		return (0);
336 
337 	bzero(&frame, sizeof(frame));
338 
339 	frame.mii_phyaddr = phy;
340 	frame.mii_regaddr = reg;
341 	ste_mii_readreg(sc, &frame);
342 
343 	return(frame.mii_data);
344 }
345 
346 void
347 ste_miibus_writereg(struct device *self, int phy, int reg, int data)
348 {
349 	struct ste_softc	*sc = (struct ste_softc *)self;
350 	struct ste_mii_frame	frame;
351 
352 	bzero(&frame, sizeof(frame));
353 
354 	frame.mii_phyaddr = phy;
355 	frame.mii_regaddr = reg;
356 	frame.mii_data = data;
357 
358 	ste_mii_writereg(sc, &frame);
359 }
360 
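/*
 * Handle a media/duplex change reported by the MII layer: stall the
 * DMA engines, update the MAC full-duplex bit and unstall.
 */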
361 void
362 ste_miibus_statchg(struct device *self)
363 {
364 	struct ste_softc	*sc = (struct ste_softc *)self;
365 	struct mii_data		*mii;
366 	int fdx, fcur;
367 
368 	mii = &sc->sc_mii;
369 
370 	fcur = CSR_READ_2(sc, STE_MACCTL0) & STE_MACCTL0_FULLDUPLEX;
371 	fdx = (mii->mii_media_active & IFM_GMASK) == IFM_FDX;
372 
373 	if ((fcur && fdx) || (!fcur && !fdx))
374 		return;
375 
376 	STE_SETBIT4(sc, STE_DMACTL,
377 	    STE_DMACTL_RXDMA_STALL | STE_DMACTL_TXDMA_STALL);
378 	ste_wait(sc);
379 
380 	if (fdx)
381 		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
382 	else
383 		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
384 
385 	STE_SETBIT4(sc, STE_DMACTL,
386 	    STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);
387 }
388 
389 int
390 ste_ifmedia_upd(struct ifnet *ifp)
391 {
392 	struct ste_softc	*sc;
393 	struct mii_data		*mii;
394 
395 	sc = ifp->if_softc;
396 	mii = &sc->sc_mii;
397 	sc->ste_link = 0;
398 	if (mii->mii_instance) {
399 		struct mii_softc	*miisc;
400 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
401 			mii_phy_reset(miisc);
402 	}
403 	mii_mediachg(mii);
404 
405 	return(0);
406 }
407 
408 void
409 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
410 {
411 	struct ste_softc	*sc;
412 	struct mii_data		*mii;
413 
414 	sc = ifp->if_softc;
415 	mii = &sc->sc_mii;
416 
417 	mii_pollstat(mii);
418 	ifmr->ifm_active = mii->mii_media_active;
419 	ifmr->ifm_status = mii->mii_media_status;
420 }
421 
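/*
 * Wait for an earlier DMA stall/unstall request to complete.
 */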
422 void
423 ste_wait(struct ste_softc *sc)
424 {
425 	int		i;
426 
427 	for (i = 0; i < STE_TIMEOUT; i++) {
428 		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
429 			break;
430 	}
431 
432 	if (i == STE_TIMEOUT)
433 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
434 }
435 
436 /*
437  * The EEPROM is slow: give it time to come ready after issuing
438  * it a command.
439  */
440 int
441 ste_eeprom_wait(struct ste_softc *sc)
442 {
443 	int		i;
444 
445 	DELAY(1000);
446 
447 	for (i = 0; i < 100; i++) {
448 		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
449 			DELAY(1000);
450 		else
451 			break;
452 	}
453 
454 	if (i == 100) {
455 		printf("%s: eeprom failed to come ready\n",
456 		    sc->sc_dev.dv_xname);
457 		return(1);
458 	}
459 
460 	return(0);
461 }
462 
463 /*
464  * Read a sequence of words from the EEPROM. Note that ethernet address
465  * data is stored in the EEPROM in network byte order.
466  */
467 int
468 ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
469 {
470 	int			err = 0, i;
471 	u_int16_t		word = 0, *ptr;
472 
473 	if (ste_eeprom_wait(sc))
474 		return(1);
475 
476 	for (i = 0; i < cnt; i++) {
477 		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
478 		err = ste_eeprom_wait(sc);
479 		if (err)
480 			break;
481 		word = CSR_READ_2(sc, STE_EEPROM_DATA);
482 		ptr = (u_int16_t *)(dest + (i * 2));
483 		if (swap)
484 			*ptr = ntohs(word);
485 		else
486 			*ptr = word;
487 	}
488 
489 	return(err ? 1 : 0);
490 }
491 
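/*
 * Program the receive filter: unicast/broadcast, promiscuous and
 * all-multicast modes, plus the 64-bit multicast hash table.
 */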
492 void
493 ste_iff(struct ste_softc *sc)
494 {
495 	struct ifnet		*ifp = &sc->arpcom.ac_if;
496 	struct arpcom		*ac = &sc->arpcom;
497 	struct ether_multi	*enm;
498 	struct ether_multistep	step;
499 	u_int32_t		rxmode, hashes[2];
500 	int			h = 0;
501 
502 	rxmode = CSR_READ_1(sc, STE_RX_MODE);
503 	rxmode &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_BROADCAST |
504 	    STE_RXMODE_MULTIHASH | STE_RXMODE_PROMISC |
505 	    STE_RXMODE_UNICAST);
506 	bzero(hashes, sizeof(hashes));
507 	ifp->if_flags &= ~IFF_ALLMULTI;
508 
509 	/*
510 	 * Always accept broadcast frames.
511 	 * Always accept frames destined to our station address.
512 	 */
513 	rxmode |= STE_RXMODE_BROADCAST | STE_RXMODE_UNICAST;
514 
515 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
516 		ifp->if_flags |= IFF_ALLMULTI;
517 		rxmode |= STE_RXMODE_ALLMULTI;
518 		if (ifp->if_flags & IFF_PROMISC)
519 			rxmode |= STE_RXMODE_PROMISC;
520 	} else {
521 		rxmode |= STE_RXMODE_MULTIHASH;
522 
523 		/* now program new ones */
524 		ETHER_FIRST_MULTI(step, ac, enm);
525 		while (enm != NULL) {
526 			h = ether_crc32_be(enm->enm_addrlo,
527 			    ETHER_ADDR_LEN) & 0x3F;
528 
529 			if (h < 32)
530 				hashes[0] |= (1 << h);
531 			else
532 				hashes[1] |= (1 << (h - 32));
533 
534 			ETHER_NEXT_MULTI(step, enm);
535 		}
536 	}
537 
538 	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
539 	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
540 	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
541 	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
542 	CSR_WRITE_1(sc, STE_RX_MODE, rxmode);
543 }
544 
545 int
546 ste_intr(void *xsc)
547 {
548 	struct ste_softc	*sc;
549 	struct ifnet		*ifp;
550 	u_int16_t		status;
551 	int			claimed = 0;
552 
553 	sc = xsc;
554 	ifp = &sc->arpcom.ac_if;
555 
556 	/* See if this is really our interrupt. */
557 	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
558 		return claimed;
559 
560 	for (;;) {
561 		status = CSR_READ_2(sc, STE_ISR_ACK);
562 
563 		if (!(status & STE_INTRS))
564 			break;
565 
566 		claimed = 1;
567 
568 		if (status & STE_ISR_RX_DMADONE) {
569 			ste_rxeoc(sc);
570 			ste_rxeof(sc);
571 		}
572 
573 		if (status & STE_ISR_TX_DMADONE)
574 			ste_txeof(sc);
575 
576 		if (status & STE_ISR_TX_DONE)
577 			ste_txeoc(sc);
578 
579 		if (status & STE_ISR_STATS_OFLOW) {
580 			timeout_del(&sc->sc_stats_tmo);
581 			ste_stats_update(sc);
582 		}
583 
584 		if (status & STE_ISR_LINKEVENT)
585 			mii_pollstat(&sc->sc_mii);
586 
587 		if (status & STE_ISR_HOSTERR)
588 			ste_init(sc);
589 	}
590 
591 	/* Re-enable interrupts */
592 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
593 
594 	if (ifp->if_flags & IFF_RUNNING && !ifq_empty(&ifp->if_snd))
595 		ste_start(ifp);
596 
597 	return claimed;
598 }
599 
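/*
 * If the current RX head shows no status, walk the ring looking for a
 * descriptor the chip has completed and, if one is found, resync the
 * software RX head with it.
 */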
600 void
601 ste_rxeoc(struct ste_softc *sc)
602 {
603 	struct ste_chain_onefrag *cur_rx;
604 
605 	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
606 		cur_rx = sc->ste_cdata.ste_rx_head;
607 		do {
608 			cur_rx = cur_rx->ste_next;
609 			/* If the ring is empty, just return. */
610 			if (cur_rx == sc->ste_cdata.ste_rx_head)
611 				return;
612 		} while (cur_rx->ste_ptr->ste_status == 0);
613 		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
614 			/* We've fallen behind the chip: catch it. */
615 			sc->ste_cdata.ste_rx_head = cur_rx;
616 		}
617 	}
618 }
619 
620 /*
621  * A frame has been uploaded: pass the resulting mbuf chain up to
622  * the higher level protocols.
623  */
624 void
625 ste_rxeof(struct ste_softc *sc)
626 {
627 	struct mbuf		*m;
628 	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
629 	struct ifnet		*ifp;
630 	struct ste_chain_onefrag	*cur_rx;
631 	int			total_len = 0, count = 0;
632 	u_int32_t		rxstat;
633 
634 	ifp = &sc->arpcom.ac_if;
635 
636 	while ((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
637 	    & STE_RXSTAT_DMADONE) {
638 		if ((STE_RX_LIST_CNT - count) < 3)
639 			break;
640 
641 		cur_rx = sc->ste_cdata.ste_rx_head;
642 		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;
643 
644 		/*
645 		 * If an error occurs, update stats, clear the
646 		 * status word and leave the mbuf cluster in place:
647 		 * it should simply get re-used next time this descriptor
648 	 	 * comes up in the ring.
649 		 */
650 		if (rxstat & STE_RXSTAT_FRAME_ERR) {
651 			ifp->if_ierrors++;
652 			cur_rx->ste_ptr->ste_status = 0;
653 			continue;
654 		}
655 
656 		/*
657 		 * If the error bit was not set, the upload complete
658 		 * bit should be set, which means we have a valid packet.
659 		 * If not, something truly strange has happened.
660 		 */
661 		if (!(rxstat & STE_RXSTAT_DMADONE)) {
662 			printf("%s: bad receive status -- packet dropped\n",
663 				sc->sc_dev.dv_xname);
664 			ifp->if_ierrors++;
665 			cur_rx->ste_ptr->ste_status = 0;
666 			continue;
667 		}
668 
669 		/* No errors; receive the packet. */
670 		m = cur_rx->ste_mbuf;
671 		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;
672 
673 		/*
674 		 * Try to conjure up a new mbuf cluster. If that
675 		 * fails, it means we have an out of memory condition and
676 		 * should leave the buffer in place and continue. This will
677 		 * result in a lost packet, but there's little else we
678 		 * can do in this situation.
679 		 */
680 		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
681 			ifp->if_ierrors++;
682 			cur_rx->ste_ptr->ste_status = 0;
683 			continue;
684 		}
685 
686 		m->m_pkthdr.len = m->m_len = total_len;
687 
688 		ml_enqueue(&ml, m);
689 
690 		cur_rx->ste_ptr->ste_status = 0;
691 		count++;
692 	}
693 
694 	if_input(ifp, &ml);
695 }
696 
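/*
 * Check the TX status register and recover from transmit errors by
 * reinitializing the chip and, on underruns, bumping the TX start
 * threshold.
 */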
697 void
698 ste_txeoc(struct ste_softc *sc)
699 {
700 	u_int8_t		txstat;
701 	struct ifnet		*ifp;
702 
703 	ifp = &sc->arpcom.ac_if;
704 
705 	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
706 	    STE_TXSTATUS_TXDONE) {
707 		if (txstat & STE_TXSTATUS_UNDERRUN ||
708 		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
709 		    txstat & STE_TXSTATUS_RECLAIMERR) {
710 			ifp->if_oerrors++;
711 			printf("%s: transmission error: %x\n",
712 			    sc->sc_dev.dv_xname, txstat);
713 
714 			ste_init(sc);
715 
716 			if (txstat & STE_TXSTATUS_UNDERRUN &&
717 			    sc->ste_tx_thresh < ETHER_MAX_DIX_LEN) {
718 				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
719 				printf("%s: tx underrun, increasing tx"
720 				    " start threshold to %d bytes\n",
721 				    sc->sc_dev.dv_xname, sc->ste_tx_thresh);
722 			}
723 			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
724 			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
725 			    (ETHER_MAX_DIX_LEN >> 4));
726 		}
727 		ste_init(sc);
728 		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
729 	}
730 }
731 
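/*
 * Free the mbufs of descriptors the chip has finished transmitting and
 * advance the TX consumer index.
 */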
732 void
733 ste_txeof(struct ste_softc *sc)
734 {
735 	struct ste_chain	*cur_tx = NULL;
736 	struct ifnet		*ifp;
737 	int			idx;
738 
739 	ifp = &sc->arpcom.ac_if;
740 
741 	idx = sc->ste_cdata.ste_tx_cons;
742 	while (idx != sc->ste_cdata.ste_tx_prod) {
743 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
744 
745 		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
746 			break;
747 
748 		m_freem(cur_tx->ste_mbuf);
749 		cur_tx->ste_mbuf = NULL;
750 		ifq_clr_oactive(&ifp->if_snd);
751 
752 		STE_INC(idx, STE_TX_LIST_CNT);
753 	}
754 
755 	sc->ste_cdata.ste_tx_cons = idx;
756 	if (idx == sc->ste_cdata.ste_tx_prod)
757 		ifp->if_timer = 0;
758 }
759 
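/*
 * Runs once a second: fold the hardware collision counters into
 * if_collisions and, while the link is down, poll the PHY for link.
 */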
760 void
761 ste_stats_update(void *xsc)
762 {
763 	struct ste_softc	*sc;
764 	struct ifnet		*ifp;
765 	struct mii_data		*mii;
766 	int			s;
767 
768 	s = splnet();
769 
770 	sc = xsc;
771 	ifp = &sc->arpcom.ac_if;
772 	mii = &sc->sc_mii;
773 
774 	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
775 	    + CSR_READ_1(sc, STE_MULTI_COLLS)
776 	    + CSR_READ_1(sc, STE_SINGLE_COLLS);
777 
778 	if (!sc->ste_link) {
779 		mii_pollstat(mii);
780 		if (mii->mii_media_status & IFM_ACTIVE &&
781 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
782 			sc->ste_link++;
783 			/*
784 			 * We don't get a callback on re-init, so do it here;
785 			 * otherwise we get stuck in the wrong link state.
786 			 */
787 			ste_miibus_statchg((struct device *)sc);
788 			if (!ifq_empty(&ifp->if_snd))
789 				ste_start(ifp);
790 		}
791 	}
792 
793 	timeout_add_sec(&sc->sc_stats_tmo, 1);
794 	splx(s);
795 }
796 
797 /*
798  * Probe for a Sundance ST201 chip. Check the PCI vendor and device
799  * IDs against our list and return a match if we find one.
800  */
801 int
802 ste_probe(struct device *parent, void *match, void *aux)
803 {
804 	return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
805 	    nitems(ste_devices)));
806 }
807 
808 /*
809  * Attach the interface. Allocate softc structures, do ifmedia
810  * setup and ethernet/BPF attach.
811  */
812 void
813 ste_attach(struct device *parent, struct device *self, void *aux)
814 {
815 	const char		*intrstr = NULL;
816 	struct ste_softc	*sc = (struct ste_softc *)self;
817 	struct pci_attach_args	*pa = aux;
818 	pci_chipset_tag_t	pc = pa->pa_pc;
819 	pci_intr_handle_t	ih;
820 	struct ifnet		*ifp;
821 	bus_size_t		size;
822 
823 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
824 
825 	/*
826 	 * Only use one PHY since this chip reports multiple PHYs.
827 	 * Note: on the DFE-550TX the PHY is at 1; on the DFE-580TX
828 	 * it is at 0 & 1.  It is rev 0x12.
829 	 */
830 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DLINK &&
831 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DLINK_DFE550TX &&
832 	    PCI_REVISION(pa->pa_class) == 0x12)
833 		sc->ste_one_phy = 1;
834 
835 	/*
836 	 * Map control/status registers.
837 	 */
838 
839 #ifdef STE_USEIOSPACE
840 	if (pci_mapreg_map(pa, STE_PCI_LOIO,
841 	    PCI_MAPREG_TYPE_IO, 0,
842 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
843 		printf(": can't map i/o space\n");
844 		return;
845 	}
846 #else
847 	if (pci_mapreg_map(pa, STE_PCI_LOMEM,
848 	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
849 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
850 		printf(": can't map mem space\n");
851 		return;
852 	}
853 #endif
854 
855 	/* Allocate interrupt */
856 	if (pci_intr_map(pa, &ih)) {
857 		printf(": couldn't map interrupt\n");
858 		goto fail_1;
859 	}
860 	intrstr = pci_intr_string(pc, ih);
861 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
862 	    self->dv_xname);
863 	if (sc->sc_ih == NULL) {
864 		printf(": couldn't establish interrupt");
865 		if (intrstr != NULL)
866 			printf(" at %s", intrstr);
867 		printf("\n");
868 		goto fail_1;
869 	}
870 	printf(": %s", intrstr);
871 
872 	/* Reset the adapter. */
873 	ste_reset(sc);
874 
875 	/*
876 	 * Get station address from the EEPROM.
877 	 */
878 	if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
879 	    STE_EEADDR_NODE0, 3, 0)) {
880 		printf(": failed to read station address\n");
881 		goto fail_2;
882 	}
883 
884 	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
885 
886 	sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
887 	    M_DEVBUF, M_DONTWAIT);
888 	if (sc->ste_ldata_ptr == NULL) {
889 		printf(": no memory for list buffers!\n");
890 		goto fail_2;
891 	}
892 
893 	sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
894 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
895 
896 	ifp = &sc->arpcom.ac_if;
897 	ifp->if_softc = sc;
898 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
899 	ifp->if_ioctl = ste_ioctl;
900 	ifp->if_start = ste_start;
901 	ifp->if_watchdog = ste_watchdog;
902 	ifq_init_maxlen(&ifp->if_snd, STE_TX_LIST_CNT - 1);
903 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
904 	ifp->if_capabilities = IFCAP_VLAN_MTU;
905 
906 	sc->ste_tx_thresh = STE_TXSTART_THRESH;
907 
908 	sc->sc_mii.mii_ifp = ifp;
909 	sc->sc_mii.mii_readreg = ste_miibus_readreg;
910 	sc->sc_mii.mii_writereg = ste_miibus_writereg;
911 	sc->sc_mii.mii_statchg = ste_miibus_statchg;
912 	ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd,ste_ifmedia_sts);
913 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
914 	    0);
915 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
916 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
917 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
918 	} else
919 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
920 
921 	/*
922 	 * Call MI attach routines.
923 	 */
924 	if_attach(ifp);
925 	ether_ifattach(ifp);
926 	return;
927 
928 fail_2:
929 	pci_intr_disestablish(pc, sc->sc_ih);
930 
931 fail_1:
932 	bus_space_unmap(sc->ste_btag, sc->ste_bhandle, size);
933 }
934 
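/*
 * Attach a fresh mbuf cluster (or recycle the one passed in) to an RX
 * descriptor.
 */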
935 int
936 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c, struct mbuf *m)
937 {
938 	struct mbuf		*m_new = NULL;
939 
940 	if (m == NULL) {
941 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
942 		if (m_new == NULL)
943 			return(ENOBUFS);
944 		MCLGET(m_new, M_DONTWAIT);
945 		if (!(m_new->m_flags & M_EXT)) {
946 			m_freem(m_new);
947 			return(ENOBUFS);
948 		}
949 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
950 	} else {
951 		m_new = m;
952 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
953 		m_new->m_data = m_new->m_ext.ext_buf;
954 	}
955 
956 	m_adj(m_new, ETHER_ALIGN);
957 
958 	c->ste_mbuf = m_new;
959 	c->ste_ptr->ste_status = 0;
960 	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, vaddr_t));
961 	c->ste_ptr->ste_frag.ste_len = (ETHER_MAX_DIX_LEN + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;
962 
963 	return(0);
964 }
965 
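/*
 * Arrange the RX descriptors into a circular list and give each one an
 * mbuf cluster.
 */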
966 int
967 ste_init_rx_list(struct ste_softc *sc)
968 {
969 	struct ste_chain_data	*cd;
970 	struct ste_list_data	*ld;
971 	int			i;
972 
973 	cd = &sc->ste_cdata;
974 	ld = sc->ste_ldata;
975 
976 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
977 		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
978 		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
979 			return(ENOBUFS);
980 		if (i == (STE_RX_LIST_CNT - 1)) {
981 			cd->ste_rx_chain[i].ste_next =
982 			    &cd->ste_rx_chain[0];
983 			ld->ste_rx_list[i].ste_next =
984 			    vtophys((vaddr_t)&ld->ste_rx_list[0]);
985 		} else {
986 			cd->ste_rx_chain[i].ste_next =
987 			    &cd->ste_rx_chain[i + 1];
988 			ld->ste_rx_list[i].ste_next =
989 			    vtophys((vaddr_t)&ld->ste_rx_list[i + 1]);
990 		}
991 		ld->ste_rx_list[i].ste_status = 0;
992 	}
993 
994 	cd->ste_rx_head = &cd->ste_rx_chain[0];
995 
996 	return(0);
997 }
998 
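/*
 * Link the TX descriptors into a software ring and clear the hardware
 * descriptors.
 */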
999 void
1000 ste_init_tx_list(struct ste_softc *sc)
1001 {
1002 	struct ste_chain_data	*cd;
1003 	struct ste_list_data	*ld;
1004 	int			i;
1005 
1006 	cd = &sc->ste_cdata;
1007 	ld = sc->ste_ldata;
1008 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1009 		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1010 		cd->ste_tx_chain[i].ste_phys = vtophys((vaddr_t)&ld->ste_tx_list[i]);
1011 		if (i == (STE_TX_LIST_CNT - 1))
1012 			cd->ste_tx_chain[i].ste_next =
1013 			    &cd->ste_tx_chain[0];
1014 		else
1015 			cd->ste_tx_chain[i].ste_next =
1016 			    &cd->ste_tx_chain[i + 1];
1017 	}
1018 
1019 	bzero(ld->ste_tx_list, sizeof(struct ste_desc) * STE_TX_LIST_CNT);
1020 
1021 	cd->ste_tx_prod = 0;
1022 	cd->ste_tx_cons = 0;
1023 }
1024 
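/*
 * Bring the chip up: program the station address, descriptor lists,
 * thresholds, receive filter and DMA engines, then enable interrupts.
 */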
1025 void
1026 ste_init(void *xsc)
1027 {
1028 	struct ste_softc	*sc = (struct ste_softc *)xsc;
1029 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1030 	struct mii_data		*mii;
1031 	int			i, s;
1032 
1033 	s = splnet();
1034 
1035 	ste_stop(sc);
1036 	/* Reset the chip to a known state. */
1037 	ste_reset(sc);
1038 
1039 	mii = &sc->sc_mii;
1040 
1041 	/* Init our MAC address */
1042 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1043 		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1044 	}
1045 
1046 	/* Init RX list */
1047 	if (ste_init_rx_list(sc) == ENOBUFS) {
1048 		printf("%s: initialization failed: no "
1049 		    "memory for RX buffers\n", sc->sc_dev.dv_xname);
1050 		ste_stop(sc);
1051 		splx(s);
1052 		return;
1053 	}
1054 
1055 	/* Set RX polling interval */
1056 	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
1057 
1058 	/* Init TX descriptors */
1059 	ste_init_tx_list(sc);
1060 
1061 	/* Set the TX freethresh value */
1062 	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, ETHER_MAX_DIX_LEN >> 8);
1063 
1064 	/* Set the TX start threshold for best performance. */
1065 	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
1066 
1067 	/* Set the TX reclaim threshold. */
1068 	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (ETHER_MAX_DIX_LEN >> 4));
1069 
1070 	/* Program promiscuous mode and multicast filters. */
1071 	ste_iff(sc);
1072 
1073 	/* Load the address of the RX list. */
1074 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1075 	ste_wait(sc);
1076 	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
1077 	    vtophys((vaddr_t)&sc->ste_ldata->ste_rx_list[0]));
1078 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1079 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1080 
1081 	/* Set TX polling interval (defer until we TX first packet) */
1082 	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
1083 
1084 	/* Load address of the TX list */
1085 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1086 	ste_wait(sc);
1087 	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
1088 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1089 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1090 	ste_wait(sc);
1091 	sc->ste_tx_prev = NULL;
1092 
1093 	/* Enable receiver and transmitter */
1094 	CSR_WRITE_2(sc, STE_MACCTL0, 0);
1095 	CSR_WRITE_2(sc, STE_MACCTL1, 0);
1096 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
1097 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
1098 
1099 	/* Enable stats counters. */
1100 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
1101 
1102 	/* Enable interrupts. */
1103 	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
1104 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
1105 
1106 	/* Accept VLAN length packets */
1107 	CSR_WRITE_2(sc, STE_MAX_FRAMELEN,
1108 	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
1109 
1110 	ste_ifmedia_upd(ifp);
1111 
1112 	ifp->if_flags |= IFF_RUNNING;
1113 	ifq_clr_oactive(&ifp->if_snd);
1114 
1115 	splx(s);
1116 
1117 	timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
1118 	timeout_add_sec(&sc->sc_stats_tmo, 1);
1119 }
1120 
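/*
 * Stop the chip and free any mbufs still sitting on the RX and TX
 * rings.
 */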
1121 void
1122 ste_stop(struct ste_softc *sc)
1123 {
1124 	int			i;
1125 	struct ifnet		*ifp;
1126 
1127 	ifp = &sc->arpcom.ac_if;
1128 
1129 	timeout_del(&sc->sc_stats_tmo);
1130 
1131 	ifp->if_flags &= ~IFF_RUNNING;
1132 	ifq_clr_oactive(&ifp->if_snd);
1133 
1134 	CSR_WRITE_2(sc, STE_IMR, 0);
1135 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
1136 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
1137 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
1138 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1139 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1140 	ste_wait(sc);
1141 	/*
1142 	 * Try really hard to stop the RX engine; otherwise, under heavy RX
1143 	 * traffic, the chip will keep writing into de-allocated memory.
1144 	 */
1145 	ste_reset(sc);
1146 
1147 	sc->ste_link = 0;
1148 
1149 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1150 		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
1151 			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
1152 			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
1153 		}
1154 	}
1155 
1156 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1157 		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
1158 			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
1159 			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
1160 		}
1161 	}
1162 
1163 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
1164 }
1165 
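/*
 * Issue a full ASIC reset and wait for it to complete.
 */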
1166 void
1167 ste_reset(struct ste_softc *sc)
1168 {
1169 	int		i;
1170 
1171 	STE_SETBIT4(sc, STE_ASICCTL,
1172 	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1173 	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1174 	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1175 	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1176 	    STE_ASICCTL_EXTRESET_RESET);
1177 
1178 	DELAY(100000);
1179 
1180 	for (i = 0; i < STE_TIMEOUT; i++) {
1181 		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1182 			break;
1183 	}
1184 
1185 	if (i == STE_TIMEOUT)
1186 		printf("%s: global reset never completed\n",
1187 		    sc->sc_dev.dv_xname);
1188 }
1189 
1190 int
1191 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1192 {
1193 	struct ste_softc	*sc = ifp->if_softc;
1194 	struct ifreq		*ifr = (struct ifreq *) data;
1195 	int			s, error = 0;
1196 
1197 	s = splnet();
1198 
1199 	switch (command) {
1200 	case SIOCSIFADDR:
1201 		ifp->if_flags |= IFF_UP;
1202 		if (!(ifp->if_flags & IFF_RUNNING))
1203 			ste_init(sc);
1204 		break;
1205 
1206 	case SIOCSIFFLAGS:
1207 		if (ifp->if_flags & IFF_UP) {
1208 			if (ifp->if_flags & IFF_RUNNING)
1209 				error = ENETRESET;
1210 			else {
1211 				sc->ste_tx_thresh = STE_TXSTART_THRESH;
1212 				ste_init(sc);
1213 			}
1214 		} else {
1215 			if (ifp->if_flags & IFF_RUNNING)
1216 				ste_stop(sc);
1217 		}
1218 		break;
1219 
1220 	case SIOCGIFMEDIA:
1221 	case SIOCSIFMEDIA:
1222 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1223 		break;
1224 
1225 	default:
1226 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1227 	}
1228 
1229 	if (error == ENETRESET) {
1230 		if (ifp->if_flags & IFF_RUNNING)
1231 			ste_iff(sc);
1232 		error = 0;
1233 	}
1234 
1235 	splx(s);
1236 	return(error);
1237 }
1238 
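/*
 * Load a packet into a TX descriptor's fragment list.  If the mbuf
 * chain has more than STE_MAXFRAGS segments it is first copied into a
 * single mbuf (cluster).
 */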
1239 int
1240 ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
1241 {
1242 	int			frag = 0;
1243 	struct ste_frag		*f = NULL;
1244 	struct mbuf		*m;
1245 	struct ste_desc		*d;
1246 
1247 	d = c->ste_ptr;
1248 	d->ste_ctl = 0;
1249 
1250 encap_retry:
1251 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1252 		if (m->m_len != 0) {
1253 			if (frag == STE_MAXFRAGS)
1254 				break;
1255 			f = &d->ste_frags[frag];
1256 			f->ste_addr = vtophys(mtod(m, vaddr_t));
1257 			f->ste_len = m->m_len;
1258 			frag++;
1259 		}
1260 	}
1261 
1262 	if (m != NULL) {
1263 		struct mbuf *mn;
1264 
1265 		/*
1266 		 * We ran out of segments. We have to recopy this
1267 		 * mbuf chain first. Bail out if we can't get the
1268 		 * new buffers.
1269 		 */
1270 		MGETHDR(mn, M_DONTWAIT, MT_DATA);
1271 		if (mn == NULL) {
1272 			m_freem(m_head);
1273 			return ENOMEM;
1274 		}
1275 		if (m_head->m_pkthdr.len > MHLEN) {
1276 			MCLGET(mn, M_DONTWAIT);
1277 			if ((mn->m_flags & M_EXT) == 0) {
1278 				m_freem(mn);
1279 				m_freem(m_head);
1280 				return ENOMEM;
1281 			}
1282 		}
1283 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1284 			   mtod(mn, caddr_t));
1285 		mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
1286 		m_freem(m_head);
1287 		m_head = mn;
1288 		goto encap_retry;
1289 	}
1290 
1291 	c->ste_mbuf = m_head;
1292 	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
1293 	d->ste_ctl = 1;
1294 
1295 	return(0);
1296 }
1297 
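/*
 * Dequeue packets from the interface send queue, encapsulate them onto
 * the TX ring and restart the transmit DMA engine when it is idle.
 */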
1298 void
1299 ste_start(struct ifnet *ifp)
1300 {
1301 	struct ste_softc	*sc;
1302 	struct mbuf		*m_head = NULL;
1303 	struct ste_chain	*cur_tx;
1304 	int			idx;
1305 
1306 	sc = ifp->if_softc;
1307 
1308 	if (!sc->ste_link)
1309 		return;
1310 
1311 	if (ifq_is_oactive(&ifp->if_snd))
1312 		return;
1313 
1314 	idx = sc->ste_cdata.ste_tx_prod;
1315 
1316 	while (sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
1317 		/*
1318 		 * We cannot re-use the last (free) descriptor;
1319 		 * the chip may not have read its ste_next yet.
1320 		 */
1321 		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
1322 		    sc->ste_cdata.ste_tx_cons) {
1323 			ifq_set_oactive(&ifp->if_snd);
1324 			break;
1325 		}
1326 
1327 		m_head = ifq_dequeue(&ifp->if_snd);
1328 		if (m_head == NULL)
1329 			break;
1330 
1331 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
1332 
1333 		if (ste_encap(sc, cur_tx, m_head) != 0)
1334 			break;
1335 
1336 		cur_tx->ste_ptr->ste_next = 0;
1337 
1338 		if (sc->ste_tx_prev == NULL) {
1339 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1340 			/* Load address of the TX list */
1341 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1342 			ste_wait(sc);
1343 
1344 			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
1345 			    vtophys((vaddr_t)&sc->ste_ldata->ste_tx_list[0]));
1346 
1347 			/* Set TX polling interval to start TX engine */
1348 			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
1349 
1350 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1351 			ste_wait(sc);
1352 		} else {
1353 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1354 			sc->ste_tx_prev->ste_ptr->ste_next
1355 				= cur_tx->ste_phys;
1356 		}
1357 
1358 		sc->ste_tx_prev = cur_tx;
1359 
1360 #if NBPFILTER > 0
1361 		/*
1362 		 * If there's a BPF listener, bounce a copy of this frame
1363 		 * to him.
1364 	 	 */
1365 		if (ifp->if_bpf)
1366 			bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf,
1367 			    BPF_DIRECTION_OUT);
1368 #endif
1369 
1370 		STE_INC(idx, STE_TX_LIST_CNT);
1371 		ifp->if_timer = 5;
1372 	}
1373 	sc->ste_cdata.ste_tx_prod = idx;
1374 }
1375 
1376 void
1377 ste_watchdog(struct ifnet *ifp)
1378 {
1379 	struct ste_softc	*sc;
1380 
1381 	sc = ifp->if_softc;
1382 
1383 	ifp->if_oerrors++;
1384 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1385 
1386 	ste_txeoc(sc);
1387 	ste_txeof(sc);
1388 	ste_rxeoc(sc);
1389 	ste_rxeof(sc);
1390 	ste_init(sc);
1391 
1392 	if (!ifq_empty(&ifp->if_snd))
1393 		ste_start(ifp);
1394 }
1395