1 /*	$OpenBSD: if_ste.c,v 1.57 2014/12/22 02:28:52 tedu Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
34  */
35 
36 #include "bpfilter.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/ioctl.h>
44 #include <sys/errno.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/timeout.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_types.h>
52 
53 #include <netinet/in.h>
54 #include <netinet/if_ether.h>
55 
56 #include <net/if_media.h>
57 
58 #if NBPFILTER > 0
59 #include <net/bpf.h>
60 #endif
61 
62 #include <uvm/uvm_extern.h>              /* for vtophys */
63 
64 #include <sys/device.h>
65 
66 #include <dev/mii/mii.h>
67 #include <dev/mii/miivar.h>
68 
69 #include <dev/pci/pcireg.h>
70 #include <dev/pci/pcivar.h>
71 #include <dev/pci/pcidevs.h>
72 
73 #define STE_USEIOSPACE
74 
75 #include <dev/pci/if_stereg.h>
76 
77 int	ste_probe(struct device *, void *, void *);
78 void	ste_attach(struct device *, struct device *, void *);
79 int	ste_intr(void *);
80 void	ste_init(void *);
81 void	ste_rxeoc(struct ste_softc *);
82 void	ste_rxeof(struct ste_softc *);
83 void	ste_txeoc(struct ste_softc *);
84 void	ste_txeof(struct ste_softc *);
85 void	ste_stats_update(void *);
86 void	ste_stop(struct ste_softc *);
87 void	ste_reset(struct ste_softc *);
88 int	ste_ioctl(struct ifnet *, u_long, caddr_t);
89 int	ste_encap(struct ste_softc *, struct ste_chain *,
90 	    struct mbuf *);
91 void	ste_start(struct ifnet *);
92 void	ste_watchdog(struct ifnet *);
93 int	ste_newbuf(struct ste_softc *,
94 	    struct ste_chain_onefrag *,
95 	    struct mbuf *);
96 int	ste_ifmedia_upd(struct ifnet *);
97 void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
98 
99 void	ste_mii_sync(struct ste_softc *);
100 void	ste_mii_send(struct ste_softc *, u_int32_t, int);
101 int	ste_mii_readreg(struct ste_softc *,
102 	    struct ste_mii_frame *);
103 int	ste_mii_writereg(struct ste_softc *,
104 	    struct ste_mii_frame *);
105 int	ste_miibus_readreg(struct device *, int, int);
106 void	ste_miibus_writereg(struct device *, int, int, int);
107 void	ste_miibus_statchg(struct device *);
108 
109 int	ste_eeprom_wait(struct ste_softc *);
110 int	ste_read_eeprom(struct ste_softc *, caddr_t, int,
111 	    int, int);
112 void	ste_wait(struct ste_softc *);
113 void	ste_iff(struct ste_softc *);
114 int	ste_init_rx_list(struct ste_softc *);
115 void	ste_init_tx_list(struct ste_softc *);
116 
117 #define STE_SETBIT4(sc, reg, x)				\
118 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
119 
120 #define STE_CLRBIT4(sc, reg, x)				\
121 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
122 
123 #define STE_SETBIT2(sc, reg, x)				\
124 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)
125 
126 #define STE_CLRBIT2(sc, reg, x)				\
127 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)
128 
129 #define STE_SETBIT1(sc, reg, x)				\
130 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)
131 
132 #define STE_CLRBIT1(sc, reg, x)				\
133 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)
134 
135 
136 #define MII_SET(x)		STE_SETBIT1(sc, STE_PHYCTL, x)
137 #define MII_CLR(x)		STE_CLRBIT1(sc, STE_PHYCTL, x)
138 
139 const struct pci_matchid ste_devices[] = {
140 	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DFE550TX },
141 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_1 },
142 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_2 }
143 };
144 
145 struct cfattach ste_ca = {
146 	sizeof(struct ste_softc), ste_probe, ste_attach
147 };
148 
149 struct cfdriver ste_cd = {
150 	NULL, "ste", DV_IFNET
151 };
152 
153 /*
154  * Sync the PHYs by setting data bit and strobing the clock 32 times.
155  */
156 void
157 ste_mii_sync(struct ste_softc *sc)
158 {
159 	int		i;
160 
161 	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
162 
163 	for (i = 0; i < 32; i++) {
164 		MII_SET(STE_PHYCTL_MCLK);
165 		DELAY(1);
166 		MII_CLR(STE_PHYCTL_MCLK);
167 		DELAY(1);
168 	}
169 }
170 
171 /*
172  * Clock a series of bits through the MII.
173  */
174 void
175 ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
176 {
177 	int		i;
178 
179 	MII_CLR(STE_PHYCTL_MCLK);
180 
181 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
182 		if (bits & i) {
183 			MII_SET(STE_PHYCTL_MDATA);
184 		} else {
185 			MII_CLR(STE_PHYCTL_MDATA);
186 		}
187 		DELAY(1);
188 		MII_CLR(STE_PHYCTL_MCLK);
189 		DELAY(1);
190 		MII_SET(STE_PHYCTL_MCLK);
191 	}
192 }
193 
194 /*
195  * Read a PHY register through the MII.
196  */
197 int
198 ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
199 {
200 	int		ack, i, s;
201 
202 	s = splnet();
203 
204 	/*
205 	 * Set up frame for RX.
206 	 */
207 	frame->mii_stdelim = STE_MII_STARTDELIM;
208 	frame->mii_opcode = STE_MII_READOP;
209 	frame->mii_turnaround = 0;
210 	frame->mii_data = 0;
211 
212 	CSR_WRITE_2(sc, STE_PHYCTL, 0);
213 	/*
214  	 * Turn on data xmit.
215 	 */
216 	MII_SET(STE_PHYCTL_MDIR);
217 
218 	ste_mii_sync(sc);
219 
220 	/*
221 	 * Send command/address info.
222 	 */
223 	ste_mii_send(sc, frame->mii_stdelim, 2);
224 	ste_mii_send(sc, frame->mii_opcode, 2);
225 	ste_mii_send(sc, frame->mii_phyaddr, 5);
226 	ste_mii_send(sc, frame->mii_regaddr, 5);
227 
228 	/* Turn off xmit. */
229 	MII_CLR(STE_PHYCTL_MDIR);
230 
231 	/* Idle bit */
232 	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
233 	DELAY(1);
234 	MII_SET(STE_PHYCTL_MCLK);
235 	DELAY(1);
236 
237 	/* Check for ack */
238 	MII_CLR(STE_PHYCTL_MCLK);
239 	DELAY(1);
240 	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
241 	MII_SET(STE_PHYCTL_MCLK);
242 	DELAY(1);
243 
244 	/*
245 	 * Now try reading data bits. If the ack failed, we still
246 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
247 	 */
248 	if (ack) {
249 		for(i = 0; i < 16; i++) {
250 			MII_CLR(STE_PHYCTL_MCLK);
251 			DELAY(1);
252 			MII_SET(STE_PHYCTL_MCLK);
253 			DELAY(1);
254 		}
255 		goto fail;
256 	}
257 
258 	for (i = 0x8000; i; i >>= 1) {
259 		MII_CLR(STE_PHYCTL_MCLK);
260 		DELAY(1);
261 		if (!ack) {
262 			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
263 				frame->mii_data |= i;
264 			DELAY(1);
265 		}
266 		MII_SET(STE_PHYCTL_MCLK);
267 		DELAY(1);
268 	}
269 
270 fail:
271 
272 	MII_CLR(STE_PHYCTL_MCLK);
273 	DELAY(1);
274 	MII_SET(STE_PHYCTL_MCLK);
275 	DELAY(1);
276 
277 	splx(s);
278 
279 	if (ack)
280 		return(1);
281 	return(0);
282 }
283 
284 /*
285  * Write to a PHY register through the MII.
286  */
287 int
288 ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
289 {
290 	int		s;
291 
292 	s = splnet();
293 	/*
294 	 * Set up frame for TX.
295 	 */
296 
297 	frame->mii_stdelim = STE_MII_STARTDELIM;
298 	frame->mii_opcode = STE_MII_WRITEOP;
299 	frame->mii_turnaround = STE_MII_TURNAROUND;
300 
301 	/*
302  	 * Turn on data output.
303 	 */
304 	MII_SET(STE_PHYCTL_MDIR);
305 
306 	ste_mii_sync(sc);
307 
308 	ste_mii_send(sc, frame->mii_stdelim, 2);
309 	ste_mii_send(sc, frame->mii_opcode, 2);
310 	ste_mii_send(sc, frame->mii_phyaddr, 5);
311 	ste_mii_send(sc, frame->mii_regaddr, 5);
312 	ste_mii_send(sc, frame->mii_turnaround, 2);
313 	ste_mii_send(sc, frame->mii_data, 16);
314 
315 	/* Idle bit. */
316 	MII_SET(STE_PHYCTL_MCLK);
317 	DELAY(1);
318 	MII_CLR(STE_PHYCTL_MCLK);
319 	DELAY(1);
320 
321 	/*
322 	 * Turn off xmit.
323 	 */
324 	MII_CLR(STE_PHYCTL_MDIR);
325 
326 	splx(s);
327 
328 	return(0);
329 }
330 
331 int
332 ste_miibus_readreg(struct device *self, int phy, int reg)
333 {
334 	struct ste_softc	*sc = (struct ste_softc *)self;
335 	struct ste_mii_frame	frame;
336 
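	/*
	 * Some boards report the same PHY at several addresses (see the
	 * ste_one_phy setup in ste_attach()); answer only PHY 0 there so
	 * a single PHY instance attaches.
	 */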
337 	if (sc->ste_one_phy && phy != 0)
338 		return (0);
339 
340 	bzero(&frame, sizeof(frame));
341 
342 	frame.mii_phyaddr = phy;
343 	frame.mii_regaddr = reg;
344 	ste_mii_readreg(sc, &frame);
345 
346 	return(frame.mii_data);
347 }
348 
349 void
350 ste_miibus_writereg(struct device *self, int phy, int reg, int data)
351 {
352 	struct ste_softc	*sc = (struct ste_softc *)self;
353 	struct ste_mii_frame	frame;
354 
355 	bzero(&frame, sizeof(frame));
356 
357 	frame.mii_phyaddr = phy;
358 	frame.mii_regaddr = reg;
359 	frame.mii_data = data;
360 
361 	ste_mii_writereg(sc, &frame);
362 }
363 
364 void
365 ste_miibus_statchg(struct device *self)
366 {
367 	struct ste_softc	*sc = (struct ste_softc *)self;
368 	struct mii_data		*mii;
369 	int fdx, fcur;
370 
371 	mii = &sc->sc_mii;
372 
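	/*
	 * Sync the MAC's duplex setting with what the PHY negotiated.
	 * If they already agree there is nothing to do; otherwise stall
	 * both DMA engines, flip STE_MACCTL0_FULLDUPLEX and unstall.
	 */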
373 	fcur = CSR_READ_2(sc, STE_MACCTL0) & STE_MACCTL0_FULLDUPLEX;
374 	fdx = (mii->mii_media_active & IFM_GMASK) == IFM_FDX;
375 
376 	if ((fcur && fdx) || (! fcur && ! fdx))
377 		return;
378 
379 	STE_SETBIT4(sc, STE_DMACTL,
380 	    STE_DMACTL_RXDMA_STALL |STE_DMACTL_TXDMA_STALL);
381 	ste_wait(sc);
382 
383 	if (fdx)
384 		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
385 	else
386 		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
387 
388 	STE_SETBIT4(sc, STE_DMACTL,
389 	    STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);
390 }
391 
392 int
393 ste_ifmedia_upd(struct ifnet *ifp)
394 {
395 	struct ste_softc	*sc;
396 	struct mii_data		*mii;
397 
398 	sc = ifp->if_softc;
399 	mii = &sc->sc_mii;
400 	sc->ste_link = 0;
401 	if (mii->mii_instance) {
402 		struct mii_softc	*miisc;
403 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
404 			mii_phy_reset(miisc);
405 	}
406 	mii_mediachg(mii);
407 
408 	return(0);
409 }
410 
411 void
412 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
413 {
414 	struct ste_softc	*sc;
415 	struct mii_data		*mii;
416 
417 	sc = ifp->if_softc;
418 	mii = &sc->sc_mii;
419 
420 	mii_pollstat(mii);
421 	ifmr->ifm_active = mii->mii_media_active;
422 	ifmr->ifm_status = mii->mii_media_status;
423 }
424 
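/*
 * Wait for a DMA stall/unstall request to complete: poll until
 * STE_DMACTL_DMA_HALTINPROG clears or STE_TIMEOUT iterations pass.
 */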
425 void
426 ste_wait(struct ste_softc *sc)
427 {
428 	int		i;
429 
430 	for (i = 0; i < STE_TIMEOUT; i++) {
431 		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
432 			break;
433 	}
434 
435 	if (i == STE_TIMEOUT)
436 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
437 }
438 
439 /*
440  * The EEPROM is slow: give it time to come ready after issuing
441  * it a command.
442  */
443 int
444 ste_eeprom_wait(struct ste_softc *sc)
445 {
446 	int		i;
447 
448 	DELAY(1000);
449 
450 	for (i = 0; i < 100; i++) {
451 		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
452 			DELAY(1000);
453 		else
454 			break;
455 	}
456 
457 	if (i == 100) {
458 		printf("%s: eeprom failed to come ready\n",
459 		    sc->sc_dev.dv_xname);
460 		return(1);
461 	}
462 
463 	return(0);
464 }
465 
466 /*
467  * Read a sequence of words from the EEPROM. Note that ethernet address
468  * data is stored in the EEPROM in network byte order.
469  */
470 int
471 ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
472 {
473 	int			err = 0, i;
474 	u_int16_t		word = 0, *ptr;
475 
476 	if (ste_eeprom_wait(sc))
477 		return(1);
478 
479 	for (i = 0; i < cnt; i++) {
480 		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
481 		err = ste_eeprom_wait(sc);
482 		if (err)
483 			break;
484 		word = CSR_READ_2(sc, STE_EEPROM_DATA);
485 		ptr = (u_int16_t *)(dest + (i * 2));
486 		if (swap)
487 			*ptr = ntohs(word);
488 		else
489 			*ptr = word;
490 	}
491 
492 	return(err ? 1 : 0);
493 }
494 
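/*
 * Program the receive filter: always accept unicast and broadcast,
 * go promiscuous and/or all-multicast when asked, and otherwise build
 * a 64-bit multicast hash from six bits of each address CRC
 * (ether_crc32_be() & 0x3F).
 */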
495 void
496 ste_iff(struct ste_softc *sc)
497 {
498 	struct ifnet		*ifp = &sc->arpcom.ac_if;
499 	struct arpcom		*ac = &sc->arpcom;
500 	struct ether_multi	*enm;
501 	struct ether_multistep	step;
502 	u_int32_t		rxmode, hashes[2];
503 	int			h = 0;
504 
505 	rxmode = CSR_READ_1(sc, STE_RX_MODE);
506 	rxmode &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_BROADCAST |
507 	    STE_RXMODE_MULTIHASH | STE_RXMODE_PROMISC |
508 	    STE_RXMODE_UNICAST);
509 	bzero(hashes, sizeof(hashes));
510 	ifp->if_flags &= ~IFF_ALLMULTI;
511 
512 	/*
513 	 * Always accept broadcast frames.
514 	 * Always accept frames destined to our station address.
515 	 */
516 	rxmode |= STE_RXMODE_BROADCAST | STE_RXMODE_UNICAST;
517 
518 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
519 		ifp->if_flags |= IFF_ALLMULTI;
520 		rxmode |= STE_RXMODE_ALLMULTI;
521 		if (ifp->if_flags & IFF_PROMISC)
522 			rxmode |= STE_RXMODE_PROMISC;
523 	} else {
524 		rxmode |= STE_RXMODE_MULTIHASH;
525 
526 		/* now program new ones */
527 		ETHER_FIRST_MULTI(step, ac, enm);
528 		while (enm != NULL) {
529 			h = ether_crc32_be(enm->enm_addrlo,
530 			    ETHER_ADDR_LEN) & 0x3F;
531 
532 			if (h < 32)
533 				hashes[0] |= (1 << h);
534 			else
535 				hashes[1] |= (1 << (h - 32));
536 
537 			ETHER_NEXT_MULTI(step, enm);
538 		}
539 	}
540 
541 	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
542 	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
543 	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
544 	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
545 	CSR_WRITE_1(sc, STE_RX_MODE, rxmode);
546 }
547 
548 int
549 ste_intr(void *xsc)
550 {
551 	struct ste_softc	*sc;
552 	struct ifnet		*ifp;
553 	u_int16_t		status;
554 	int			claimed = 0;
555 
556 	sc = xsc;
557 	ifp = &sc->arpcom.ac_if;
558 
559 	/* See if this is really our interrupt. */
560 	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
561 		return claimed;
562 
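	/*
	 * Reading STE_ISR_ACK returns the pending interrupt bits and
	 * acknowledges them; keep servicing until no bits in STE_INTRS
	 * remain set.
	 */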
563 	for (;;) {
564 		status = CSR_READ_2(sc, STE_ISR_ACK);
565 
566 		if (!(status & STE_INTRS))
567 			break;
568 
569 		claimed = 1;
570 
571 		if (status & STE_ISR_RX_DMADONE) {
572 			ste_rxeoc(sc);
573 			ste_rxeof(sc);
574 		}
575 
576 		if (status & STE_ISR_TX_DMADONE)
577 			ste_txeof(sc);
578 
579 		if (status & STE_ISR_TX_DONE)
580 			ste_txeoc(sc);
581 
582 		if (status & STE_ISR_STATS_OFLOW) {
583 			timeout_del(&sc->sc_stats_tmo);
584 			ste_stats_update(sc);
585 		}
586 
587 		if (status & STE_ISR_LINKEVENT)
588 			mii_pollstat(&sc->sc_mii);
589 
590 		if (status & STE_ISR_HOSTERR)
591 			ste_init(sc);
592 	}
593 
594 	/* Re-enable interrupts */
595 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
596 
597 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
598 		ste_start(ifp);
599 
600 	return claimed;
601 }
602 
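/*
 * RX end-of-channel: if the descriptor at our software head has not
 * completed but a later one has, we have fallen behind the chip;
 * move ste_rx_head forward so ste_rxeof() picks up at the right spot.
 */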
603 void
604 ste_rxeoc(struct ste_softc *sc)
605 {
606 	struct ste_chain_onefrag *cur_rx;
607 
608 	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
609 		cur_rx = sc->ste_cdata.ste_rx_head;
610 		do {
611 			cur_rx = cur_rx->ste_next;
612 			/* If the ring is empty, just return. */
613 			if (cur_rx == sc->ste_cdata.ste_rx_head)
614 				return;
615 		} while (cur_rx->ste_ptr->ste_status == 0);
616 		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
617 			/* We've fallen behind the chip: catch it. */
618 			sc->ste_cdata.ste_rx_head = cur_rx;
619 		}
620 	}
621 }
622 
623 /*
624  * A frame has been uploaded: pass the resulting mbuf chain up to
625  * the higher level protocols.
626  */
627 void
628 ste_rxeof(struct ste_softc *sc)
629 {
630 	struct mbuf		*m;
631 	struct ifnet		*ifp;
632 	struct ste_chain_onefrag	*cur_rx;
633 	int			total_len = 0, count=0;
634 	u_int32_t		rxstat;
635 
636 	ifp = &sc->arpcom.ac_if;
637 
638 	while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
639 	      & STE_RXSTAT_DMADONE) {
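		/*
		 * Don't consume the entire ring in one call; stop while
		 * a few descriptors are still outstanding.
		 */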
640 		if ((STE_RX_LIST_CNT - count) < 3)
641 			break;
642 
643 		cur_rx = sc->ste_cdata.ste_rx_head;
644 		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;
645 
646 		/*
647 		 * If an error occurs, update stats, clear the
648 		 * status word and leave the mbuf cluster in place:
649 		 * it should simply get re-used next time this descriptor
650 		 * comes up in the ring.
651 		 */
652 		if (rxstat & STE_RXSTAT_FRAME_ERR) {
653 			ifp->if_ierrors++;
654 			cur_rx->ste_ptr->ste_status = 0;
655 			continue;
656 		}
657 
658 		/*
659 		 * If the error bit was not set, the upload complete
660 		 * bit should be set, which means we have a valid packet.
661 		 * If not, something truly strange has happened.
662 		 */
663 		if (!(rxstat & STE_RXSTAT_DMADONE)) {
664 			printf("%s: bad receive status -- packet dropped\n",
665 				sc->sc_dev.dv_xname);
666 			ifp->if_ierrors++;
667 			cur_rx->ste_ptr->ste_status = 0;
668 			continue;
669 		}
670 
671 		/* No errors; receive the packet. */
672 		m = cur_rx->ste_mbuf;
673 		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;
674 
675 		/*
676 		 * Try to conjure up a new mbuf cluster. If that
677 		 * fails, it means we have an out of memory condition and
678 		 * should leave the buffer in place and continue. This will
679 		 * result in a lost packet, but there's little else we
680 		 * can do in this situation.
681 		 */
682 		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
683 			ifp->if_ierrors++;
684 			cur_rx->ste_ptr->ste_status = 0;
685 			continue;
686 		}
687 
688 		m->m_pkthdr.rcvif = ifp;
689 		m->m_pkthdr.len = m->m_len = total_len;
690 
691 		ifp->if_ipackets++;
692 
693 #if NBPFILTER > 0
694 		if (ifp->if_bpf)
695 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
696 #endif
697 
698 		/* pass it on. */
699 		ether_input_mbuf(ifp, m);
700 
701 		cur_rx->ste_ptr->ste_status = 0;
702 		count++;
703 	}
704 }
705 
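/*
 * TX end-of-channel: drain the chip's TX status register.  On an
 * underrun, excess collisions or a reclaim error, count the error,
 * re-init the chip and, for underruns, bump the TX start threshold.
 */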
706 void
707 ste_txeoc(struct ste_softc *sc)
708 {
709 	u_int8_t		txstat;
710 	struct ifnet		*ifp;
711 
712 	ifp = &sc->arpcom.ac_if;
713 
714 	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
715 	    STE_TXSTATUS_TXDONE) {
716 		if (txstat & STE_TXSTATUS_UNDERRUN ||
717 		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
718 		    txstat & STE_TXSTATUS_RECLAIMERR) {
719 			ifp->if_oerrors++;
720 			printf("%s: transmission error: %x\n",
721 			    sc->sc_dev.dv_xname, txstat);
722 
723 			ste_init(sc);
724 
725 			if (txstat & STE_TXSTATUS_UNDERRUN &&
726 			    sc->ste_tx_thresh < ETHER_MAX_DIX_LEN) {
727 				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
728 				printf("%s: tx underrun, increasing tx"
729 				    " start threshold to %d bytes\n",
730 				    sc->sc_dev.dv_xname, sc->ste_tx_thresh);
731 			}
732 			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
733 			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
734 			    (ETHER_MAX_DIX_LEN >> 4));
735 		}
736 		ste_init(sc);
737 		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
738 	}
739 }
740 
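/*
 * TX end-of-frame: walk the TX ring from the consumer index, freeing
 * the mbuf of every descriptor the chip has marked DMADONE, and clear
 * the watchdog once the ring is fully drained.
 */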
741 void
742 ste_txeof(struct ste_softc *sc)
743 {
744 	struct ste_chain	*cur_tx = NULL;
745 	struct ifnet		*ifp;
746 	int			idx;
747 
748 	ifp = &sc->arpcom.ac_if;
749 
750 	idx = sc->ste_cdata.ste_tx_cons;
751 	while(idx != sc->ste_cdata.ste_tx_prod) {
752 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
753 
754 		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
755 			break;
756 
757 		m_freem(cur_tx->ste_mbuf);
758 		cur_tx->ste_mbuf = NULL;
759 		ifp->if_flags &= ~IFF_OACTIVE;
760 		ifp->if_opackets++;
761 
762 		STE_INC(idx, STE_TX_LIST_CNT);
763 	}
764 
765 	sc->ste_cdata.ste_tx_cons = idx;
766 	if (idx == sc->ste_cdata.ste_tx_prod)
767 		ifp->if_timer = 0;
768 }
769 
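/*
 * Once-a-second statistics timer: fold the chip's collision counters
 * into if_collisions and, while the link is down, poll the PHY so we
 * notice when it comes back up.
 */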
770 void
771 ste_stats_update(void *xsc)
772 {
773 	struct ste_softc	*sc;
774 	struct ifnet		*ifp;
775 	struct mii_data		*mii;
776 	int			s;
777 
778 	s = splnet();
779 
780 	sc = xsc;
781 	ifp = &sc->arpcom.ac_if;
782 	mii = &sc->sc_mii;
783 
784 	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
785 	    + CSR_READ_1(sc, STE_MULTI_COLLS)
786 	    + CSR_READ_1(sc, STE_SINGLE_COLLS);
787 
788 	if (!sc->ste_link) {
789 		mii_pollstat(mii);
790 		if (mii->mii_media_status & IFM_ACTIVE &&
791 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
792 			sc->ste_link++;
793 			/*
794 			 * We don't get a call-back on re-init, so do it here;
795 			 * otherwise we get stuck in the wrong link state.
796 			 */
797 			ste_miibus_statchg((struct device *)sc);
798 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
799 				ste_start(ifp);
800 		}
801 	}
802 
803 	timeout_add_sec(&sc->sc_stats_tmo, 1);
804 	splx(s);
805 }
806 
807 /*
808  * Probe for a Sundance ST201 chip. Check the PCI vendor and device
809  * IDs against our list and return a match if we find one.
810  */
811 int
812 ste_probe(struct device *parent, void *match, void *aux)
813 {
814 	return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
815 	    nitems(ste_devices)));
816 }
817 
818 /*
819  * Attach the interface. Allocate softc structures, do ifmedia
820  * setup and ethernet/BPF attach.
821  */
822 void
823 ste_attach(struct device *parent, struct device *self, void *aux)
824 {
825 	const char		*intrstr = NULL;
826 	struct ste_softc	*sc = (struct ste_softc *)self;
827 	struct pci_attach_args	*pa = aux;
828 	pci_chipset_tag_t	pc = pa->pa_pc;
829 	pci_intr_handle_t	ih;
830 	struct ifnet		*ifp;
831 	bus_size_t		size;
832 
833 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
834 
835 	/*
836 	 * Only use one PHY, since this chip reports multiple.
837 	 * Note: on the DFE-550TX the PHY is at 1; on the DFE-580TX
838 	 * it is at 0 & 1.  It is rev 0x12.
839 	 */
840 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DLINK &&
841 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DLINK_DFE550TX &&
842 	    PCI_REVISION(pa->pa_class) == 0x12)
843 		sc->ste_one_phy = 1;
844 
845 	/*
846 	 * Map control/status registers.
847 	 */
848 
849 #ifdef STE_USEIOSPACE
850 	if (pci_mapreg_map(pa, STE_PCI_LOIO,
851 	    PCI_MAPREG_TYPE_IO, 0,
852 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
853 		printf(": can't map i/o space\n");
854 		return;
855 	}
856 #else
857 	if (pci_mapreg_map(pa, STE_PCI_LOMEM,
858 	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
859 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
860 		printf(": can't map mem space\n");
861 		return;
862 	}
863 #endif
864 
865 	/* Allocate interrupt */
866 	if (pci_intr_map(pa, &ih)) {
867 		printf(": couldn't map interrupt\n");
868 		goto fail_1;
869 	}
870 	intrstr = pci_intr_string(pc, ih);
871 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
872 	    self->dv_xname);
873 	if (sc->sc_ih == NULL) {
874 		printf(": couldn't establish interrupt");
875 		if (intrstr != NULL)
876 			printf(" at %s", intrstr);
877 		printf("\n");
878 		goto fail_1;
879 	}
880 	printf(": %s", intrstr);
881 
882 	/* Reset the adapter. */
883 	ste_reset(sc);
884 
885 	/*
886 	 * Get station address from the EEPROM.
887 	 */
888 	if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
889 	    STE_EEADDR_NODE0, 3, 0)) {
890 		printf(": failed to read station address\n");
891 		goto fail_2;
892 	}
893 
894 	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
895 
896 	sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
897 	    M_DEVBUF, M_DONTWAIT);
898 	if (sc->ste_ldata_ptr == NULL) {
899 		printf(": no memory for list buffers!\n");
900 		goto fail_2;
901 	}
902 
903 	sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
904 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
905 
906 	ifp = &sc->arpcom.ac_if;
907 	ifp->if_softc = sc;
908 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
909 	ifp->if_ioctl = ste_ioctl;
910 	ifp->if_start = ste_start;
911 	ifp->if_watchdog = ste_watchdog;
912 	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
913 	IFQ_SET_READY(&ifp->if_snd);
914 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
915 	ifp->if_capabilities = IFCAP_VLAN_MTU;
916 
917 	sc->ste_tx_thresh = STE_TXSTART_THRESH;
918 
919 	sc->sc_mii.mii_ifp = ifp;
920 	sc->sc_mii.mii_readreg = ste_miibus_readreg;
921 	sc->sc_mii.mii_writereg = ste_miibus_writereg;
922 	sc->sc_mii.mii_statchg = ste_miibus_statchg;
923 	ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd, ste_ifmedia_sts);
924 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
925 	    0);
926 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
927 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
928 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
929 	} else
930 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
931 
932 	/*
933 	 * Call MI attach routines.
934 	 */
935 	if_attach(ifp);
936 	ether_ifattach(ifp);
937 	return;
938 
939 fail_2:
940 	pci_intr_disestablish(pc, sc->sc_ih);
941 
942 fail_1:
943 	bus_space_unmap(sc->ste_btag, sc->ste_bhandle, size);
944 }
945 
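/*
 * Attach a fresh (or recycled) mbuf cluster to an RX descriptor:
 * reset the status word and point the single fragment at the cluster,
 * ETHER_ALIGN-adjusted so the IP header lands on a 4-byte boundary.
 */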
946 int
947 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c, struct mbuf *m)
948 {
949 	struct mbuf		*m_new = NULL;
950 
951 	if (m == NULL) {
952 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
953 		if (m_new == NULL)
954 			return(ENOBUFS);
955 		MCLGET(m_new, M_DONTWAIT);
956 		if (!(m_new->m_flags & M_EXT)) {
957 			m_freem(m_new);
958 			return(ENOBUFS);
959 		}
960 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
961 	} else {
962 		m_new = m;
963 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
964 		m_new->m_data = m_new->m_ext.ext_buf;
965 	}
966 
967 	m_adj(m_new, ETHER_ALIGN);
968 
969 	c->ste_mbuf = m_new;
970 	c->ste_ptr->ste_status = 0;
971 	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, vaddr_t));
972 	c->ste_ptr->ste_frag.ste_len = (ETHER_MAX_DIX_LEN + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;
973 
974 	return(0);
975 }
976 
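/*
 * Build the RX descriptor ring: give every slot an mbuf cluster via
 * ste_newbuf() and link both the software chain and the physical
 * ste_next pointers into a circle.
 */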
977 int
978 ste_init_rx_list(struct ste_softc *sc)
979 {
980 	struct ste_chain_data	*cd;
981 	struct ste_list_data	*ld;
982 	int			i;
983 
984 	cd = &sc->ste_cdata;
985 	ld = sc->ste_ldata;
986 
987 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
988 		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
989 		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
990 			return(ENOBUFS);
991 		if (i == (STE_RX_LIST_CNT - 1)) {
992 			cd->ste_rx_chain[i].ste_next =
993 			    &cd->ste_rx_chain[0];
994 			ld->ste_rx_list[i].ste_next =
995 			    vtophys((vaddr_t)&ld->ste_rx_list[0]);
996 		} else {
997 			cd->ste_rx_chain[i].ste_next =
998 			    &cd->ste_rx_chain[i + 1];
999 			ld->ste_rx_list[i].ste_next =
1000 			    vtophys((vaddr_t)&ld->ste_rx_list[i + 1]);
1001 		}
1002 		ld->ste_rx_list[i].ste_status = 0;
1003 	}
1004 
1005 	cd->ste_rx_head = &cd->ste_rx_chain[0];
1006 
1007 	return(0);
1008 }
1009 
1010 void
1011 ste_init_tx_list(struct ste_softc *sc)
1012 {
1013 	struct ste_chain_data	*cd;
1014 	struct ste_list_data	*ld;
1015 	int			i;
1016 
1017 	cd = &sc->ste_cdata;
1018 	ld = sc->ste_ldata;
1019 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1020 		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1021 		cd->ste_tx_chain[i].ste_phys = vtophys((vaddr_t)&ld->ste_tx_list[i]);
1022 		if (i == (STE_TX_LIST_CNT - 1))
1023 			cd->ste_tx_chain[i].ste_next =
1024 			    &cd->ste_tx_chain[0];
1025 		else
1026 			cd->ste_tx_chain[i].ste_next =
1027 			    &cd->ste_tx_chain[i + 1];
1028 	}
1029 
1030 	bzero(ld->ste_tx_list, sizeof(struct ste_desc) * STE_TX_LIST_CNT);
1031 
1032 	cd->ste_tx_prod = 0;
1033 	cd->ste_tx_cons = 0;
1034 }
1035 
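/*
 * (Re)initialize the hardware: reset the chip, reload the station
 * address, rebuild the RX and TX rings, program the RX filter and
 * DMA list pointers, then enable the MAC, stats and interrupts.
 */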
1036 void
1037 ste_init(void *xsc)
1038 {
1039 	struct ste_softc	*sc = (struct ste_softc *)xsc;
1040 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1041 	struct mii_data		*mii;
1042 	int			i, s;
1043 
1044 	s = splnet();
1045 
1046 	ste_stop(sc);
1047 	/* Reset the chip to a known state. */
1048 	ste_reset(sc);
1049 
1050 	mii = &sc->sc_mii;
1051 
1052 	/* Init our MAC address */
1053 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1054 		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1055 	}
1056 
1057 	/* Init RX list */
1058 	if (ste_init_rx_list(sc) == ENOBUFS) {
1059 		printf("%s: initialization failed: no "
1060 		    "memory for RX buffers\n", sc->sc_dev.dv_xname);
1061 		ste_stop(sc);
1062 		splx(s);
1063 		return;
1064 	}
1065 
1066 	/* Set RX polling interval */
1067 	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
1068 
1069 	/* Init TX descriptors */
1070 	ste_init_tx_list(sc);
1071 
1072 	/* Set the TX freethresh value */
1073 	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, ETHER_MAX_DIX_LEN >> 8);
1074 
1075 	/* Set the TX start threshold for best performance. */
1076 	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
1077 
1078 	/* Set the TX reclaim threshold. */
1079 	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (ETHER_MAX_DIX_LEN >> 4));
1080 
1081 	/* Program promiscuous mode and multicast filters. */
1082 	ste_iff(sc);
1083 
1084 	/* Load the address of the RX list. */
1085 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1086 	ste_wait(sc);
1087 	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
1088 	    vtophys((vaddr_t)&sc->ste_ldata->ste_rx_list[0]));
1089 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1090 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1091 
1092 	/* Set TX polling interval (defer until we TX first packet) */
1093 	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
1094 
1095 	/* Load address of the TX list */
1096 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1097 	ste_wait(sc);
1098 	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
1099 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1100 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1101 	ste_wait(sc);
1102 	sc->ste_tx_prev = NULL;
1103 
1104 	/* Enable receiver and transmitter */
1105 	CSR_WRITE_2(sc, STE_MACCTL0, 0);
1106 	CSR_WRITE_2(sc, STE_MACCTL1, 0);
1107 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
1108 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
1109 
1110 	/* Enable stats counters. */
1111 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
1112 
1113 	/* Enable interrupts. */
1114 	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
1115 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
1116 
1117 	/* Accept VLAN length packets */
1118 	CSR_WRITE_2(sc, STE_MAX_FRAMELEN,
1119 	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
1120 
1121 	ste_ifmedia_upd(ifp);
1122 
1123 	ifp->if_flags |= IFF_RUNNING;
1124 	ifp->if_flags &= ~IFF_OACTIVE;
1125 
1126 	splx(s);
1127 
1128 	timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
1129 	timeout_add_sec(&sc->sc_stats_tmo, 1);
1130 }
1131 
1132 void
1133 ste_stop(struct ste_softc *sc)
1134 {
1135 	int			i;
1136 	struct ifnet		*ifp;
1137 
1138 	ifp = &sc->arpcom.ac_if;
1139 
1140 	timeout_del(&sc->sc_stats_tmo);
1141 
1142 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
1143 
1144 	CSR_WRITE_2(sc, STE_IMR, 0);
1145 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
1146 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
1147 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
1148 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1149 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1150 	ste_wait(sc);
1151 	/*
1152 	 * Try really hard to stop the RX engine; otherwise, under heavy
1153 	 * RX traffic, the chip will write into de-allocated memory.
1154 	 */
1155 	ste_reset(sc);
1156 
1157 	sc->ste_link = 0;
1158 
1159 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1160 		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
1161 			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
1162 			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
1163 		}
1164 	}
1165 
1166 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1167 		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
1168 			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
1169 			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
1170 		}
1171 	}
1172 
1173 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
1174 }
1175 
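/*
 * Issue a full ASIC reset (global, RX, TX, DMA, FIFO, network, host,
 * autoinit, external) and wait for STE_ASICCTL_RESET_BUSY to clear.
 */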
1176 void
1177 ste_reset(struct ste_softc *sc)
1178 {
1179 	int		i;
1180 
1181 	STE_SETBIT4(sc, STE_ASICCTL,
1182 	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1183 	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1184 	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1185 	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1186 	    STE_ASICCTL_EXTRESET_RESET);
1187 
1188 	DELAY(100000);
1189 
1190 	for (i = 0; i < STE_TIMEOUT; i++) {
1191 		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1192 			break;
1193 	}
1194 
1195 	if (i == STE_TIMEOUT)
1196 		printf("%s: global reset never completed\n",
1197 		    sc->sc_dev.dv_xname);
1198 }
1199 
1200 int
1201 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1202 {
1203 	struct ste_softc	*sc = ifp->if_softc;
1204 	struct ifaddr		*ifa = (struct ifaddr *) data;
1205 	struct ifreq		*ifr = (struct ifreq *) data;
1206 	int			s, error = 0;
1207 
1208 	s = splnet();
1209 
1210 	switch(command) {
1211 	case SIOCSIFADDR:
1212 		ifp->if_flags |= IFF_UP;
1213 		if (!(ifp->if_flags & IFF_RUNNING))
1214 			ste_init(sc);
1215 		if (ifa->ifa_addr->sa_family == AF_INET)
1216 			arp_ifinit(&sc->arpcom, ifa);
1217 		break;
1218 
1219 	case SIOCSIFFLAGS:
1220 		if (ifp->if_flags & IFF_UP) {
1221 			if (ifp->if_flags & IFF_RUNNING)
1222 				error = ENETRESET;
1223 			else {
1224 				sc->ste_tx_thresh = STE_TXSTART_THRESH;
1225 				ste_init(sc);
1226 			}
1227 		} else {
1228 			if (ifp->if_flags & IFF_RUNNING)
1229 				ste_stop(sc);
1230 		}
1231 		break;
1232 
1233 	case SIOCGIFMEDIA:
1234 	case SIOCSIFMEDIA:
1235 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1236 		break;
1237 
1238 	default:
1239 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1240 	}
1241 
1242 	if (error == ENETRESET) {
1243 		if (ifp->if_flags & IFF_RUNNING)
1244 			ste_iff(sc);
1245 		error = 0;
1246 	}
1247 
1248 	splx(s);
1249 	return(error);
1250 }
1251 
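/*
 * Map an outgoing mbuf chain into a TX descriptor, one fragment per
 * non-empty mbuf.  If the chain needs more than STE_MAXFRAGS
 * fragments, copy it into a single mbuf (cluster) and retry.
 */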
1252 int
1253 ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
1254 {
1255 	int			frag = 0;
1256 	struct ste_frag		*f = NULL;
1257 	struct mbuf		*m;
1258 	struct ste_desc		*d;
1259 
1260 	d = c->ste_ptr;
1261 	d->ste_ctl = 0;
1262 
1263 encap_retry:
1264 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1265 		if (m->m_len != 0) {
1266 			if (frag == STE_MAXFRAGS)
1267 				break;
1268 			f = &d->ste_frags[frag];
1269 			f->ste_addr = vtophys(mtod(m, vaddr_t));
1270 			f->ste_len = m->m_len;
1271 			frag++;
1272 		}
1273 	}
1274 
1275 	if (m != NULL) {
1276 		struct mbuf *mn;
1277 
1278 		/*
1279 		 * We ran out of segments. We have to recopy this
1280 		 * mbuf chain first. Bail out if we can't get the
1281 		 * new buffers.
1282 		 */
1283 		MGETHDR(mn, M_DONTWAIT, MT_DATA);
1284 		if (mn == NULL) {
1285 			m_freem(m_head);
1286 			return ENOMEM;
1287 		}
1288 		if (m_head->m_pkthdr.len > MHLEN) {
1289 			MCLGET(mn, M_DONTWAIT);
1290 			if ((mn->m_flags & M_EXT) == 0) {
1291 				m_freem(mn);
1292 				m_freem(m_head);
1293 				return ENOMEM;
1294 			}
1295 		}
1296 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1297 			   mtod(mn, caddr_t));
1298 		mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
1299 		m_freem(m_head);
1300 		m_head = mn;
1301 		goto encap_retry;
1302 	}
1303 
1304 	c->ste_mbuf = m_head;
1305 	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
1306 	d->ste_ctl = 1;
1307 
1308 	return(0);
1309 }
1310 
1311 void
1312 ste_start(struct ifnet *ifp)
1313 {
1314 	struct ste_softc	*sc;
1315 	struct mbuf		*m_head = NULL;
1316 	struct ste_chain	*cur_tx;
1317 	int			idx;
1318 
1319 	sc = ifp->if_softc;
1320 
1321 	if (!sc->ste_link)
1322 		return;
1323 
1324 	if (ifp->if_flags & IFF_OACTIVE)
1325 		return;
1326 
1327 	idx = sc->ste_cdata.ste_tx_prod;
1328 
1329 	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
1330 		/*
1331 		 * We cannot re-use the last (free) descriptor;
1332 		 * the chip may not have read its ste_next yet.
1333 		 */
1334 		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
1335 		    sc->ste_cdata.ste_tx_cons) {
1336 			ifp->if_flags |= IFF_OACTIVE;
1337 			break;
1338 		}
1339 
1340 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1341 		if (m_head == NULL)
1342 			break;
1343 
1344 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
1345 
1346 		if (ste_encap(sc, cur_tx, m_head) != 0)
1347 			break;
1348 
1349 		cur_tx->ste_ptr->ste_next = 0;
1350 
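		/*
		 * For the first frame after (re)init, hand the chip the
		 * physical address of the TX list and start the poll
		 * timer; later frames are linked onto the previous
		 * descriptor's ste_next.
		 */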
1351 		if (sc->ste_tx_prev == NULL) {
1352 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1353 			/* Load address of the TX list */
1354 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1355 			ste_wait(sc);
1356 
1357 			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
1358 			    vtophys((vaddr_t)&sc->ste_ldata->ste_tx_list[0]));
1359 
1360 			/* Set TX polling interval to start TX engine */
1361 			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
1362 
1363 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1364 			ste_wait(sc);
1365 		} else {
1366 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1367 			sc->ste_tx_prev->ste_ptr->ste_next
1368 				= cur_tx->ste_phys;
1369 		}
1370 
1371 		sc->ste_tx_prev = cur_tx;
1372 
1373 #if NBPFILTER > 0
1374 		/*
1375 		 * If there's a BPF listener, bounce a copy of this frame
1376 		 * to him.
1377 	 	 */
1378 		if (ifp->if_bpf)
1379 			bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf,
1380 			    BPF_DIRECTION_OUT);
1381 #endif
1382 
1383 		STE_INC(idx, STE_TX_LIST_CNT);
1384 		ifp->if_timer = 5;
1385 	}
1386 	sc->ste_cdata.ste_tx_prod = idx;
1387 }
1388 
1389 void
1390 ste_watchdog(struct ifnet *ifp)
1391 {
1392 	struct ste_softc	*sc;
1393 
1394 	sc = ifp->if_softc;
1395 
1396 	ifp->if_oerrors++;
1397 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1398 
1399 	ste_txeoc(sc);
1400 	ste_txeof(sc);
1401 	ste_rxeoc(sc);
1402 	ste_rxeof(sc);
1403 	ste_init(sc);
1404 
1405 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1406 		ste_start(ifp);
1407 }
1408