xref: /openbsd/sys/dev/pci/if_ste.c (revision a6445c1d)
1 /*	$OpenBSD: if_ste.c,v 1.55 2014/07/22 13:12:11 mpi Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
34  */
35 
36 #include "bpfilter.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/ioctl.h>
44 #include <sys/errno.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/timeout.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_types.h>
52 
53 #ifdef INET
54 #include <netinet/in.h>
55 #include <netinet/if_ether.h>
56 #endif
57 
58 #include <net/if_media.h>
59 
60 #if NBPFILTER > 0
61 #include <net/bpf.h>
62 #endif
63 
64 #include <uvm/uvm_extern.h>              /* for vtophys */
65 
66 #include <sys/device.h>
67 
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 
71 #include <dev/pci/pcireg.h>
72 #include <dev/pci/pcivar.h>
73 #include <dev/pci/pcidevs.h>
74 
75 #define STE_USEIOSPACE
76 
77 #include <dev/pci/if_stereg.h>
78 
79 int	ste_probe(struct device *, void *, void *);
80 void	ste_attach(struct device *, struct device *, void *);
81 int	ste_intr(void *);
82 void	ste_init(void *);
83 void	ste_rxeoc(struct ste_softc *);
84 void	ste_rxeof(struct ste_softc *);
85 void	ste_txeoc(struct ste_softc *);
86 void	ste_txeof(struct ste_softc *);
87 void	ste_stats_update(void *);
88 void	ste_stop(struct ste_softc *);
89 void	ste_reset(struct ste_softc *);
90 int	ste_ioctl(struct ifnet *, u_long, caddr_t);
91 int	ste_encap(struct ste_softc *, struct ste_chain *,
92 	    struct mbuf *);
93 void	ste_start(struct ifnet *);
94 void	ste_watchdog(struct ifnet *);
95 int	ste_newbuf(struct ste_softc *,
96 	    struct ste_chain_onefrag *,
97 	    struct mbuf *);
98 int	ste_ifmedia_upd(struct ifnet *);
99 void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
100 
101 void	ste_mii_sync(struct ste_softc *);
102 void	ste_mii_send(struct ste_softc *, u_int32_t, int);
103 int	ste_mii_readreg(struct ste_softc *,
104 	    struct ste_mii_frame *);
105 int	ste_mii_writereg(struct ste_softc *,
106 	    struct ste_mii_frame *);
107 int	ste_miibus_readreg(struct device *, int, int);
108 void	ste_miibus_writereg(struct device *, int, int, int);
109 void	ste_miibus_statchg(struct device *);
110 
111 int	ste_eeprom_wait(struct ste_softc *);
112 int	ste_read_eeprom(struct ste_softc *, caddr_t, int,
113 	    int, int);
114 void	ste_wait(struct ste_softc *);
115 void	ste_iff(struct ste_softc *);
116 int	ste_init_rx_list(struct ste_softc *);
117 void	ste_init_tx_list(struct ste_softc *);
118 
119 #define STE_SETBIT4(sc, reg, x)				\
120 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
121 
122 #define STE_CLRBIT4(sc, reg, x)				\
123 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
124 
125 #define STE_SETBIT2(sc, reg, x)				\
126 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)
127 
128 #define STE_CLRBIT2(sc, reg, x)				\
129 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)
130 
131 #define STE_SETBIT1(sc, reg, x)				\
132 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)
133 
134 #define STE_CLRBIT1(sc, reg, x)				\
135 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)
136 
137 
138 #define MII_SET(x)		STE_SETBIT1(sc, STE_PHYCTL, x)
139 #define MII_CLR(x)		STE_CLRBIT1(sc, STE_PHYCTL, x)
140 
141 const struct pci_matchid ste_devices[] = {
142 	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DFE550TX },
143 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_1 },
144 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_2 }
145 };
146 
147 struct cfattach ste_ca = {
148 	sizeof(struct ste_softc), ste_probe, ste_attach
149 };
150 
151 struct cfdriver ste_cd = {
152 	NULL, "ste", DV_IFNET
153 };
154 
155 /*
156  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
157  */
158 void
159 ste_mii_sync(struct ste_softc *sc)
160 {
161 	int		i;
162 
163 	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
164 
165 	for (i = 0; i < 32; i++) {
166 		MII_SET(STE_PHYCTL_MCLK);
167 		DELAY(1);
168 		MII_CLR(STE_PHYCTL_MCLK);
169 		DELAY(1);
170 	}
171 }
172 
173 /*
174  * Clock a series of bits through the MII.
175  */
176 void
177 ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
178 {
179 	int		i;
180 
181 	MII_CLR(STE_PHYCTL_MCLK);
182 
183 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
184 		if (bits & i) {
185 			MII_SET(STE_PHYCTL_MDATA);
186 		} else {
187 			MII_CLR(STE_PHYCTL_MDATA);
188 		}
189 		DELAY(1);
190 		MII_CLR(STE_PHYCTL_MCLK);
191 		DELAY(1);
192 		MII_SET(STE_PHYCTL_MCLK);
193 	}
194 }
195 
196 /*
197  * Read a PHY register through the MII.
198  */
199 int
200 ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
201 {
202 	int		ack, i, s;
203 
204 	s = splnet();
205 
206 	/*
207 	 * Set up frame for RX.
208 	 */
209 	frame->mii_stdelim = STE_MII_STARTDELIM;
210 	frame->mii_opcode = STE_MII_READOP;
211 	frame->mii_turnaround = 0;
212 	frame->mii_data = 0;
213 
214 	CSR_WRITE_2(sc, STE_PHYCTL, 0);
215 	/*
216  	 * Turn on data xmit.
217 	 */
218 	MII_SET(STE_PHYCTL_MDIR);
219 
220 	ste_mii_sync(sc);
221 
222 	/*
223 	 * Send command/address info.
224 	 */
225 	ste_mii_send(sc, frame->mii_stdelim, 2);
226 	ste_mii_send(sc, frame->mii_opcode, 2);
227 	ste_mii_send(sc, frame->mii_phyaddr, 5);
228 	ste_mii_send(sc, frame->mii_regaddr, 5);
229 
230 	/* Turn off xmit. */
231 	MII_CLR(STE_PHYCTL_MDIR);
232 
233 	/* Idle bit */
234 	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
235 	DELAY(1);
236 	MII_SET(STE_PHYCTL_MCLK);
237 	DELAY(1);
238 
239 	/* Check for ack */
240 	MII_CLR(STE_PHYCTL_MCLK);
241 	DELAY(1);
242 	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
243 	MII_SET(STE_PHYCTL_MCLK);
244 	DELAY(1);
245 
246 	/*
247 	 * Now try reading data bits. If the ack failed, we still
248 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
249 	 */
250 	if (ack) {
251 		for(i = 0; i < 16; i++) {
252 			MII_CLR(STE_PHYCTL_MCLK);
253 			DELAY(1);
254 			MII_SET(STE_PHYCTL_MCLK);
255 			DELAY(1);
256 		}
257 		goto fail;
258 	}
259 
260 	for (i = 0x8000; i; i >>= 1) {
261 		MII_CLR(STE_PHYCTL_MCLK);
262 		DELAY(1);
263 		if (!ack) {
264 			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
265 				frame->mii_data |= i;
266 			DELAY(1);
267 		}
268 		MII_SET(STE_PHYCTL_MCLK);
269 		DELAY(1);
270 	}
271 
272 fail:
273 
274 	MII_CLR(STE_PHYCTL_MCLK);
275 	DELAY(1);
276 	MII_SET(STE_PHYCTL_MCLK);
277 	DELAY(1);
278 
279 	splx(s);
280 
281 	if (ack)
282 		return(1);
283 	return(0);
284 }
285 
286 /*
287  * Write to a PHY register through the MII.
288  */
289 int
290 ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
291 {
292 	int		s;
293 
294 	s = splnet();
295 	/*
296 	 * Set up frame for TX.
297 	 */
298 
299 	frame->mii_stdelim = STE_MII_STARTDELIM;
300 	frame->mii_opcode = STE_MII_WRITEOP;
301 	frame->mii_turnaround = STE_MII_TURNAROUND;
302 
303 	/*
304  	 * Turn on data output.
305 	 */
306 	MII_SET(STE_PHYCTL_MDIR);
307 
308 	ste_mii_sync(sc);
309 
310 	ste_mii_send(sc, frame->mii_stdelim, 2);
311 	ste_mii_send(sc, frame->mii_opcode, 2);
312 	ste_mii_send(sc, frame->mii_phyaddr, 5);
313 	ste_mii_send(sc, frame->mii_regaddr, 5);
314 	ste_mii_send(sc, frame->mii_turnaround, 2);
315 	ste_mii_send(sc, frame->mii_data, 16);
316 
317 	/* Idle bit. */
318 	MII_SET(STE_PHYCTL_MCLK);
319 	DELAY(1);
320 	MII_CLR(STE_PHYCTL_MCLK);
321 	DELAY(1);
322 
323 	/*
324 	 * Turn off xmit.
325 	 */
326 	MII_CLR(STE_PHYCTL_MDIR);
327 
328 	splx(s);
329 
330 	return(0);
331 }
332 
333 int
334 ste_miibus_readreg(struct device *self, int phy, int reg)
335 {
336 	struct ste_softc	*sc = (struct ste_softc *)self;
337 	struct ste_mii_frame	frame;
338 
339 	if (sc->ste_one_phy && phy != 0)
340 		return (0);
341 
342 	bzero(&frame, sizeof(frame));
343 
344 	frame.mii_phyaddr = phy;
345 	frame.mii_regaddr = reg;
346 	ste_mii_readreg(sc, &frame);
347 
348 	return(frame.mii_data);
349 }
350 
351 void
352 ste_miibus_writereg(struct device *self, int phy, int reg, int data)
353 {
354 	struct ste_softc	*sc = (struct ste_softc *)self;
355 	struct ste_mii_frame	frame;
356 
357 	bzero(&frame, sizeof(frame));
358 
359 	frame.mii_phyaddr = phy;
360 	frame.mii_regaddr = reg;
361 	frame.mii_data = data;
362 
363 	ste_mii_writereg(sc, &frame);
364 }
365 
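/*
 * MII status change callback: keep the MAC's duplex setting in sync
 * with the mode the PHY negotiated.  The RX/TX DMA engines are stalled
 * around the MACCTL0 update and unstalled afterwards.
 */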
366 void
367 ste_miibus_statchg(struct device *self)
368 {
369 	struct ste_softc	*sc = (struct ste_softc *)self;
370 	struct mii_data		*mii;
371 	int fdx, fcur;
372 
373 	mii = &sc->sc_mii;
374 
375 	fcur = CSR_READ_2(sc, STE_MACCTL0) & STE_MACCTL0_FULLDUPLEX;
376 	fdx = (mii->mii_media_active & IFM_GMASK) == IFM_FDX;
377 
378 	if ((fcur && fdx) || (! fcur && ! fdx))
379 		return;
380 
381 	STE_SETBIT4(sc, STE_DMACTL,
382 	    STE_DMACTL_RXDMA_STALL |STE_DMACTL_TXDMA_STALL);
383 	ste_wait(sc);
384 
385 	if (fdx)
386 		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
387 	else
388 		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
389 
390 	STE_SETBIT4(sc, STE_DMACTL,
391 	    STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);
392 }
393 
394 int
395 ste_ifmedia_upd(struct ifnet *ifp)
396 {
397 	struct ste_softc	*sc;
398 	struct mii_data		*mii;
399 
400 	sc = ifp->if_softc;
401 	mii = &sc->sc_mii;
402 	sc->ste_link = 0;
403 	if (mii->mii_instance) {
404 		struct mii_softc	*miisc;
405 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
406 			mii_phy_reset(miisc);
407 	}
408 	mii_mediachg(mii);
409 
410 	return(0);
411 }
412 
413 void
414 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
415 {
416 	struct ste_softc	*sc;
417 	struct mii_data		*mii;
418 
419 	sc = ifp->if_softc;
420 	mii = &sc->sc_mii;
421 
422 	mii_pollstat(mii);
423 	ifmr->ifm_active = mii->mii_media_active;
424 	ifmr->ifm_status = mii->mii_media_status;
425 }
426 
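/*
 * Spin until the DMA engine has finished processing the last
 * stall/unstall command (the HALTINPROG bit clears), or complain
 * after STE_TIMEOUT iterations.
 */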
427 void
428 ste_wait(struct ste_softc *sc)
429 {
430 	int		i;
431 
432 	for (i = 0; i < STE_TIMEOUT; i++) {
433 		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
434 			break;
435 	}
436 
437 	if (i == STE_TIMEOUT)
438 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
439 }
440 
441 /*
442  * The EEPROM is slow: give it time to come ready after issuing
443  * it a command.
444  */
445 int
446 ste_eeprom_wait(struct ste_softc *sc)
447 {
448 	int		i;
449 
450 	DELAY(1000);
451 
452 	for (i = 0; i < 100; i++) {
453 		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
454 			DELAY(1000);
455 		else
456 			break;
457 	}
458 
459 	if (i == 100) {
460 		printf("%s: eeprom failed to come ready\n",
461 		    sc->sc_dev.dv_xname);
462 		return(1);
463 	}
464 
465 	return(0);
466 }
467 
468 /*
469  * Read a sequence of words from the EEPROM. Note that ethernet address
470  * data is stored in the EEPROM in network byte order.
471  */
472 int
473 ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
474 {
475 	int			err = 0, i;
476 	u_int16_t		word = 0, *ptr;
477 
478 	if (ste_eeprom_wait(sc))
479 		return(1);
480 
481 	for (i = 0; i < cnt; i++) {
482 		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
483 		err = ste_eeprom_wait(sc);
484 		if (err)
485 			break;
486 		word = CSR_READ_2(sc, STE_EEPROM_DATA);
487 		ptr = (u_int16_t *)(dest + (i * 2));
488 		if (swap)
489 			*ptr = ntohs(word);
490 		else
491 			*ptr = word;
492 	}
493 
494 	return(err ? 1 : 0);
495 }
496 
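/*
 * Program the receive filter: unicast and broadcast frames are always
 * accepted, promiscuous and all-multicast modes are honored, and
 * ordinary multicast is filtered through the 64-bit hash table in the
 * MAR0-MAR3 registers.
 */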
497 void
498 ste_iff(struct ste_softc *sc)
499 {
500 	struct ifnet		*ifp = &sc->arpcom.ac_if;
501 	struct arpcom		*ac = &sc->arpcom;
502 	struct ether_multi	*enm;
503 	struct ether_multistep	step;
504 	u_int32_t		rxmode, hashes[2];
505 	int			h = 0;
506 
507 	rxmode = CSR_READ_1(sc, STE_RX_MODE);
508 	rxmode &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_BROADCAST |
509 	    STE_RXMODE_MULTIHASH | STE_RXMODE_PROMISC |
510 	    STE_RXMODE_UNICAST);
511 	bzero(hashes, sizeof(hashes));
512 	ifp->if_flags &= ~IFF_ALLMULTI;
513 
514 	/*
515 	 * Always accept broadcast frames.
516 	 * Always accept frames destined to our station address.
517 	 */
518 	rxmode |= STE_RXMODE_BROADCAST | STE_RXMODE_UNICAST;
519 
520 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
521 		ifp->if_flags |= IFF_ALLMULTI;
522 		rxmode |= STE_RXMODE_ALLMULTI;
523 		if (ifp->if_flags & IFF_PROMISC)
524 			rxmode |= STE_RXMODE_PROMISC;
525 	} else {
526 		rxmode |= STE_RXMODE_MULTIHASH;
527 
528 		/* now program new ones */
529 		ETHER_FIRST_MULTI(step, ac, enm);
530 		while (enm != NULL) {
531 			h = ether_crc32_be(enm->enm_addrlo,
532 			    ETHER_ADDR_LEN) & 0x3F;
533 
534 			if (h < 32)
535 				hashes[0] |= (1 << h);
536 			else
537 				hashes[1] |= (1 << (h - 32));
538 
539 			ETHER_NEXT_MULTI(step, enm);
540 		}
541 	}
542 
543 	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
544 	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
545 	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
546 	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
547 	CSR_WRITE_1(sc, STE_RX_MODE, rxmode);
548 }
549 
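/*
 * Interrupt handler: check the latched interrupt bit, then loop
 * reading STE_ISR_ACK and dispatching RX/TX completions, statistics
 * overflows, link events and host errors until no interrupt bits
 * remain pending.
 */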
550 int
551 ste_intr(void *xsc)
552 {
553 	struct ste_softc	*sc;
554 	struct ifnet		*ifp;
555 	u_int16_t		status;
556 	int			claimed = 0;
557 
558 	sc = xsc;
559 	ifp = &sc->arpcom.ac_if;
560 
561 	/* See if this is really our interrupt. */
562 	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
563 		return claimed;
564 
565 	for (;;) {
566 		status = CSR_READ_2(sc, STE_ISR_ACK);
567 
568 		if (!(status & STE_INTRS))
569 			break;
570 
571 		claimed = 1;
572 
573 		if (status & STE_ISR_RX_DMADONE) {
574 			ste_rxeoc(sc);
575 			ste_rxeof(sc);
576 		}
577 
578 		if (status & STE_ISR_TX_DMADONE)
579 			ste_txeof(sc);
580 
581 		if (status & STE_ISR_TX_DONE)
582 			ste_txeoc(sc);
583 
584 		if (status & STE_ISR_STATS_OFLOW) {
585 			timeout_del(&sc->sc_stats_tmo);
586 			ste_stats_update(sc);
587 		}
588 
589 		if (status & STE_ISR_LINKEVENT)
590 			mii_pollstat(&sc->sc_mii);
591 
592 		if (status & STE_ISR_HOSTERR) {
593 			ste_reset(sc);
594 			ste_init(sc);
595 		}
596 	}
597 
598 	/* Re-enable interrupts */
599 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
600 
601 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
602 		ste_start(ifp);
603 
604 	return claimed;
605 }
606 
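/*
 * RX end-of-chain recovery: if the current RX head shows no status
 * yet, scan forward for a descriptor the chip has completed and, if
 * one is found, resynchronize the head pointer to it.
 */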
607 void
608 ste_rxeoc(struct ste_softc *sc)
609 {
610 	struct ste_chain_onefrag *cur_rx;
611 
612 	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
613 		cur_rx = sc->ste_cdata.ste_rx_head;
614 		do {
615 			cur_rx = cur_rx->ste_next;
616 			/* If the ring is empty, just return. */
617 			if (cur_rx == sc->ste_cdata.ste_rx_head)
618 				return;
619 		} while (cur_rx->ste_ptr->ste_status == 0);
620 		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
621 			/* We've fallen behind the chip: catch up to it. */
622 			sc->ste_cdata.ste_rx_head = cur_rx;
623 		}
624 	}
625 }
626 
627 /*
628  * A frame has been uploaded: pass the resulting mbuf chain up to
629  * the higher level protocols.
630  */
631 void
632 ste_rxeof(struct ste_softc *sc)
633 {
634 	struct mbuf		*m;
635 	struct ifnet		*ifp;
636 	struct ste_chain_onefrag	*cur_rx;
637 	int			total_len = 0, count = 0;
638 	u_int32_t		rxstat;
639 
640 	ifp = &sc->arpcom.ac_if;
641 
642 	while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
643 	      & STE_RXSTAT_DMADONE) {
644 		if ((STE_RX_LIST_CNT - count) < 3)
645 			break;
646 
647 		cur_rx = sc->ste_cdata.ste_rx_head;
648 		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;
649 
650 		/*
651 		 * If an error occurs, update stats, clear the
652 		 * status word and leave the mbuf cluster in place:
653 		 * it should simply get re-used next time this descriptor
654 	 	 * comes up in the ring.
655 		 */
656 		if (rxstat & STE_RXSTAT_FRAME_ERR) {
657 			ifp->if_ierrors++;
658 			cur_rx->ste_ptr->ste_status = 0;
659 			continue;
660 		}
661 
662 		/*
663 		 * If the error bit was not set, the upload complete
664 		 * bit should be set, which means we have a valid packet.
665 		 * If not, something truly strange has happened.
666 		 */
667 		if (!(rxstat & STE_RXSTAT_DMADONE)) {
668 			printf("%s: bad receive status -- packet dropped\n",
669 				sc->sc_dev.dv_xname);
670 			ifp->if_ierrors++;
671 			cur_rx->ste_ptr->ste_status = 0;
672 			continue;
673 		}
674 
675 		/* No errors; receive the packet. */
676 		m = cur_rx->ste_mbuf;
677 		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;
678 
679 		/*
680 		 * Try to conjure up a new mbuf cluster. If that
681 		 * fails, it means we have an out of memory condition and
682 		 * should leave the buffer in place and continue. This will
683 		 * result in a lost packet, but there's little else we
684 		 * can do in this situation.
685 		 */
686 		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
687 			ifp->if_ierrors++;
688 			cur_rx->ste_ptr->ste_status = 0;
689 			continue;
690 		}
691 
692 		m->m_pkthdr.rcvif = ifp;
693 		m->m_pkthdr.len = m->m_len = total_len;
694 
695 		ifp->if_ipackets++;
696 
697 #if NBPFILTER > 0
698 		if (ifp->if_bpf)
699 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
700 #endif
701 
702 		/* pass it on. */
703 		ether_input_mbuf(ifp, m);
704 
705 		cur_rx->ste_ptr->ste_status = 0;
706 		count++;
707 	}
708 }
709 
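/*
 * TX completion status handling: drain the TX status register.  On an
 * underrun, excessive collisions or a reclaim error the chip is reset
 * and reinitialized, and the TX start threshold is bumped up after an
 * underrun.
 */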
710 void
711 ste_txeoc(struct ste_softc *sc)
712 {
713 	u_int8_t		txstat;
714 	struct ifnet		*ifp;
715 
716 	ifp = &sc->arpcom.ac_if;
717 
718 	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
719 	    STE_TXSTATUS_TXDONE) {
720 		if (txstat & STE_TXSTATUS_UNDERRUN ||
721 		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
722 		    txstat & STE_TXSTATUS_RECLAIMERR) {
723 			ifp->if_oerrors++;
724 			printf("%s: transmission error: %x\n",
725 			    sc->sc_dev.dv_xname, txstat);
726 
727 			ste_reset(sc);
728 			ste_init(sc);
729 
730 			if (txstat & STE_TXSTATUS_UNDERRUN &&
731 			    sc->ste_tx_thresh < ETHER_MAX_DIX_LEN) {
732 				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
733 				printf("%s: tx underrun, increasing tx"
734 				    " start threshold to %d bytes\n",
735 				    sc->sc_dev.dv_xname, sc->ste_tx_thresh);
736 			}
737 			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
738 			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
739 			    (ETHER_MAX_DIX_LEN >> 4));
740 		}
741 		ste_init(sc);
742 		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
743 	}
744 }
745 
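/*
 * Reclaim transmitted descriptors: free their mbufs, clear OACTIVE and
 * advance the consumer index until it catches up with the producer or
 * hits a descriptor the chip has not finished with yet.
 */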
746 void
747 ste_txeof(struct ste_softc *sc)
748 {
749 	struct ste_chain	*cur_tx = NULL;
750 	struct ifnet		*ifp;
751 	int			idx;
752 
753 	ifp = &sc->arpcom.ac_if;
754 
755 	idx = sc->ste_cdata.ste_tx_cons;
756 	while(idx != sc->ste_cdata.ste_tx_prod) {
757 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
758 
759 		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
760 			break;
761 
762 		m_freem(cur_tx->ste_mbuf);
763 		cur_tx->ste_mbuf = NULL;
764 		ifp->if_flags &= ~IFF_OACTIVE;
765 		ifp->if_opackets++;
766 
767 		STE_INC(idx, STE_TX_LIST_CNT);
768 	}
769 
770 	sc->ste_cdata.ste_tx_cons = idx;
771 	if (idx == sc->ste_cdata.ste_tx_prod)
772 		ifp->if_timer = 0;
773 }
774 
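/*
 * Once-a-second timeout: fold the hardware collision counters into the
 * interface statistics and, while the link is down, poll the PHY so a
 * newly established link restarts transmission.
 */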
775 void
776 ste_stats_update(void *xsc)
777 {
778 	struct ste_softc	*sc;
779 	struct ifnet		*ifp;
780 	struct mii_data		*mii;
781 	int			s;
782 
783 	s = splnet();
784 
785 	sc = xsc;
786 	ifp = &sc->arpcom.ac_if;
787 	mii = &sc->sc_mii;
788 
789 	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
790 	    + CSR_READ_1(sc, STE_MULTI_COLLS)
791 	    + CSR_READ_1(sc, STE_SINGLE_COLLS);
792 
793 	if (!sc->ste_link) {
794 		mii_pollstat(mii);
795 		if (mii->mii_media_status & IFM_ACTIVE &&
796 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
797 			sc->ste_link++;
798 			/*
799 			 * We don't get a callback on re-init, so do it
800 			 * here; otherwise we get stuck in the wrong link state.
801 			 */
802 			ste_miibus_statchg((struct device *)sc);
803 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
804 				ste_start(ifp);
805 		}
806 	}
807 
808 	timeout_add_sec(&sc->sc_stats_tmo, 1);
809 	splx(s);
810 }
811 
812 /*
813  * Probe for a Sundance ST201 chip. Check the PCI vendor and device
814  * IDs against our list and return a match if we find one.
815  */
816 int
817 ste_probe(struct device *parent, void *match, void *aux)
818 {
819 	return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
820 	    nitems(ste_devices)));
821 }
822 
823 /*
824  * Attach the interface. Allocate softc structures, do ifmedia
825  * setup and ethernet/BPF attach.
826  */
827 void
828 ste_attach(struct device *parent, struct device *self, void *aux)
829 {
830 	const char		*intrstr = NULL;
831 	struct ste_softc	*sc = (struct ste_softc *)self;
832 	struct pci_attach_args	*pa = aux;
833 	pci_chipset_tag_t	pc = pa->pa_pc;
834 	pci_intr_handle_t	ih;
835 	struct ifnet		*ifp;
836 	bus_size_t		size;
837 
838 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
839 
840 	/*
841 	 * Only use one PHY, since this chip reports multiple.
842 	 * Note: on the DFE-550TX the PHY is at 1; on the DFE-580TX
843 	 * it is at 0 & 1.  The affected DFE-550TX is rev 0x12.
844 	 */
845 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DLINK &&
846 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DLINK_DFE550TX &&
847 	    PCI_REVISION(pa->pa_class) == 0x12)
848 		sc->ste_one_phy = 1;
849 
850 	/*
851 	 * Map control/status registers.
852 	 */
853 
854 #ifdef STE_USEIOSPACE
855 	if (pci_mapreg_map(pa, STE_PCI_LOIO,
856 	    PCI_MAPREG_TYPE_IO, 0,
857 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
858 		printf(": can't map i/o space\n");
859 		return;
860 	}
861 #else
862 	if (pci_mapreg_map(pa, STE_PCI_LOMEM,
863 	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
864 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
865 		printf(": can't map mem space\n");
866 		return;
867 	}
868 #endif
869 
870 	/* Allocate interrupt */
871 	if (pci_intr_map(pa, &ih)) {
872 		printf(": couldn't map interrupt\n");
873 		goto fail_1;
874 	}
875 	intrstr = pci_intr_string(pc, ih);
876 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
877 	    self->dv_xname);
878 	if (sc->sc_ih == NULL) {
879 		printf(": couldn't establish interrupt");
880 		if (intrstr != NULL)
881 			printf(" at %s", intrstr);
882 		printf("\n");
883 		goto fail_1;
884 	}
885 	printf(": %s", intrstr);
886 
887 	/* Reset the adapter. */
888 	ste_reset(sc);
889 
890 	/*
891 	 * Get station address from the EEPROM.
892 	 */
893 	if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
894 	    STE_EEADDR_NODE0, 3, 0)) {
895 		printf(": failed to read station address\n");
896 		goto fail_2;
897 	}
898 
899 	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
900 
901 	sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
902 	    M_DEVBUF, M_DONTWAIT);
903 	if (sc->ste_ldata_ptr == NULL) {
904 		printf(": no memory for list buffers!\n");
905 		goto fail_2;
906 	}
907 
908 	sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
909 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
910 
911 	ifp = &sc->arpcom.ac_if;
912 	ifp->if_softc = sc;
913 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
914 	ifp->if_ioctl = ste_ioctl;
915 	ifp->if_start = ste_start;
916 	ifp->if_watchdog = ste_watchdog;
917 	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
918 	IFQ_SET_READY(&ifp->if_snd);
919 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
920 	ifp->if_capabilities = IFCAP_VLAN_MTU;
921 
922 	sc->ste_tx_thresh = STE_TXSTART_THRESH;
923 
924 	sc->sc_mii.mii_ifp = ifp;
925 	sc->sc_mii.mii_readreg = ste_miibus_readreg;
926 	sc->sc_mii.mii_writereg = ste_miibus_writereg;
927 	sc->sc_mii.mii_statchg = ste_miibus_statchg;
928 	ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd,ste_ifmedia_sts);
929 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
930 	    0);
931 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
932 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
933 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
934 	} else
935 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
936 
937 	/*
938 	 * Call MI attach routines.
939 	 */
940 	if_attach(ifp);
941 	ether_ifattach(ifp);
942 	return;
943 
944 fail_2:
945 	pci_intr_disestablish(pc, sc->sc_ih);
946 
947 fail_1:
948 	bus_space_unmap(sc->ste_btag, sc->ste_bhandle, size);
949 }
950 
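/*
 * Attach an mbuf cluster to an RX descriptor.  If no mbuf is passed
 * in, allocate a fresh cluster; otherwise recycle the one given.  The
 * data pointer is adjusted by ETHER_ALIGN so the payload following the
 * Ethernet header ends up longword aligned.
 */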
951 int
952 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c, struct mbuf *m)
953 {
954 	struct mbuf		*m_new = NULL;
955 
956 	if (m == NULL) {
957 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
958 		if (m_new == NULL)
959 			return(ENOBUFS);
960 		MCLGET(m_new, M_DONTWAIT);
961 		if (!(m_new->m_flags & M_EXT)) {
962 			m_freem(m_new);
963 			return(ENOBUFS);
964 		}
965 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
966 	} else {
967 		m_new = m;
968 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
969 		m_new->m_data = m_new->m_ext.ext_buf;
970 	}
971 
972 	m_adj(m_new, ETHER_ALIGN);
973 
974 	c->ste_mbuf = m_new;
975 	c->ste_ptr->ste_status = 0;
976 	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, vaddr_t));
977 	c->ste_ptr->ste_frag.ste_len = (ETHER_MAX_DIX_LEN + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;
978 
979 	return(0);
980 }
981 
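/*
 * Build the RX descriptor ring: give every descriptor a cluster via
 * ste_newbuf() and link both the software chain and the physical
 * ste_next pointers into a circle.
 */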
982 int
983 ste_init_rx_list(struct ste_softc *sc)
984 {
985 	struct ste_chain_data	*cd;
986 	struct ste_list_data	*ld;
987 	int			i;
988 
989 	cd = &sc->ste_cdata;
990 	ld = sc->ste_ldata;
991 
992 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
993 		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
994 		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
995 			return(ENOBUFS);
996 		if (i == (STE_RX_LIST_CNT - 1)) {
997 			cd->ste_rx_chain[i].ste_next =
998 			    &cd->ste_rx_chain[0];
999 			ld->ste_rx_list[i].ste_next =
1000 			    vtophys((vaddr_t)&ld->ste_rx_list[0]);
1001 		} else {
1002 			cd->ste_rx_chain[i].ste_next =
1003 			    &cd->ste_rx_chain[i + 1];
1004 			ld->ste_rx_list[i].ste_next =
1005 			    vtophys((vaddr_t)&ld->ste_rx_list[i + 1]);
1006 		}
1007 		ld->ste_rx_list[i].ste_status = 0;
1008 	}
1009 
1010 	cd->ste_rx_head = &cd->ste_rx_chain[0];
1011 
1012 	return(0);
1013 }
1014 
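/*
 * Initialize the TX descriptor ring: record each descriptor's physical
 * address and link the software chain into a circle, then clear the
 * descriptors and reset the producer/consumer indexes.
 */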
1015 void
1016 ste_init_tx_list(struct ste_softc *sc)
1017 {
1018 	struct ste_chain_data	*cd;
1019 	struct ste_list_data	*ld;
1020 	int			i;
1021 
1022 	cd = &sc->ste_cdata;
1023 	ld = sc->ste_ldata;
1024 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1025 		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1026 		cd->ste_tx_chain[i].ste_phys = vtophys((vaddr_t)&ld->ste_tx_list[i]);
1027 		if (i == (STE_TX_LIST_CNT - 1))
1028 			cd->ste_tx_chain[i].ste_next =
1029 			    &cd->ste_tx_chain[0];
1030 		else
1031 			cd->ste_tx_chain[i].ste_next =
1032 			    &cd->ste_tx_chain[i + 1];
1033 	}
1034 
1035 	bzero(ld->ste_tx_list, sizeof(struct ste_desc) * STE_TX_LIST_CNT);
1036 
1037 	cd->ste_tx_prod = 0;
1038 	cd->ste_tx_cons = 0;
1039 }
1040 
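/*
 * (Re)initialize the hardware: program the station address, set up the
 * RX and TX rings, load the RX filter, point the DMA engines at the
 * descriptor lists, then enable the receiver, transmitter, statistics
 * counters and interrupts.
 */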
1041 void
1042 ste_init(void *xsc)
1043 {
1044 	struct ste_softc	*sc = (struct ste_softc *)xsc;
1045 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1046 	struct mii_data		*mii;
1047 	int			i, s;
1048 
1049 	s = splnet();
1050 
1051 	ste_stop(sc);
1052 
1053 	mii = &sc->sc_mii;
1054 
1055 	/* Init our MAC address */
1056 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1057 		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1058 	}
1059 
1060 	/* Init RX list */
1061 	if (ste_init_rx_list(sc) == ENOBUFS) {
1062 		printf("%s: initialization failed: no "
1063 		    "memory for RX buffers\n", sc->sc_dev.dv_xname);
1064 		ste_stop(sc);
1065 		splx(s);
1066 		return;
1067 	}
1068 
1069 	/* Set RX polling interval */
1070 	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
1071 
1072 	/* Init TX descriptors */
1073 	ste_init_tx_list(sc);
1074 
1075 	/* Set the TX freethresh value */
1076 	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, ETHER_MAX_DIX_LEN >> 8);
1077 
1078 	/* Set the TX start threshold for best performance. */
1079 	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
1080 
1081 	/* Set the TX reclaim threshold. */
1082 	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (ETHER_MAX_DIX_LEN >> 4));
1083 
1084 	/* Program promiscuous mode and multicast filters. */
1085 	ste_iff(sc);
1086 
1087 	/* Load the address of the RX list. */
1088 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1089 	ste_wait(sc);
1090 	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
1091 	    vtophys((vaddr_t)&sc->ste_ldata->ste_rx_list[0]));
1092 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1093 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1094 
1095 	/* Set TX polling interval (defer until we TX first packet) */
1096 	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
1097 
1098 	/* Load address of the TX list */
1099 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1100 	ste_wait(sc);
1101 	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
1102 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1103 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1104 	ste_wait(sc);
1105 	sc->ste_tx_prev = NULL;
1106 
1107 	/* Enable receiver and transmitter */
1108 	CSR_WRITE_2(sc, STE_MACCTL0, 0);
1109 	CSR_WRITE_2(sc, STE_MACCTL1, 0);
1110 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
1111 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
1112 
1113 	/* Enable stats counters. */
1114 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
1115 
1116 	/* Enable interrupts. */
1117 	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
1118 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
1119 
1120 	/* Accept VLAN length packets */
1121 	CSR_WRITE_2(sc, STE_MAX_FRAMELEN,
1122 	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
1123 
1124 	ste_ifmedia_upd(ifp);
1125 
1126 	ifp->if_flags |= IFF_RUNNING;
1127 	ifp->if_flags &= ~IFF_OACTIVE;
1128 
1129 	splx(s);
1130 
1131 	timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
1132 	timeout_add_sec(&sc->sc_stats_tmo, 1);
1133 }
1134 
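/*
 * Stop the interface: disable interrupts, RX, TX and statistics, stall
 * the DMA engines, reset the chip and free any mbufs still hanging off
 * the RX and TX rings.
 */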
1135 void
1136 ste_stop(struct ste_softc *sc)
1137 {
1138 	int			i;
1139 	struct ifnet		*ifp;
1140 
1141 	ifp = &sc->arpcom.ac_if;
1142 
1143 	timeout_del(&sc->sc_stats_tmo);
1144 
1145 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
1146 
1147 	CSR_WRITE_2(sc, STE_IMR, 0);
1148 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
1149 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
1150 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
1151 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1152 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1153 	ste_wait(sc);
1154 	/*
1155 	 * Try really hard to stop the RX engine; otherwise, under heavy
1156 	 * RX traffic, the chip will write into de-allocated memory.
1157 	 */
1158 	ste_reset(sc);
1159 
1160 	sc->ste_link = 0;
1161 
1162 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1163 		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
1164 			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
1165 			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
1166 		}
1167 	}
1168 
1169 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1170 		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
1171 			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
1172 			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
1173 		}
1174 	}
1175 
1176 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
1177 }
1178 
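/*
 * Issue a full ASIC reset (global, RX, TX, DMA, FIFO, network,
 * autoinit, host and external reset bits) and wait for the chip to
 * report that the reset has completed.
 */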
1179 void
1180 ste_reset(struct ste_softc *sc)
1181 {
1182 	int		i;
1183 
1184 	STE_SETBIT4(sc, STE_ASICCTL,
1185 	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1186 	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1187 	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1188 	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1189 	    STE_ASICCTL_EXTRESET_RESET);
1190 
1191 	DELAY(100000);
1192 
1193 	for (i = 0; i < STE_TIMEOUT; i++) {
1194 		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1195 			break;
1196 	}
1197 
1198 	if (i == STE_TIMEOUT)
1199 		printf("%s: global reset never completed\n",
1200 		    sc->sc_dev.dv_xname);
1201 }
1202 
1203 int
1204 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1205 {
1206 	struct ste_softc	*sc = ifp->if_softc;
1207 	struct ifaddr		*ifa = (struct ifaddr *) data;
1208 	struct ifreq		*ifr = (struct ifreq *) data;
1209 	int			s, error = 0;
1210 
1211 	s = splnet();
1212 
1213 	switch(command) {
1214 	case SIOCSIFADDR:
1215 		ifp->if_flags |= IFF_UP;
1216 		if (!(ifp->if_flags & IFF_RUNNING))
1217 			ste_init(sc);
1218 #ifdef INET
1219 		if (ifa->ifa_addr->sa_family == AF_INET)
1220 			arp_ifinit(&sc->arpcom, ifa);
1221 #endif
1222 		break;
1223 
1224 	case SIOCSIFFLAGS:
1225 		if (ifp->if_flags & IFF_UP) {
1226 			if (ifp->if_flags & IFF_RUNNING)
1227 				error = ENETRESET;
1228 			else {
1229 				sc->ste_tx_thresh = STE_TXSTART_THRESH;
1230 				ste_init(sc);
1231 			}
1232 		} else {
1233 			if (ifp->if_flags & IFF_RUNNING)
1234 				ste_stop(sc);
1235 		}
1236 		break;
1237 
1238 	case SIOCGIFMEDIA:
1239 	case SIOCSIFMEDIA:
1240 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1241 		break;
1242 
1243 	default:
1244 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1245 	}
1246 
1247 	if (error == ENETRESET) {
1248 		if (ifp->if_flags & IFF_RUNNING)
1249 			ste_iff(sc);
1250 		error = 0;
1251 	}
1252 
1253 	splx(s);
1254 	return(error);
1255 }
1256 
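/*
 * Map an mbuf chain into a TX descriptor's fragment list.  If the
 * chain has more than STE_MAXFRAGS segments it is first copied into a
 * single mbuf (or cluster) and the mapping is retried.
 */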
1257 int
1258 ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
1259 {
1260 	int			frag = 0;
1261 	struct ste_frag		*f = NULL;
1262 	struct mbuf		*m;
1263 	struct ste_desc		*d;
1264 
1265 	d = c->ste_ptr;
1266 	d->ste_ctl = 0;
1267 
1268 encap_retry:
1269 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1270 		if (m->m_len != 0) {
1271 			if (frag == STE_MAXFRAGS)
1272 				break;
1273 			f = &d->ste_frags[frag];
1274 			f->ste_addr = vtophys(mtod(m, vaddr_t));
1275 			f->ste_len = m->m_len;
1276 			frag++;
1277 		}
1278 	}
1279 
1280 	if (m != NULL) {
1281 		struct mbuf *mn;
1282 
1283 		/*
1284 		 * We ran out of segments. We have to recopy this
1285 		 * mbuf chain first. Bail out if we can't get the
1286 		 * new buffers.
1287 		 */
1288 		MGETHDR(mn, M_DONTWAIT, MT_DATA);
1289 		if (mn == NULL) {
1290 			m_freem(m_head);
1291 			return ENOMEM;
1292 		}
1293 		if (m_head->m_pkthdr.len > MHLEN) {
1294 			MCLGET(mn, M_DONTWAIT);
1295 			if ((mn->m_flags & M_EXT) == 0) {
1296 				m_freem(mn);
1297 				m_freem(m_head);
1298 				return ENOMEM;
1299 			}
1300 		}
1301 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1302 			   mtod(mn, caddr_t));
1303 		mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
1304 		m_freem(m_head);
1305 		m_head = mn;
1306 		goto encap_retry;
1307 	}
1308 
1309 	c->ste_mbuf = m_head;
1310 	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
1311 	d->ste_ctl = 1;
1312 
1313 	return(0);
1314 }
1315 
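/*
 * Transmit start routine: dequeue packets and load them into free TX
 * descriptors.  The first packet (re)arms the TX DMA list pointer and
 * polling interval; subsequent packets are chained onto the previous
 * descriptor.
 */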
1316 void
1317 ste_start(struct ifnet *ifp)
1318 {
1319 	struct ste_softc	*sc;
1320 	struct mbuf		*m_head = NULL;
1321 	struct ste_chain	*cur_tx;
1322 	int			idx;
1323 
1324 	sc = ifp->if_softc;
1325 
1326 	if (!sc->ste_link)
1327 		return;
1328 
1329 	if (ifp->if_flags & IFF_OACTIVE)
1330 		return;
1331 
1332 	idx = sc->ste_cdata.ste_tx_prod;
1333 
1334 	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
1335 		/*
1336 		 * We cannot re-use the last (free) descriptor;
1337 		 * the chip may not have read its ste_next yet.
1338 		 */
1339 		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
1340 		    sc->ste_cdata.ste_tx_cons) {
1341 			ifp->if_flags |= IFF_OACTIVE;
1342 			break;
1343 		}
1344 
1345 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1346 		if (m_head == NULL)
1347 			break;
1348 
1349 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
1350 
1351 		if (ste_encap(sc, cur_tx, m_head) != 0)
1352 			break;
1353 
1354 		cur_tx->ste_ptr->ste_next = 0;
1355 
1356 		if (sc->ste_tx_prev == NULL) {
1357 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1358 			/* Load address of the TX list */
1359 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1360 			ste_wait(sc);
1361 
1362 			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
1363 			    vtophys((vaddr_t)&sc->ste_ldata->ste_tx_list[0]));
1364 
1365 			/* Set TX polling interval to start TX engine */
1366 			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
1367 
1368 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1369 			ste_wait(sc);
1370 		} else {
1371 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1372 			sc->ste_tx_prev->ste_ptr->ste_next
1373 				= cur_tx->ste_phys;
1374 		}
1375 
1376 		sc->ste_tx_prev = cur_tx;
1377 
1378 #if NBPFILTER > 0
1379 		/*
1380 		 * If there's a BPF listener, bounce a copy of this frame
1381 		 * to him.
1382 	 	 */
1383 		if (ifp->if_bpf)
1384 			bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf,
1385 			    BPF_DIRECTION_OUT);
1386 #endif
1387 
1388 		STE_INC(idx, STE_TX_LIST_CNT);
1389 		ifp->if_timer = 5;
1390 	}
1391 	sc->ste_cdata.ste_tx_prod = idx;
1392 }
1393 
1394 void
1395 ste_watchdog(struct ifnet *ifp)
1396 {
1397 	struct ste_softc	*sc;
1398 
1399 	sc = ifp->if_softc;
1400 
1401 	ifp->if_oerrors++;
1402 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1403 
1404 	ste_txeoc(sc);
1405 	ste_txeof(sc);
1406 	ste_rxeoc(sc);
1407 	ste_rxeof(sc);
1408 	ste_reset(sc);
1409 	ste_init(sc);
1410 
1411 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1412 		ste_start(ifp);
1413 }
1414