1 /* $OpenBSD: if_ste.c,v 1.71 2024/05/24 06:02:57 jsg Exp $ */
2 /*
3 * Copyright (c) 1997, 1998, 1999
4 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
34 */
35
36 #include "bpfilter.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/socket.h>
42 #include <sys/ioctl.h>
43 #include <sys/errno.h>
44 #include <sys/malloc.h>
45 #include <sys/timeout.h>
46
47 #include <net/if.h>
48
49 #include <netinet/in.h>
50 #include <netinet/if_ether.h>
51
52 #include <net/if_media.h>
53
54 #if NBPFILTER > 0
55 #include <net/bpf.h>
56 #endif
57
58 #include <uvm/uvm_extern.h> /* for vtophys */
59
60 #include <sys/device.h>
61
62 #include <dev/mii/miivar.h>
63
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66 #include <dev/pci/pcidevs.h>
67
68 #define STE_USEIOSPACE
69
70 #include <dev/pci/if_stereg.h>
71
72 int ste_probe(struct device *, void *, void *);
73 void ste_attach(struct device *, struct device *, void *);
74 int ste_intr(void *);
75 void ste_init(void *);
76 void ste_rxeoc(struct ste_softc *);
77 void ste_rxeof(struct ste_softc *);
78 void ste_txeoc(struct ste_softc *);
79 void ste_txeof(struct ste_softc *);
80 void ste_stats_update(void *);
81 void ste_stop(struct ste_softc *);
82 void ste_reset(struct ste_softc *);
83 int ste_ioctl(struct ifnet *, u_long, caddr_t);
84 int ste_encap(struct ste_softc *, struct ste_chain *,
85 struct mbuf *);
86 void ste_start(struct ifnet *);
87 void ste_watchdog(struct ifnet *);
88 int ste_newbuf(struct ste_softc *,
89 struct ste_chain_onefrag *,
90 struct mbuf *);
91 int ste_ifmedia_upd(struct ifnet *);
92 void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
93
94 void ste_mii_sync(struct ste_softc *);
95 void ste_mii_send(struct ste_softc *, u_int32_t, int);
96 int ste_mii_readreg(struct ste_softc *,
97 struct ste_mii_frame *);
98 int ste_mii_writereg(struct ste_softc *,
99 struct ste_mii_frame *);
100 int ste_miibus_readreg(struct device *, int, int);
101 void ste_miibus_writereg(struct device *, int, int, int);
102 void ste_miibus_statchg(struct device *);
103
104 int ste_eeprom_wait(struct ste_softc *);
105 int ste_read_eeprom(struct ste_softc *, caddr_t, int,
106 int, int);
107 void ste_wait(struct ste_softc *);
108 void ste_iff(struct ste_softc *);
109 int ste_init_rx_list(struct ste_softc *);
110 void ste_init_tx_list(struct ste_softc *);
111
112 #define STE_SETBIT4(sc, reg, x) \
113 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
114
115 #define STE_CLRBIT4(sc, reg, x) \
116 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
117
118 #define STE_SETBIT2(sc, reg, x) \
119 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)
120
121 #define STE_CLRBIT2(sc, reg, x) \
122 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)
123
124 #define STE_SETBIT1(sc, reg, x) \
125 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)
126
127 #define STE_CLRBIT1(sc, reg, x) \
128 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)
129
130
131 #define MII_SET(x) STE_SETBIT1(sc, STE_PHYCTL, x)
132 #define MII_CLR(x) STE_CLRBIT1(sc, STE_PHYCTL, x)
133
134 const struct pci_matchid ste_devices[] = {
135 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DFE550TX },
136 { PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_1 },
137 { PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_2 }
138 };
139
140 const struct cfattach ste_ca = {
141 sizeof(struct ste_softc), ste_probe, ste_attach
142 };
143
144 struct cfdriver ste_cd = {
145 NULL, "ste", DV_IFNET
146 };
147
148 /*
149 * Sync the PHYs by setting data bit and strobing the clock 32 times.
150 */
151 void
152 ste_mii_sync(struct ste_softc *sc)
153 {
154 int i;
155
156 MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
157
158 for (i = 0; i < 32; i++) {
159 MII_SET(STE_PHYCTL_MCLK);
160 DELAY(1);
161 MII_CLR(STE_PHYCTL_MCLK);
162 DELAY(1);
163 }
164 }
165
166 /*
167 * Clock a series of bits through the MII.
168 */
169 void
170 ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
171 {
172 int i;
173
174 MII_CLR(STE_PHYCTL_MCLK);
175
176 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
177 if (bits & i) {
178 MII_SET(STE_PHYCTL_MDATA);
179 } else {
180 MII_CLR(STE_PHYCTL_MDATA);
181 }
182 DELAY(1);
183 MII_CLR(STE_PHYCTL_MCLK);
184 DELAY(1);
185 MII_SET(STE_PHYCTL_MCLK);
186 }
187 }
188
189 /*
190 * Read a PHY register through the MII.
191 */
192 int
193 ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
194 {
195 int ack, i, s;
196
197 s = splnet();
198
199 /*
200 * Set up frame for RX.
201 */
202 frame->mii_stdelim = STE_MII_STARTDELIM;
203 frame->mii_opcode = STE_MII_READOP;
204 frame->mii_turnaround = 0;
205 frame->mii_data = 0;
206
207 CSR_WRITE_2(sc, STE_PHYCTL, 0);
208 /*
209 * Turn on data xmit.
210 */
211 MII_SET(STE_PHYCTL_MDIR);
212
213 ste_mii_sync(sc);
214
215 /*
216 * Send command/address info.
217 */
218 ste_mii_send(sc, frame->mii_stdelim, 2);
219 ste_mii_send(sc, frame->mii_opcode, 2);
220 ste_mii_send(sc, frame->mii_phyaddr, 5);
221 ste_mii_send(sc, frame->mii_regaddr, 5);
222
223 /* Turn off xmit. */
224 MII_CLR(STE_PHYCTL_MDIR);
225
226 /* Idle bit */
227 MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
228 DELAY(1);
229 MII_SET(STE_PHYCTL_MCLK);
230 DELAY(1);
231
232 /* Check for ack */
233 MII_CLR(STE_PHYCTL_MCLK);
234 DELAY(1);
235 ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
236 MII_SET(STE_PHYCTL_MCLK);
237 DELAY(1);
238
239 /*
240 * Now try reading data bits. If the ack failed, we still
241 * need to clock through 16 cycles to keep the PHY(s) in sync.
242 */
243 if (ack) {
244 for(i = 0; i < 16; i++) {
245 MII_CLR(STE_PHYCTL_MCLK);
246 DELAY(1);
247 MII_SET(STE_PHYCTL_MCLK);
248 DELAY(1);
249 }
250 goto fail;
251 }
252
253 for (i = 0x8000; i; i >>= 1) {
254 MII_CLR(STE_PHYCTL_MCLK);
255 DELAY(1);
256 if (!ack) {
257 if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
258 frame->mii_data |= i;
259 DELAY(1);
260 }
261 MII_SET(STE_PHYCTL_MCLK);
262 DELAY(1);
263 }
264
265 fail:
266
267 MII_CLR(STE_PHYCTL_MCLK);
268 DELAY(1);
269 MII_SET(STE_PHYCTL_MCLK);
270 DELAY(1);
271
272 splx(s);
273
274 if (ack)
275 return(1);
276 return(0);
277 }
278
279 /*
280 * Write to a PHY register through the MII.
281 */
282 int
283 ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
284 {
285 int s;
286
287 s = splnet();
288 /*
289 * Set up frame for TX.
290 */
291
292 frame->mii_stdelim = STE_MII_STARTDELIM;
293 frame->mii_opcode = STE_MII_WRITEOP;
294 frame->mii_turnaround = STE_MII_TURNAROUND;
295
296 /*
297 * Turn on data output.
298 */
299 MII_SET(STE_PHYCTL_MDIR);
300
301 ste_mii_sync(sc);
302
303 ste_mii_send(sc, frame->mii_stdelim, 2);
304 ste_mii_send(sc, frame->mii_opcode, 2);
305 ste_mii_send(sc, frame->mii_phyaddr, 5);
306 ste_mii_send(sc, frame->mii_regaddr, 5);
307 ste_mii_send(sc, frame->mii_turnaround, 2);
308 ste_mii_send(sc, frame->mii_data, 16);
309
310 /* Idle bit. */
311 MII_SET(STE_PHYCTL_MCLK);
312 DELAY(1);
313 MII_CLR(STE_PHYCTL_MCLK);
314 DELAY(1);
315
316 /*
317 * Turn off xmit.
318 */
319 MII_CLR(STE_PHYCTL_MDIR);
320
321 splx(s);
322
323 return(0);
324 }
325
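/*
 * MII bus read callback: read a PHY register via ste_mii_readreg().
 * When ste_one_phy is set, only PHY 0 is visible.
 */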
326 int
327 ste_miibus_readreg(struct device *self, int phy, int reg)
328 {
329 struct ste_softc *sc = (struct ste_softc *)self;
330 struct ste_mii_frame frame;
331
332 if (sc->ste_one_phy && phy != 0)
333 return (0);
334
335 bzero(&frame, sizeof(frame));
336
337 frame.mii_phyaddr = phy;
338 frame.mii_regaddr = reg;
339 ste_mii_readreg(sc, &frame);
340
341 return(frame.mii_data);
342 }
343
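/*
 * MII bus write callback: write a PHY register via ste_mii_writereg().
 */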
344 void
345 ste_miibus_writereg(struct device *self, int phy, int reg, int data)
346 {
347 struct ste_softc *sc = (struct ste_softc *)self;
348 struct ste_mii_frame frame;
349
350 bzero(&frame, sizeof(frame));
351
352 frame.mii_phyaddr = phy;
353 frame.mii_regaddr = reg;
354 frame.mii_data = data;
355
356 ste_mii_writereg(sc, &frame);
357 }
358
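/*
 * Media status changed: stall the DMA engines and bring the MAC's
 * full-duplex setting in line with the PHY.
 */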
359 void
360 ste_miibus_statchg(struct device *self)
361 {
362 struct ste_softc *sc = (struct ste_softc *)self;
363 struct mii_data *mii;
364 int fdx, fcur;
365
366 mii = &sc->sc_mii;
367
368 fcur = CSR_READ_2(sc, STE_MACCTL0) & STE_MACCTL0_FULLDUPLEX;
369 fdx = (mii->mii_media_active & IFM_GMASK) == IFM_FDX;
370
371 if ((fcur && fdx) || (! fcur && ! fdx))
372 return;
373
374 STE_SETBIT4(sc, STE_DMACTL,
375 STE_DMACTL_RXDMA_STALL | STE_DMACTL_TXDMA_STALL);
376 ste_wait(sc);
377
378 if (fdx)
379 STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
380 else
381 STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
382
383 STE_SETBIT4(sc, STE_DMACTL,
384 STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);
385 }
386
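/*
 * Set hardware to newly-selected media.
 */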
387 int
388 ste_ifmedia_upd(struct ifnet *ifp)
389 {
390 struct ste_softc *sc;
391 struct mii_data *mii;
392
393 sc = ifp->if_softc;
394 mii = &sc->sc_mii;
395 sc->ste_link = 0;
396 if (mii->mii_instance) {
397 struct mii_softc *miisc;
398 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
399 mii_phy_reset(miisc);
400 }
401 mii_mediachg(mii);
402
403 return(0);
404 }
405
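/*
 * Report current media status.
 */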
406 void
407 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
408 {
409 struct ste_softc *sc;
410 struct mii_data *mii;
411
412 sc = ifp->if_softc;
413 mii = &sc->sc_mii;
414
415 mii_pollstat(mii);
416 ifmr->ifm_active = mii->mii_media_active;
417 ifmr->ifm_status = mii->mii_media_status;
418 }
419
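/*
 * Wait for a pending DMA stall/unstall command to complete.
 */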
420 void
421 ste_wait(struct ste_softc *sc)
422 {
423 int i;
424
425 for (i = 0; i < STE_TIMEOUT; i++) {
426 if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
427 break;
428 }
429
430 if (i == STE_TIMEOUT)
431 printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
432 }
433
434 /*
435 * The EEPROM is slow: give it time to come ready after issuing
436 * it a command.
437 */
438 int
439 ste_eeprom_wait(struct ste_softc *sc)
440 {
441 int i;
442
443 DELAY(1000);
444
445 for (i = 0; i < 100; i++) {
446 if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
447 DELAY(1000);
448 else
449 break;
450 }
451
452 if (i == 100) {
453 printf("%s: eeprom failed to come ready\n",
454 sc->sc_dev.dv_xname);
455 return(1);
456 }
457
458 return(0);
459 }
460
461 /*
462 * Read a sequence of words from the EEPROM. Note that ethernet address
463 * data is stored in the EEPROM in network byte order.
464 */
465 int
466 ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
467 {
468 int err = 0, i;
469 u_int16_t word = 0, *ptr;
470
471 if (ste_eeprom_wait(sc))
472 return(1);
473
474 for (i = 0; i < cnt; i++) {
475 CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
476 err = ste_eeprom_wait(sc);
477 if (err)
478 break;
479 word = CSR_READ_2(sc, STE_EEPROM_DATA);
480 ptr = (u_int16_t *)(dest + (i * 2));
481 if (swap)
482 *ptr = ntohs(word);
483 else
484 *ptr = word;
485 }
486
487 return(err ? 1 : 0);
488 }
489
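/*
 * Program the receive filter: promiscuous/allmulti modes and the
 * 64-bit multicast hash table.
 */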
490 void
491 ste_iff(struct ste_softc *sc)
492 {
493 struct ifnet *ifp = &sc->arpcom.ac_if;
494 struct arpcom *ac = &sc->arpcom;
495 struct ether_multi *enm;
496 struct ether_multistep step;
497 u_int32_t rxmode, hashes[2];
498 int h = 0;
499
500 rxmode = CSR_READ_1(sc, STE_RX_MODE);
501 rxmode &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_BROADCAST |
502 STE_RXMODE_MULTIHASH | STE_RXMODE_PROMISC |
503 STE_RXMODE_UNICAST);
504 bzero(hashes, sizeof(hashes));
505 ifp->if_flags &= ~IFF_ALLMULTI;
506
507 /*
508 * Always accept broadcast frames.
509 * Always accept frames destined to our station address.
510 */
511 rxmode |= STE_RXMODE_BROADCAST | STE_RXMODE_UNICAST;
512
513 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
514 ifp->if_flags |= IFF_ALLMULTI;
515 rxmode |= STE_RXMODE_ALLMULTI;
516 if (ifp->if_flags & IFF_PROMISC)
517 rxmode |= STE_RXMODE_PROMISC;
518 } else {
519 rxmode |= STE_RXMODE_MULTIHASH;
520
521 /* now program new ones */
522 ETHER_FIRST_MULTI(step, ac, enm);
523 while (enm != NULL) {
524 h = ether_crc32_be(enm->enm_addrlo,
525 ETHER_ADDR_LEN) & 0x3F;
526
527 if (h < 32)
528 hashes[0] |= (1 << h);
529 else
530 hashes[1] |= (1 << (h - 32));
531
532 ETHER_NEXT_MULTI(step, enm);
533 }
534 }
535
536 CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
537 CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
538 CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
539 CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
540 CSR_WRITE_1(sc, STE_RX_MODE, rxmode);
541 }
542
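/*
 * Interrupt handler: ack and service RX/TX completions, statistics
 * overflow, link changes and host errors, then re-enable interrupts.
 */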
543 int
544 ste_intr(void *xsc)
545 {
546 struct ste_softc *sc;
547 struct ifnet *ifp;
548 u_int16_t status;
549 int claimed = 0;
550
551 sc = xsc;
552 ifp = &sc->arpcom.ac_if;
553
554 /* See if this is really our interrupt. */
555 if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
556 return claimed;
557
558 for (;;) {
559 status = CSR_READ_2(sc, STE_ISR_ACK);
560
561 if (!(status & STE_INTRS))
562 break;
563
564 claimed = 1;
565
566 if (status & STE_ISR_RX_DMADONE) {
567 ste_rxeoc(sc);
568 ste_rxeof(sc);
569 }
570
571 if (status & STE_ISR_TX_DMADONE)
572 ste_txeof(sc);
573
574 if (status & STE_ISR_TX_DONE)
575 ste_txeoc(sc);
576
577 if (status & STE_ISR_STATS_OFLOW) {
578 timeout_del(&sc->sc_stats_tmo);
579 ste_stats_update(sc);
580 }
581
582 if (status & STE_ISR_LINKEVENT)
583 mii_pollstat(&sc->sc_mii);
584
585 if (status & STE_ISR_HOSTERR)
586 ste_init(sc);
587 }
588
589 /* Re-enable interrupts */
590 CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
591
592 if (ifp->if_flags & IFF_RUNNING && !ifq_empty(&ifp->if_snd))
593 ste_start(ifp);
594
595 return claimed;
596 }
597
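/*
 * Resynchronize the RX head pointer with the chip if we have
 * fallen behind it.
 */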
598 void
599 ste_rxeoc(struct ste_softc *sc)
600 {
601 struct ste_chain_onefrag *cur_rx;
602
603 if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
604 cur_rx = sc->ste_cdata.ste_rx_head;
605 do {
606 cur_rx = cur_rx->ste_next;
607 /* If the ring is empty, just return. */
608 if (cur_rx == sc->ste_cdata.ste_rx_head)
609 return;
610 } while (cur_rx->ste_ptr->ste_status == 0);
611 if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
612 /* We've fallen behind the chip: catch it. */
613 sc->ste_cdata.ste_rx_head = cur_rx;
614 }
615 }
616 }
617
618 /*
619 * A frame has been uploaded: pass the resulting mbuf chain up to
620 * the higher level protocols.
621 */
622 void
623 ste_rxeof(struct ste_softc *sc)
624 {
625 struct mbuf *m;
626 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
627 struct ifnet *ifp;
628 struct ste_chain_onefrag *cur_rx;
629 int total_len = 0, count = 0;
630 u_int32_t rxstat;
631
632 ifp = &sc->arpcom.ac_if;
633
634 while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
635 & STE_RXSTAT_DMADONE) {
636 if ((STE_RX_LIST_CNT - count) < 3)
637 break;
638
639 cur_rx = sc->ste_cdata.ste_rx_head;
640 sc->ste_cdata.ste_rx_head = cur_rx->ste_next;
641
642 /*
643 * If an error occurs, update stats, clear the
644 * status word and leave the mbuf cluster in place:
645 * it should simply get re-used next time this descriptor
646 * comes up in the ring.
647 */
648 if (rxstat & STE_RXSTAT_FRAME_ERR) {
649 ifp->if_ierrors++;
650 cur_rx->ste_ptr->ste_status = 0;
651 continue;
652 }
653
654 * If the error bit was not set, the upload complete
655 * bit should be set, which means we have a valid packet.
656 * bit should be set which means we have a valid packet.
657 * If not, something truly strange has happened.
658 */
659 if (!(rxstat & STE_RXSTAT_DMADONE)) {
660 printf("%s: bad receive status -- packet dropped\n",
661 sc->sc_dev.dv_xname);
662 ifp->if_ierrors++;
663 cur_rx->ste_ptr->ste_status = 0;
664 continue;
665 }
666
667 /* No errors; receive the packet. */
668 m = cur_rx->ste_mbuf;
669 total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;
670
671 /*
672 * Try to conjure up a new mbuf cluster. If that
673 * fails, it means we have an out of memory condition and
674 * should leave the buffer in place and continue. This will
675 * result in a lost packet, but there's little else we
676 * can do in this situation.
677 */
678 if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
679 ifp->if_ierrors++;
680 cur_rx->ste_ptr->ste_status = 0;
681 continue;
682 }
683
684 m->m_pkthdr.len = m->m_len = total_len;
685
686 ml_enqueue(&ml, m);
687
688 cur_rx->ste_ptr->ste_status = 0;
689 count++;
690 }
691
692 if_input(ifp, &ml);
693 }
694
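/*
 * Check the transmit status register and recover from TX errors,
 * raising the TX start threshold after an underrun.
 */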
695 void
696 ste_txeoc(struct ste_softc *sc)
697 {
698 u_int8_t txstat;
699 struct ifnet *ifp;
700
701 ifp = &sc->arpcom.ac_if;
702
703 while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
704 STE_TXSTATUS_TXDONE) {
705 if (txstat & STE_TXSTATUS_UNDERRUN ||
706 txstat & STE_TXSTATUS_EXCESSCOLLS ||
707 txstat & STE_TXSTATUS_RECLAIMERR) {
708 ifp->if_oerrors++;
709 printf("%s: transmission error: %x\n",
710 sc->sc_dev.dv_xname, txstat);
711
712 ste_init(sc);
713
714 if (txstat & STE_TXSTATUS_UNDERRUN &&
715 sc->ste_tx_thresh < ETHER_MAX_DIX_LEN) {
716 sc->ste_tx_thresh += STE_MIN_FRAMELEN;
717 printf("%s: tx underrun, increasing tx"
718 " start threshold to %d bytes\n",
719 sc->sc_dev.dv_xname, sc->ste_tx_thresh);
720 }
721 CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
722 CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
723 (ETHER_MAX_DIX_LEN >> 4));
724 }
725 ste_init(sc);
726 CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
727 }
728 }
729
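/*
 * Free mbufs for transmit descriptors that the chip has finished with.
 */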
730 void
731 ste_txeof(struct ste_softc *sc)
732 {
733 struct ste_chain *cur_tx = NULL;
734 struct ifnet *ifp;
735 int idx;
736
737 ifp = &sc->arpcom.ac_if;
738
739 idx = sc->ste_cdata.ste_tx_cons;
740 while(idx != sc->ste_cdata.ste_tx_prod) {
741 cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
742
743 if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
744 break;
745
746 m_freem(cur_tx->ste_mbuf);
747 cur_tx->ste_mbuf = NULL;
748 ifq_clr_oactive(&ifp->if_snd);
749
750 STE_INC(idx, STE_TX_LIST_CNT);
751 }
752
753 sc->ste_cdata.ste_tx_cons = idx;
754 if (idx == sc->ste_cdata.ste_tx_prod)
755 ifp->if_timer = 0;
756 }
757
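/*
 * One-second timer: accumulate the collision counters and watch for
 * the link coming up.
 */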
758 void
759 ste_stats_update(void *xsc)
760 {
761 struct ste_softc *sc;
762 struct ifnet *ifp;
763 struct mii_data *mii;
764 int s;
765
766 s = splnet();
767
768 sc = xsc;
769 ifp = &sc->arpcom.ac_if;
770 mii = &sc->sc_mii;
771
772 ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
773 + CSR_READ_1(sc, STE_MULTI_COLLS)
774 + CSR_READ_1(sc, STE_SINGLE_COLLS);
775
776 if (!sc->ste_link) {
777 mii_pollstat(mii);
778 if (mii->mii_media_status & IFM_ACTIVE &&
779 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
780 sc->ste_link++;
781 /*
782 * We don't get a callback on re-init, so do it here;
783 * otherwise we get stuck in the wrong link state.
784 */
785 ste_miibus_statchg((struct device *)sc);
786 if (!ifq_empty(&ifp->if_snd))
787 ste_start(ifp);
788 }
789 }
790
791 timeout_add_sec(&sc->sc_stats_tmo, 1);
792 splx(s);
793 }
794
795 /*
796 * Probe for a Sundance ST201 chip. Check the PCI vendor and device
797 * IDs against our list and return a device name if we find a match.
798 */
799 int
800 ste_probe(struct device *parent, void *match, void *aux)
801 {
802 return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
803 nitems(ste_devices)));
804 }
805
806 /*
807 * Attach the interface. Allocate softc structures, do ifmedia
808 * setup and ethernet/BPF attach.
809 */
810 void
811 ste_attach(struct device *parent, struct device *self, void *aux)
812 {
813 const char *intrstr = NULL;
814 struct ste_softc *sc = (struct ste_softc *)self;
815 struct pci_attach_args *pa = aux;
816 pci_chipset_tag_t pc = pa->pa_pc;
817 pci_intr_handle_t ih;
818 struct ifnet *ifp;
819 bus_size_t size;
820
821 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
822
823 /*
824 * Only use one PHY, since this chip reports multiple.
825 * Note: on the DFE-550TX the PHY is at 1; on the DFE-580TX
826 * it is at 0 & 1. It is rev 0x12.
827 */
828 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DLINK &&
829 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DLINK_DFE550TX &&
830 PCI_REVISION(pa->pa_class) == 0x12)
831 sc->ste_one_phy = 1;
832
833 /*
834 * Map control/status registers.
835 */
836
837 #ifdef STE_USEIOSPACE
838 if (pci_mapreg_map(pa, STE_PCI_LOIO,
839 PCI_MAPREG_TYPE_IO, 0,
840 &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
841 printf(": can't map i/o space\n");
842 return;
843 }
844 #else
845 if (pci_mapreg_map(pa, STE_PCI_LOMEM,
846 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
847 &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
848 printf(": can't map mem space\n");
849 return;
850 }
851 #endif
852
853 /* Allocate interrupt */
854 if (pci_intr_map(pa, &ih)) {
855 printf(": couldn't map interrupt\n");
856 goto fail_1;
857 }
858 intrstr = pci_intr_string(pc, ih);
859 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
860 self->dv_xname);
861 if (sc->sc_ih == NULL) {
862 printf(": couldn't establish interrupt");
863 if (intrstr != NULL)
864 printf(" at %s", intrstr);
865 printf("\n");
866 goto fail_1;
867 }
868 printf(": %s", intrstr);
869
870 /* Reset the adapter. */
871 ste_reset(sc);
872
873 /*
874 * Get station address from the EEPROM.
875 */
876 if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
877 STE_EEADDR_NODE0, 3, 0)) {
878 printf(": failed to read station address\n");
879 goto fail_2;
880 }
881
882 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
883
884 sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
885 M_DEVBUF, M_DONTWAIT);
886 if (sc->ste_ldata_ptr == NULL) {
887 printf(": no memory for list buffers!\n");
888 goto fail_2;
889 }
890
891 sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
892 bzero(sc->ste_ldata, sizeof(struct ste_list_data));
893
894 ifp = &sc->arpcom.ac_if;
895 ifp->if_softc = sc;
896 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
897 ifp->if_ioctl = ste_ioctl;
898 ifp->if_start = ste_start;
899 ifp->if_watchdog = ste_watchdog;
900 ifq_init_maxlen(&ifp->if_snd, STE_TX_LIST_CNT - 1);
901 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
902 ifp->if_capabilities = IFCAP_VLAN_MTU;
903
904 sc->ste_tx_thresh = STE_TXSTART_THRESH;
905
906 sc->sc_mii.mii_ifp = ifp;
907 sc->sc_mii.mii_readreg = ste_miibus_readreg;
908 sc->sc_mii.mii_writereg = ste_miibus_writereg;
909 sc->sc_mii.mii_statchg = ste_miibus_statchg;
910 ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd,ste_ifmedia_sts);
911 mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
912 0);
913 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
914 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
915 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
916 } else
917 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
918
919 /*
920 * Call MI attach routines.
921 */
922 if_attach(ifp);
923 ether_ifattach(ifp);
924 return;
925
926 fail_2:
927 pci_intr_disestablish(pc, sc->sc_ih);
928
929 fail_1:
930 bus_space_unmap(sc->ste_btag, sc->ste_bhandle, size);
931 }
932
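/*
 * Attach an mbuf cluster to an RX descriptor, allocating a fresh
 * cluster if none is passed in.
 */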
933 int
934 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c, struct mbuf *m)
935 {
936 struct mbuf *m_new = NULL;
937
938 if (m == NULL) {
939 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
940 if (m_new == NULL)
941 return(ENOBUFS);
942 MCLGET(m_new, M_DONTWAIT);
943 if (!(m_new->m_flags & M_EXT)) {
944 m_freem(m_new);
945 return(ENOBUFS);
946 }
947 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
948 } else {
949 m_new = m;
950 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
951 m_new->m_data = m_new->m_ext.ext_buf;
952 }
953
954 m_adj(m_new, ETHER_ALIGN);
955
956 c->ste_mbuf = m_new;
957 c->ste_ptr->ste_status = 0;
958 c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, vaddr_t));
959 c->ste_ptr->ste_frag.ste_len = (ETHER_MAX_DIX_LEN + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;
960
961 return(0);
962 }
963
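/*
 * Set up the circular RX descriptor list.
 */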
964 int
965 ste_init_rx_list(struct ste_softc *sc)
966 {
967 struct ste_chain_data *cd;
968 struct ste_list_data *ld;
969 int i;
970
971 cd = &sc->ste_cdata;
972 ld = sc->ste_ldata;
973
974 for (i = 0; i < STE_RX_LIST_CNT; i++) {
975 cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
976 if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
977 return(ENOBUFS);
978 if (i == (STE_RX_LIST_CNT - 1)) {
979 cd->ste_rx_chain[i].ste_next =
980 &cd->ste_rx_chain[0];
981 ld->ste_rx_list[i].ste_next =
982 vtophys((vaddr_t)&ld->ste_rx_list[0]);
983 } else {
984 cd->ste_rx_chain[i].ste_next =
985 &cd->ste_rx_chain[i + 1];
986 ld->ste_rx_list[i].ste_next =
987 vtophys((vaddr_t)&ld->ste_rx_list[i + 1]);
988 }
989 ld->ste_rx_list[i].ste_status = 0;
990 }
991
992 cd->ste_rx_head = &cd->ste_rx_chain[0];
993
994 return(0);
995 }
996
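/*
 * Set up the circular TX descriptor list.
 */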
997 void
998 ste_init_tx_list(struct ste_softc *sc)
999 {
1000 struct ste_chain_data *cd;
1001 struct ste_list_data *ld;
1002 int i;
1003
1004 cd = &sc->ste_cdata;
1005 ld = sc->ste_ldata;
1006 for (i = 0; i < STE_TX_LIST_CNT; i++) {
1007 cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1008 cd->ste_tx_chain[i].ste_phys = vtophys((vaddr_t)&ld->ste_tx_list[i]);
1009 if (i == (STE_TX_LIST_CNT - 1))
1010 cd->ste_tx_chain[i].ste_next =
1011 &cd->ste_tx_chain[0];
1012 else
1013 cd->ste_tx_chain[i].ste_next =
1014 &cd->ste_tx_chain[i + 1];
1015 }
1016
1017 bzero(ld->ste_tx_list, sizeof(struct ste_desc) * STE_TX_LIST_CNT);
1018
1019 cd->ste_tx_prod = 0;
1020 cd->ste_tx_cons = 0;
1021 }
1022
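/*
 * Initialize the chip: reset it, set up the RX and TX lists, program
 * the RX filter, and enable the transmitter, receiver and interrupts.
 */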
1023 void
1024 ste_init(void *xsc)
1025 {
1026 struct ste_softc *sc = (struct ste_softc *)xsc;
1027 struct ifnet *ifp = &sc->arpcom.ac_if;
1028 struct mii_data *mii;
1029 int i, s;
1030
1031 s = splnet();
1032
1033 ste_stop(sc);
1034 /* Reset the chip to a known state. */
1035 ste_reset(sc);
1036
1037 mii = &sc->sc_mii;
1038
1039 /* Init our MAC address */
1040 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1041 CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1042 }
1043
1044 /* Init RX list */
1045 if (ste_init_rx_list(sc) == ENOBUFS) {
1046 printf("%s: initialization failed: no "
1047 "memory for RX buffers\n", sc->sc_dev.dv_xname);
1048 ste_stop(sc);
1049 splx(s);
1050 return;
1051 }
1052
1053 /* Set RX polling interval */
1054 CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
1055
1056 /* Init TX descriptors */
1057 ste_init_tx_list(sc);
1058
1059 /* Set the TX freethresh value */
1060 CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, ETHER_MAX_DIX_LEN >> 8);
1061
1062 /* Set the TX start threshold for best performance. */
1063 CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
1064
1065 /* Set the TX reclaim threshold. */
1066 CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (ETHER_MAX_DIX_LEN >> 4));
1067
1068 /* Program promiscuous mode and multicast filters. */
1069 ste_iff(sc);
1070
1071 /* Load the address of the RX list. */
1072 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1073 ste_wait(sc);
1074 CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
1075 vtophys((vaddr_t)&sc->ste_ldata->ste_rx_list[0]));
1076 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1077 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1078
1079 /* Set TX polling interval (defer until we TX first packet) */
1080 CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
1081
1082 /* Load address of the TX list */
1083 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1084 ste_wait(sc);
1085 CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
1086 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1087 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1088 ste_wait(sc);
1089 sc->ste_tx_prev = NULL;
1090
1091 /* Enable receiver and transmitter */
1092 CSR_WRITE_2(sc, STE_MACCTL0, 0);
1093 CSR_WRITE_2(sc, STE_MACCTL1, 0);
1094 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
1095 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
1096
1097 /* Enable stats counters. */
1098 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
1099
1100 /* Enable interrupts. */
1101 CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
1102 CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
1103
1104 /* Accept VLAN length packets */
1105 CSR_WRITE_2(sc, STE_MAX_FRAMELEN,
1106 ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
1107
1108 ste_ifmedia_upd(ifp);
1109
1110 ifp->if_flags |= IFF_RUNNING;
1111 ifq_clr_oactive(&ifp->if_snd);
1112
1113 splx(s);
1114
1115 timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
1116 timeout_add_sec(&sc->sc_stats_tmo, 1);
1117 }
1118
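/*
 * Stop the chip and free any mbufs left on the RX and TX lists.
 */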
1119 void
1120 ste_stop(struct ste_softc *sc)
1121 {
1122 int i;
1123 struct ifnet *ifp;
1124
1125 ifp = &sc->arpcom.ac_if;
1126
1127 timeout_del(&sc->sc_stats_tmo);
1128
1129 ifp->if_flags &= ~IFF_RUNNING;
1130 ifq_clr_oactive(&ifp->if_snd);
1131
1132 CSR_WRITE_2(sc, STE_IMR, 0);
1133 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
1134 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
1135 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
1136 STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1137 STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1138 ste_wait(sc);
1139 /*
1140 * Try really hard to stop the RX engine; otherwise, under heavy
1141 * RX traffic, the chip will write into de-allocated memory.
1142 */
1143 ste_reset(sc);
1144
1145 sc->ste_link = 0;
1146
1147 for (i = 0; i < STE_RX_LIST_CNT; i++) {
1148 if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
1149 m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
1150 sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
1151 }
1152 }
1153
1154 for (i = 0; i < STE_TX_LIST_CNT; i++) {
1155 if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
1156 m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
1157 sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
1158 }
1159 }
1160
1161 bzero(sc->ste_ldata, sizeof(struct ste_list_data));
1162 }
1163
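/*
 * Issue a global reset and wait for it to complete.
 */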
1164 void
1165 ste_reset(struct ste_softc *sc)
1166 {
1167 int i;
1168
1169 STE_SETBIT4(sc, STE_ASICCTL,
1170 STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1171 STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1172 STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1173 STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1174 STE_ASICCTL_EXTRESET_RESET);
1175
1176 DELAY(100000);
1177
1178 for (i = 0; i < STE_TIMEOUT; i++) {
1179 if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1180 break;
1181 }
1182
1183 if (i == STE_TIMEOUT)
1184 printf("%s: global reset never completed\n",
1185 sc->sc_dev.dv_xname);
1186 }
1187
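/*
 * Handle interface ioctl requests.
 */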
1188 int
1189 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1190 {
1191 struct ste_softc *sc = ifp->if_softc;
1192 struct ifreq *ifr = (struct ifreq *) data;
1193 int s, error = 0;
1194
1195 s = splnet();
1196
1197 switch(command) {
1198 case SIOCSIFADDR:
1199 ifp->if_flags |= IFF_UP;
1200 if (!(ifp->if_flags & IFF_RUNNING))
1201 ste_init(sc);
1202 break;
1203
1204 case SIOCSIFFLAGS:
1205 if (ifp->if_flags & IFF_UP) {
1206 if (ifp->if_flags & IFF_RUNNING)
1207 error = ENETRESET;
1208 else {
1209 sc->ste_tx_thresh = STE_TXSTART_THRESH;
1210 ste_init(sc);
1211 }
1212 } else {
1213 if (ifp->if_flags & IFF_RUNNING)
1214 ste_stop(sc);
1215 }
1216 break;
1217
1218 case SIOCGIFMEDIA:
1219 case SIOCSIFMEDIA:
1220 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1221 break;
1222
1223 default:
1224 error = ether_ioctl(ifp, &sc->arpcom, command, data);
1225 }
1226
1227 if (error == ENETRESET) {
1228 if (ifp->if_flags & IFF_RUNNING)
1229 ste_iff(sc);
1230 error = 0;
1231 }
1232
1233 splx(s);
1234 return(error);
1235 }
1236
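/*
 * Map an outgoing packet into a TX descriptor's fragment list,
 * copying the mbuf chain into a single buffer if it has more than
 * STE_MAXFRAGS segments.
 */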
1237 int
1238 ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
1239 {
1240 int frag = 0;
1241 struct ste_frag *f = NULL;
1242 struct mbuf *m;
1243 struct ste_desc *d;
1244
1245 d = c->ste_ptr;
1246 d->ste_ctl = 0;
1247
1248 encap_retry:
1249 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1250 if (m->m_len != 0) {
1251 if (frag == STE_MAXFRAGS)
1252 break;
1253 f = &d->ste_frags[frag];
1254 f->ste_addr = vtophys(mtod(m, vaddr_t));
1255 f->ste_len = m->m_len;
1256 frag++;
1257 }
1258 }
1259
1260 if (m != NULL) {
1261 struct mbuf *mn;
1262
1263 /*
1264 * We ran out of segments. We have to recopy this
1265 * mbuf chain first. Bail out if we can't get the
1266 * new buffers.
1267 */
1268 MGETHDR(mn, M_DONTWAIT, MT_DATA);
1269 if (mn == NULL) {
1270 m_freem(m_head);
1271 return ENOMEM;
1272 }
1273 if (m_head->m_pkthdr.len > MHLEN) {
1274 MCLGET(mn, M_DONTWAIT);
1275 if ((mn->m_flags & M_EXT) == 0) {
1276 m_freem(mn);
1277 m_freem(m_head);
1278 return ENOMEM;
1279 }
1280 }
1281 m_copydata(m_head, 0, m_head->m_pkthdr.len,
1282 mtod(mn, caddr_t));
1283 mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
1284 m_freem(m_head);
1285 m_head = mn;
1286 goto encap_retry;
1287 }
1288
1289 c->ste_mbuf = m_head;
1290 d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
1291 d->ste_ctl = 1;
1292
1293 return(0);
1294 }
1295
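/*
 * Queue packets from the interface send queue onto the TX list and
 * kick the transmitter.
 */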
1296 void
1297 ste_start(struct ifnet *ifp)
1298 {
1299 struct ste_softc *sc;
1300 struct mbuf *m_head = NULL;
1301 struct ste_chain *cur_tx;
1302 int idx;
1303
1304 sc = ifp->if_softc;
1305
1306 if (!sc->ste_link)
1307 return;
1308
1309 if (ifq_is_oactive(&ifp->if_snd))
1310 return;
1311
1312 idx = sc->ste_cdata.ste_tx_prod;
1313
1314 while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
1315 /*
1316 * We cannot re-use the last (free) descriptor;
1317 * the chip may not have read its ste_next yet.
1318 */
1319 if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
1320 sc->ste_cdata.ste_tx_cons) {
1321 ifq_set_oactive(&ifp->if_snd);
1322 break;
1323 }
1324
1325 m_head = ifq_dequeue(&ifp->if_snd);
1326 if (m_head == NULL)
1327 break;
1328
1329 cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
1330
1331 if (ste_encap(sc, cur_tx, m_head) != 0)
1332 break;
1333
1334 cur_tx->ste_ptr->ste_next = 0;
1335
1336 if (sc->ste_tx_prev == NULL) {
1337 cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1338 /* Load address of the TX list */
1339 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1340 ste_wait(sc);
1341
1342 CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
1343 vtophys((vaddr_t)&sc->ste_ldata->ste_tx_list[0]));
1344
1345 /* Set TX polling interval to start TX engine */
1346 CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
1347
1348 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1349 ste_wait(sc);
1350 } else {
1351 cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1352 sc->ste_tx_prev->ste_ptr->ste_next
1353 = cur_tx->ste_phys;
1354 }
1355
1356 sc->ste_tx_prev = cur_tx;
1357
1358 #if NBPFILTER > 0
1359 /*
1360 * If there's a BPF listener, bounce a copy of this frame
1361 * to him.
1362 */
1363 if (ifp->if_bpf)
1364 bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf,
1365 BPF_DIRECTION_OUT);
1366 #endif
1367
1368 STE_INC(idx, STE_TX_LIST_CNT);
1369 ifp->if_timer = 5;
1370 }
1371 sc->ste_cdata.ste_tx_prod = idx;
1372 }
1373
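/*
 * A transmission has taken too long to complete: log it, reclaim what
 * we can and reinitialize the chip.
 */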
1374 void
1375 ste_watchdog(struct ifnet *ifp)
1376 {
1377 struct ste_softc *sc;
1378
1379 sc = ifp->if_softc;
1380
1381 ifp->if_oerrors++;
1382 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1383
1384 ste_txeoc(sc);
1385 ste_txeof(sc);
1386 ste_rxeoc(sc);
1387 ste_rxeof(sc);
1388 ste_init(sc);
1389
1390 if (!ifq_empty(&ifp->if_snd))
1391 ste_start(ifp);
1392 }
1393