1 /*
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD: src/sys/pci/if_ste.c,v 1.14.2.9 2003/02/05 22:03:57 mbr Exp $
33 */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/sockio.h>
38 #include <sys/mbuf.h>
39 #include <sys/malloc.h>
40 #include <sys/kernel.h>
41 #include <sys/socket.h>
42 #include <sys/serialize.h>
43 #include <sys/bus.h>
44 #include <sys/rman.h>
45 #include <sys/interrupt.h>
46
47 #include <net/if.h>
48 #include <net/ifq_var.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/vlan/if_vlan_var.h>
54
55 #include <net/bpf.h>
56
57 #include <vm/vm.h> /* for vtophys */
58 #include <vm/pmap.h> /* for vtophys */
59
60 #include "../mii_layer/mii.h"
61 #include "../mii_layer/miivar.h"
62
63 #include "pcidevs.h"
64 #include <bus/pci/pcireg.h>
65 #include <bus/pci/pcivar.h>
66
67 /* "controller miibus0" required. See GENERIC if you get errors here. */
68 #include "miibus_if.h"
69
70 #define STE_USEIOSPACE
71
72 #include "if_stereg.h"
73
74 /*
75 * Various supported device vendors/types and their names.
76 */
77 static struct ste_type ste_devs[] = {
78 { PCI_VENDOR_SUNDANCETI, PCI_PRODUCT_SUNDANCETI_ST201,
79 "Sundance ST201 10/100BaseTX" },
80 { PCI_VENDOR_SUNDANCETI, PCI_PRODUCT_SUNDANCETI_ST201_0,
81 "Sundance ST201 10/100BaseTX" },
82 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DL1002,
83 "D-Link DFE-550TX 10/100BaseTX" },
84 { 0, 0, NULL }
85 };
86
87 static int ste_probe (device_t);
88 static int ste_attach (device_t);
89 static int ste_detach (device_t);
90 static void ste_init (void *);
91 static void ste_intr (void *);
92 static void ste_rxeof (struct ste_softc *);
93 static void ste_txeoc (struct ste_softc *);
94 static void ste_txeof (struct ste_softc *);
95 static void ste_stats_update (void *);
96 static void ste_stop (struct ste_softc *);
97 static void ste_reset (struct ste_softc *);
98 static int ste_ioctl (struct ifnet *, u_long, caddr_t,
99 struct ucred *);
100 static int ste_encap (struct ste_softc *, struct ste_chain *,
101 struct mbuf *);
102 static void ste_start (struct ifnet *, struct ifaltq_subque *);
103 static void ste_watchdog (struct ifnet *);
104 static void ste_shutdown (device_t);
105 static int ste_newbuf (struct ste_softc *,
106 struct ste_chain_onefrag *,
107 struct mbuf *);
108 static int ste_ifmedia_upd (struct ifnet *);
109 static void ste_ifmedia_sts (struct ifnet *, struct ifmediareq *);
110
111 static void ste_mii_sync (struct ste_softc *);
112 static void ste_mii_send (struct ste_softc *, u_int32_t, int);
113 static int ste_mii_readreg (struct ste_softc *,
114 struct ste_mii_frame *);
115 static int ste_mii_writereg (struct ste_softc *,
116 struct ste_mii_frame *);
117 static int ste_miibus_readreg (device_t, int, int);
118 static int ste_miibus_writereg (device_t, int, int, int);
119 static void ste_miibus_statchg (device_t);
120
121 static int ste_eeprom_wait (struct ste_softc *);
122 static int ste_read_eeprom (struct ste_softc *, caddr_t, int,
123 int, int);
124 static void ste_wait (struct ste_softc *);
125 static void ste_setmulti (struct ste_softc *);
126 static int ste_init_rx_list (struct ste_softc *);
127 static void ste_init_tx_list (struct ste_softc *);
128
129 #ifdef STE_USEIOSPACE
130 #define STE_RES SYS_RES_IOPORT
131 #define STE_RID STE_PCI_LOIO
132 #else
133 #define STE_RES SYS_RES_MEMORY
134 #define STE_RID STE_PCI_LOMEM
135 #endif
136
137 static device_method_t ste_methods[] = {
138 /* Device interface */
139 DEVMETHOD(device_probe, ste_probe),
140 DEVMETHOD(device_attach, ste_attach),
141 DEVMETHOD(device_detach, ste_detach),
142 DEVMETHOD(device_shutdown, ste_shutdown),
143
144 /* bus interface */
145 DEVMETHOD(bus_print_child, bus_generic_print_child),
146 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
147
148 /* MII interface */
149 DEVMETHOD(miibus_readreg, ste_miibus_readreg),
150 DEVMETHOD(miibus_writereg, ste_miibus_writereg),
151 DEVMETHOD(miibus_statchg, ste_miibus_statchg),
152
153 DEVMETHOD_END
154 };
155
156 static driver_t ste_driver = {
157 "ste",
158 ste_methods,
159 sizeof(struct ste_softc)
160 };
161
162 static devclass_t ste_devclass;
163
164 DECLARE_DUMMY_MODULE(if_ste);
165 DRIVER_MODULE(if_ste, pci, ste_driver, ste_devclass, NULL, NULL);
166 DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, NULL, NULL);
167
168 #define STE_SETBIT4(sc, reg, x) \
169 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
170
171 #define STE_CLRBIT4(sc, reg, x) \
172 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
173
174 #define STE_SETBIT2(sc, reg, x) \
175 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)
176
177 #define STE_CLRBIT2(sc, reg, x) \
178 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)
179
180 #define STE_SETBIT1(sc, reg, x) \
181 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)
182
183 #define STE_CLRBIT1(sc, reg, x) \
184 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)
185
186
187 #define MII_SET(x) STE_SETBIT1(sc, STE_PHYCTL, x)
188 #define MII_CLR(x) STE_CLRBIT1(sc, STE_PHYCTL, x)
189
190 /*
191 * Sync the PHYs by setting data bit and strobing the clock 32 times.
192 */
193 static void
ste_mii_sync(struct ste_softc * sc)194 ste_mii_sync(struct ste_softc *sc)
195 {
196 int i;
197
198 MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
199
200 for (i = 0; i < 32; i++) {
201 MII_SET(STE_PHYCTL_MCLK);
202 DELAY(1);
203 MII_CLR(STE_PHYCTL_MCLK);
204 DELAY(1);
205 }
206
207 return;
208 }
209
210 /*
211 * Clock a series of bits through the MII.
212 */
213 static void
ste_mii_send(struct ste_softc * sc,u_int32_t bits,int cnt)214 ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
215 {
216 int i;
217
218 MII_CLR(STE_PHYCTL_MCLK);
219
220 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
221 if (bits & i) {
222 MII_SET(STE_PHYCTL_MDATA);
223 } else {
224 MII_CLR(STE_PHYCTL_MDATA);
225 }
226 DELAY(1);
227 MII_CLR(STE_PHYCTL_MCLK);
228 DELAY(1);
229 MII_SET(STE_PHYCTL_MCLK);
230 }
231 }
232
233 /*
234 * Read an PHY register through the MII.
235 */
236 static int
ste_mii_readreg(struct ste_softc * sc,struct ste_mii_frame * frame)237 ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
238 {
239 int i, ack;
240
241 /*
242 * Set up frame for RX.
243 */
244 frame->mii_stdelim = STE_MII_STARTDELIM;
245 frame->mii_opcode = STE_MII_READOP;
246 frame->mii_turnaround = 0;
247 frame->mii_data = 0;
248
249 CSR_WRITE_2(sc, STE_PHYCTL, 0);
250 /*
251 * Turn on data xmit.
252 */
253 MII_SET(STE_PHYCTL_MDIR);
254
255 ste_mii_sync(sc);
256
257 /*
258 * Send command/address info.
259 */
260 ste_mii_send(sc, frame->mii_stdelim, 2);
261 ste_mii_send(sc, frame->mii_opcode, 2);
262 ste_mii_send(sc, frame->mii_phyaddr, 5);
263 ste_mii_send(sc, frame->mii_regaddr, 5);
264
265 /* Turn off xmit. */
266 MII_CLR(STE_PHYCTL_MDIR);
267
268 /* Idle bit */
269 MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
270 DELAY(1);
271 MII_SET(STE_PHYCTL_MCLK);
272 DELAY(1);
273
274 /* Check for ack */
275 MII_CLR(STE_PHYCTL_MCLK);
276 DELAY(1);
277 ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
278 MII_SET(STE_PHYCTL_MCLK);
279 DELAY(1);
280
281 /*
282 * Now try reading data bits. If the ack failed, we still
283 * need to clock through 16 cycles to keep the PHY(s) in sync.
284 */
285 if (ack) {
286 for(i = 0; i < 16; i++) {
287 MII_CLR(STE_PHYCTL_MCLK);
288 DELAY(1);
289 MII_SET(STE_PHYCTL_MCLK);
290 DELAY(1);
291 }
292 goto fail;
293 }
294
295 for (i = 0x8000; i; i >>= 1) {
296 MII_CLR(STE_PHYCTL_MCLK);
297 DELAY(1);
298 if (!ack) {
299 if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
300 frame->mii_data |= i;
301 DELAY(1);
302 }
303 MII_SET(STE_PHYCTL_MCLK);
304 DELAY(1);
305 }
306
307 fail:
308
309 MII_CLR(STE_PHYCTL_MCLK);
310 DELAY(1);
311 MII_SET(STE_PHYCTL_MCLK);
312 DELAY(1);
313
314 if (ack)
315 return(1);
316 return(0);
317 }
318
319 /*
320 * Write to a PHY register through the MII.
321 */
322 static int
ste_mii_writereg(struct ste_softc * sc,struct ste_mii_frame * frame)323 ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
324 {
325 /*
326 * Set up frame for TX.
327 */
328
329 frame->mii_stdelim = STE_MII_STARTDELIM;
330 frame->mii_opcode = STE_MII_WRITEOP;
331 frame->mii_turnaround = STE_MII_TURNAROUND;
332
333 /*
334 * Turn on data output.
335 */
336 MII_SET(STE_PHYCTL_MDIR);
337
338 ste_mii_sync(sc);
339
340 ste_mii_send(sc, frame->mii_stdelim, 2);
341 ste_mii_send(sc, frame->mii_opcode, 2);
342 ste_mii_send(sc, frame->mii_phyaddr, 5);
343 ste_mii_send(sc, frame->mii_regaddr, 5);
344 ste_mii_send(sc, frame->mii_turnaround, 2);
345 ste_mii_send(sc, frame->mii_data, 16);
346
347 /* Idle bit. */
348 MII_SET(STE_PHYCTL_MCLK);
349 DELAY(1);
350 MII_CLR(STE_PHYCTL_MCLK);
351 DELAY(1);
352
353 /*
354 * Turn off xmit.
355 */
356 MII_CLR(STE_PHYCTL_MDIR);
357
358 return(0);
359 }
360
361 static int
ste_miibus_readreg(device_t dev,int phy,int reg)362 ste_miibus_readreg(device_t dev, int phy, int reg)
363 {
364 struct ste_softc *sc;
365 struct ste_mii_frame frame;
366
367 sc = device_get_softc(dev);
368
369 if ( sc->ste_one_phy && phy != 0 )
370 return (0);
371
372 bzero((char *)&frame, sizeof(frame));
373
374 frame.mii_phyaddr = phy;
375 frame.mii_regaddr = reg;
376 ste_mii_readreg(sc, &frame);
377
378 return(frame.mii_data);
379 }
380
381 static int
ste_miibus_writereg(device_t dev,int phy,int reg,int data)382 ste_miibus_writereg(device_t dev, int phy, int reg, int data)
383 {
384 struct ste_softc *sc;
385 struct ste_mii_frame frame;
386
387 sc = device_get_softc(dev);
388 bzero((char *)&frame, sizeof(frame));
389
390 frame.mii_phyaddr = phy;
391 frame.mii_regaddr = reg;
392 frame.mii_data = data;
393
394 ste_mii_writereg(sc, &frame);
395
396 return(0);
397 }
398
399 static void
ste_miibus_statchg(device_t dev)400 ste_miibus_statchg(device_t dev)
401 {
402 struct ste_softc *sc;
403 struct mii_data *mii;
404 int i;
405
406 sc = device_get_softc(dev);
407 mii = device_get_softc(sc->ste_miibus);
408
409 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
410 STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
411 } else {
412 STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
413 }
414
415 STE_SETBIT4(sc, STE_ASICCTL,STE_ASICCTL_RX_RESET |
416 STE_ASICCTL_TX_RESET);
417 for (i = 0; i < STE_TIMEOUT; i++) {
418 if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
419 break;
420 }
421 if (i == STE_TIMEOUT)
422 if_printf(&sc->arpcom.ac_if, "rx reset never completed\n");
423
424 return;
425 }
426
427 static int
ste_ifmedia_upd(struct ifnet * ifp)428 ste_ifmedia_upd(struct ifnet *ifp)
429 {
430 struct ste_softc *sc;
431 struct mii_data *mii;
432
433 sc = ifp->if_softc;
434 mii = device_get_softc(sc->ste_miibus);
435 sc->ste_link = 0;
436 if (mii->mii_instance) {
437 struct mii_softc *miisc;
438 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
439 miisc = LIST_NEXT(miisc, mii_list))
440 mii_phy_reset(miisc);
441 }
442 mii_mediachg(mii);
443
444 return(0);
445 }
446
447 static void
ste_ifmedia_sts(struct ifnet * ifp,struct ifmediareq * ifmr)448 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
449 {
450 struct ste_softc *sc;
451 struct mii_data *mii;
452
453 sc = ifp->if_softc;
454 mii = device_get_softc(sc->ste_miibus);
455
456 mii_pollstat(mii);
457 ifmr->ifm_active = mii->mii_media_active;
458 ifmr->ifm_status = mii->mii_media_status;
459
460 return;
461 }
462
463 static void
ste_wait(struct ste_softc * sc)464 ste_wait(struct ste_softc *sc)
465 {
466 int i;
467
468 for (i = 0; i < STE_TIMEOUT; i++) {
469 if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
470 break;
471 }
472
473 if (i == STE_TIMEOUT)
474 if_printf(&sc->arpcom.ac_if, "command never completed!\n");
475
476 return;
477 }
478
479 /*
480 * The EEPROM is slow: give it time to come ready after issuing
481 * it a command.
482 */
483 static int
ste_eeprom_wait(struct ste_softc * sc)484 ste_eeprom_wait(struct ste_softc *sc)
485 {
486 int i;
487
488 DELAY(1000);
489
490 for (i = 0; i < 100; i++) {
491 if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
492 DELAY(1000);
493 else
494 break;
495 }
496
497 if (i == 100) {
498 if_printf(&sc->arpcom.ac_if, "eeprom failed to come ready\n");
499 return(1);
500 }
501
502 return(0);
503 }
504
505 /*
506 * Read a sequence of words from the EEPROM. Note that ethernet address
507 * data is stored in the EEPROM in network byte order.
508 */
509 static int
ste_read_eeprom(struct ste_softc * sc,caddr_t dest,int off,int cnt,int swap)510 ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
511 {
512 int err = 0, i;
513 u_int16_t word = 0, *ptr;
514
515 if (ste_eeprom_wait(sc))
516 return(1);
517
518 for (i = 0; i < cnt; i++) {
519 CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
520 err = ste_eeprom_wait(sc);
521 if (err)
522 break;
523 word = CSR_READ_2(sc, STE_EEPROM_DATA);
524 ptr = (u_int16_t *)(dest + (i * 2));
525 if (swap)
526 *ptr = ntohs(word);
527 else
528 *ptr = word;
529 }
530
531 return(err ? 1 : 0);
532 }
533
534 static void
ste_setmulti(struct ste_softc * sc)535 ste_setmulti(struct ste_softc *sc)
536 {
537 struct ifnet *ifp;
538 int h = 0;
539 u_int32_t hashes[2] = { 0, 0 };
540 struct ifmultiaddr *ifma;
541
542 ifp = &sc->arpcom.ac_if;
543 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
544 STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
545 STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
546 return;
547 }
548
549 /* first, zot all the existing hash bits */
550 CSR_WRITE_2(sc, STE_MAR0, 0);
551 CSR_WRITE_2(sc, STE_MAR1, 0);
552 CSR_WRITE_2(sc, STE_MAR2, 0);
553 CSR_WRITE_2(sc, STE_MAR3, 0);
554
555 /* now program new ones */
556 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
557 if (ifma->ifma_addr->sa_family != AF_LINK)
558 continue;
559 h = ether_crc32_be(
560 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
561 ETHER_ADDR_LEN) & 0x3f;
562 if (h < 32)
563 hashes[0] |= (1 << h);
564 else
565 hashes[1] |= (1 << (h - 32));
566 }
567
568 CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
569 CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
570 CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
571 CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
572 STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
573 STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
574
575 return;
576 }
577
/*
 * Interrupt handler.  Reads STE_ISR_ACK (which acknowledges as it
 * reads) in a loop, dispatching RX/TX completion, statistics
 * overflow, link events, and host errors, then re-enables the
 * interrupt mask and restarts transmission if packets are queued.
 */
static void
ste_intr(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* See if this is really our interrupt (the IRQ line is shared). */
	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
		return;

	for (;;) {
		/* Reading the ack register clears the latched conditions. */
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (!(status & STE_INTRS))
			break;

		if (status & STE_ISR_RX_DMADONE)
			ste_rxeof(sc);

		if (status & STE_ISR_TX_DMADONE)
			ste_txeof(sc);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW) {
			/* Harvest counters now instead of waiting for the
			 * periodic callout, so they don't wrap. */
			callout_stop(&sc->ste_stat_timer);
			ste_stats_update(sc);
		}

		if (status & STE_ISR_LINKEVENT)
			mii_pollstat(device_get_softc(sc->ste_miibus));

		if (status & STE_ISR_HOSTERR) {
			/* Fatal bus error: full reinitialization. */
			ste_reset(sc);
			ste_init(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
627
628 /*
629 * A frame has been uploaded: pass the resulting mbuf chain up to
630 * the higher level protocols.
631 */
632 static void
ste_rxeof(struct ste_softc * sc)633 ste_rxeof(struct ste_softc *sc)
634 {
635 struct mbuf *m;
636 struct ifnet *ifp;
637 struct ste_chain_onefrag *cur_rx;
638 int total_len = 0, count=0;
639 u_int32_t rxstat;
640
641 ifp = &sc->arpcom.ac_if;
642
643 while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
644 & STE_RXSTAT_DMADONE) {
645 if ((STE_RX_LIST_CNT - count) < 3) {
646 break;
647 }
648
649 cur_rx = sc->ste_cdata.ste_rx_head;
650 sc->ste_cdata.ste_rx_head = cur_rx->ste_next;
651
652 /*
653 * If an error occurs, update stats, clear the
654 * status word and leave the mbuf cluster in place:
655 * it should simply get re-used next time this descriptor
656 * comes up in the ring.
657 */
658 if (rxstat & STE_RXSTAT_FRAME_ERR) {
659 IFNET_STAT_INC(ifp, ierrors, 1);
660 cur_rx->ste_ptr->ste_status = 0;
661 continue;
662 }
663
664 /*
665 * If there error bit was not set, the upload complete
666 * bit should be set which means we have a valid packet.
667 * If not, something truly strange has happened.
668 */
669 if (!(rxstat & STE_RXSTAT_DMADONE)) {
670 if_printf(ifp, "bad receive status -- packet dropped");
671 IFNET_STAT_INC(ifp, ierrors, 1);
672 cur_rx->ste_ptr->ste_status = 0;
673 continue;
674 }
675
676 /* No errors; receive the packet. */
677 m = cur_rx->ste_mbuf;
678 total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;
679
680 /*
681 * Try to conjure up a new mbuf cluster. If that
682 * fails, it means we have an out of memory condition and
683 * should leave the buffer in place and continue. This will
684 * result in a lost packet, but there's little else we
685 * can do in this situation.
686 */
687 if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
688 IFNET_STAT_INC(ifp, ierrors, 1);
689 cur_rx->ste_ptr->ste_status = 0;
690 continue;
691 }
692
693 IFNET_STAT_INC(ifp, ipackets, 1);
694 m->m_pkthdr.rcvif = ifp;
695 m->m_pkthdr.len = m->m_len = total_len;
696
697 ifp->if_input(ifp, m, NULL, -1);
698
699 cur_rx->ste_ptr->ste_status = 0;
700 count++;
701 }
702
703 return;
704 }
705
/*
 * Handle TX completion status from the STE_TX_STATUS register.
 * On underrun/excess-collision/reclaim errors the chip is reset and
 * reinitialized, and the TX start threshold is bumped to reduce the
 * chance of further underruns.
 */
static void
ste_txeoc(struct ste_softc *sc)
{
	u_int8_t txstat;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
	    STE_TXSTATUS_TXDONE) {
		if (txstat & STE_TXSTATUS_UNDERRUN ||
		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
		    txstat & STE_TXSTATUS_RECLAIMERR) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			if_printf(ifp, "transmission error: %x\n", txstat);

			ste_reset(sc);
			ste_init(sc);

			if (txstat & STE_TXSTATUS_UNDERRUN &&
			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
				if_printf(ifp, "tx underrun, increasing tx"
					  " start threshold to %d bytes\n",
					  sc->ste_tx_thresh);
			}
			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
			    (STE_PACKET_SIZE >> 4));
		}
		/*
		 * NOTE(review): this ste_init() runs unconditionally on
		 * every loop iteration, even for error-free completions,
		 * duplicating the reset+init already done in the error
		 * branch above.  That looks like a costly full reinit per
		 * TX-done event -- confirm against vendor errata / later
		 * FreeBSD revisions before changing.
		 */
		ste_init(sc);
		/* Writing the status back advances to the next TX status. */
		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
	}

	return;
}
742
743 static void
ste_txeof(struct ste_softc * sc)744 ste_txeof(struct ste_softc *sc)
745 {
746 struct ste_chain *cur_tx = NULL;
747 struct ifnet *ifp;
748 int idx;
749
750 ifp = &sc->arpcom.ac_if;
751
752 idx = sc->ste_cdata.ste_tx_cons;
753 while(idx != sc->ste_cdata.ste_tx_prod) {
754 cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
755
756 if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
757 break;
758
759 if (cur_tx->ste_mbuf != NULL) {
760 m_freem(cur_tx->ste_mbuf);
761 cur_tx->ste_mbuf = NULL;
762 }
763
764 IFNET_STAT_INC(ifp, opackets, 1);
765
766 sc->ste_cdata.ste_tx_cnt--;
767 STE_INC(idx, STE_TX_LIST_CNT);
768 ifp->if_timer = 0;
769 }
770
771 sc->ste_cdata.ste_tx_cons = idx;
772
773 if (cur_tx != NULL)
774 ifq_clr_oactive(&ifp->if_snd);
775
776 return;
777 }
778
/*
 * Periodic (1Hz) callout: harvest collision counters into ifnet
 * statistics and watch for link-up when no link has been seen yet.
 * Runs outside the serializer (plain callout), so it takes the
 * interface serializer itself.  Also invoked directly from ste_intr()
 * on a stats-overflow interrupt (serializer already held there --
 * NOTE(review): that path would re-enter the serializer below; verify
 * lwkt_serialize semantics for the interrupt-driven call).
 */
static void
ste_stats_update(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;
	mii = device_get_softc(sc->ste_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	/* Reading the collision registers clears them on the chip side --
	 * TODO confirm against the ST201 datasheet. */
	IFNET_STAT_INC(ifp, collisions, CSR_READ_1(sc, STE_LATE_COLLS)
	    + CSR_READ_1(sc, STE_MULTI_COLLS)
	    + CSR_READ_1(sc, STE_SINGLE_COLLS));

	if (!sc->ste_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->ste_link++;
			/*
			 * we don't get a call-back on re-init so do it
			 * otherwise we get stuck in the wrong link state
			 */
			ste_miibus_statchg(sc->ste_dev);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}

	/* Re-arm for the next second. */
	callout_reset(&sc->ste_stat_timer, hz, ste_stats_update, sc);
	lwkt_serialize_exit(ifp->if_serializer);
}
814
815
816 /*
817 * Probe for a Sundance ST201 chip. Check the PCI vendor and device
818 * IDs against our list and return a device name if we find a match.
819 */
820 static int
ste_probe(device_t dev)821 ste_probe(device_t dev)
822 {
823 struct ste_type *t;
824
825 t = ste_devs;
826
827 while(t->ste_name != NULL) {
828 if ((pci_get_vendor(dev) == t->ste_vid) &&
829 (pci_get_device(dev) == t->ste_did)) {
830 device_set_desc(dev, t->ste_name);
831 return(0);
832 }
833 t++;
834 }
835
836 return(ENXIO);
837 }
838
839 /*
840 * Attach the interface. Allocate softc structures, do ifmedia
841 * setup and ethernet/BPF attach.
842 */
843 static int
ste_attach(device_t dev)844 ste_attach(device_t dev)
845 {
846 struct ste_softc *sc;
847 struct ifnet *ifp;
848 int error = 0, rid;
849 uint8_t eaddr[ETHER_ADDR_LEN];
850
851 sc = device_get_softc(dev);
852 sc->ste_dev = dev;
853
854 /*
855 * Only use one PHY since this chip reports multiple
856 * Note on the DFE-550 the PHY is at 1 on the DFE-580
857 * it is at 0 & 1. It is rev 0x12.
858 */
859 if (pci_get_vendor(dev) == PCI_VENDOR_DLINK &&
860 pci_get_device(dev) == PCI_PRODUCT_DLINK_DL1002 &&
861 pci_get_revid(dev) == 0x12 )
862 sc->ste_one_phy = 1;
863
864 /*
865 * Handle power management nonsense.
866 */
867 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
868 u_int32_t iobase, membase, irq;
869
870 /* Save important PCI config data. */
871 iobase = pci_read_config(dev, STE_PCI_LOIO, 4);
872 membase = pci_read_config(dev, STE_PCI_LOMEM, 4);
873 irq = pci_read_config(dev, STE_PCI_INTLINE, 4);
874
875 /* Reset the power state. */
876 device_printf(dev, "chip is in D%d power mode "
877 "-- setting to D0\n", pci_get_powerstate(dev));
878 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
879
880 /* Restore PCI config data. */
881 pci_write_config(dev, STE_PCI_LOIO, iobase, 4);
882 pci_write_config(dev, STE_PCI_LOMEM, membase, 4);
883 pci_write_config(dev, STE_PCI_INTLINE, irq, 4);
884 }
885
886 /*
887 * Map control/status registers.
888 */
889 pci_enable_busmaster(dev);
890
891 rid = STE_RID;
892 sc->ste_res = bus_alloc_resource_any(dev, STE_RES, &rid, RF_ACTIVE);
893
894 if (sc->ste_res == NULL) {
895 device_printf(dev, "couldn't map ports/memory\n");
896 error = ENXIO;
897 goto fail;
898 }
899
900 sc->ste_btag = rman_get_bustag(sc->ste_res);
901 sc->ste_bhandle = rman_get_bushandle(sc->ste_res);
902
903 rid = 0;
904 sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
905 RF_SHAREABLE | RF_ACTIVE);
906
907 if (sc->ste_irq == NULL) {
908 device_printf(dev, "couldn't map interrupt\n");
909 error = ENXIO;
910 goto fail;
911 }
912
913 callout_init(&sc->ste_stat_timer);
914
915 ifp = &sc->arpcom.ac_if;
916 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
917
918 /* Reset the adapter. */
919 ste_reset(sc);
920
921 /*
922 * Get station address from the EEPROM.
923 */
924 if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, 3, 0)) {
925 device_printf(dev, "failed to read station address\n");
926 error = ENXIO;
927 goto fail;
928 }
929
930 /* Allocate the descriptor queues. */
931 sc->ste_ldata = contigmalloc(sizeof(struct ste_list_data), M_DEVBUF,
932 M_WAITOK | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
933
934 if (sc->ste_ldata == NULL) {
935 device_printf(dev, "no memory for list buffers!\n");
936 error = ENXIO;
937 goto fail;
938 }
939
940 /* Do MII setup. */
941 if (mii_phy_probe(dev, &sc->ste_miibus,
942 ste_ifmedia_upd, ste_ifmedia_sts)) {
943 device_printf(dev, "MII without any phy!\n");
944 error = ENXIO;
945 goto fail;
946 }
947
948 ifp->if_softc = sc;
949 ifp->if_mtu = ETHERMTU;
950 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
951 ifp->if_ioctl = ste_ioctl;
952 ifp->if_start = ste_start;
953 ifp->if_watchdog = ste_watchdog;
954 ifp->if_init = ste_init;
955 ifp->if_baudrate = 10000000;
956 ifq_set_maxlen(&ifp->if_snd, STE_TX_LIST_CNT - 1);
957 ifq_set_ready(&ifp->if_snd);
958
959 sc->ste_tx_thresh = STE_TXSTART_THRESH;
960
961 /*
962 * Call MI attach routine.
963 */
964 ether_ifattach(ifp, eaddr, NULL);
965
966 /*
967 * Tell the upper layer(s) we support long frames.
968 */
969 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
970
971 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->ste_irq));
972
973 error = bus_setup_intr(dev, sc->ste_irq, INTR_MPSAFE,
974 ste_intr, sc, &sc->ste_intrhand,
975 ifp->if_serializer);
976 if (error) {
977 device_printf(dev, "couldn't set up irq\n");
978 ether_ifdetach(ifp);
979 goto fail;
980 }
981
982 return 0;
983
984 fail:
985 ste_detach(dev);
986 return(error);
987 }
988
/*
 * Detach the device and free resources.  Also serves as the failure
 * path for ste_attach(), so every release is guarded by a NULL/state
 * check: only tear down what was actually acquired.
 */
static int
ste_detach(device_t dev)
{
	struct ste_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		/* Quiesce the chip and unhook the interrupt under the
		 * serializer before detaching from the network stack. */
		lwkt_serialize_enter(ifp->if_serializer);
		ste_stop(sc);
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}
	if (sc->ste_miibus != NULL)
		device_delete_child(dev, sc->ste_miibus);
	bus_generic_detach(dev);

	if (sc->ste_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
	if (sc->ste_res != NULL)
		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);
	if (sc->ste_ldata != NULL) {
		contigfree(sc->ste_ldata, sizeof(struct ste_list_data),
			   M_DEVBUF);
	}

	return(0);
}
1018
/*
 * Attach an mbuf cluster to an RX descriptor.  If m is NULL a fresh
 * cluster is allocated; otherwise the caller's mbuf is recycled in
 * place.  Returns 0 on success or ENOBUFS if allocation fails.
 *
 * NOTE(review): the recycle path resets m_len/m_data assuming m is a
 * cluster mbuf (m_ext.ext_buf valid, MCLBYTES long) -- confirm all
 * callers only pass cluster mbufs here.
 */
static int
ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c,
	   struct mbuf *m)
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);
		MCLGET(m_new, M_NOWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Align the IP header on a natural boundary. */
	m_adj(m_new, ETHER_ALIGN);

	c->ste_mbuf = m_new;
	c->ste_ptr->ste_status = 0;
	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t));
	c->ste_ptr->ste_frag.ste_len = (1536 + EVL_ENCAPLEN) | STE_FRAG_LAST;

	return(0);
}
1050
1051 static int
ste_init_rx_list(struct ste_softc * sc)1052 ste_init_rx_list(struct ste_softc *sc)
1053 {
1054 struct ste_chain_data *cd;
1055 struct ste_list_data *ld;
1056 int i;
1057
1058 cd = &sc->ste_cdata;
1059 ld = sc->ste_ldata;
1060
1061 for (i = 0; i < STE_RX_LIST_CNT; i++) {
1062 cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
1063 if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
1064 return(ENOBUFS);
1065 if (i == (STE_RX_LIST_CNT - 1)) {
1066 cd->ste_rx_chain[i].ste_next =
1067 &cd->ste_rx_chain[0];
1068 ld->ste_rx_list[i].ste_next =
1069 vtophys(&ld->ste_rx_list[0]);
1070 } else {
1071 cd->ste_rx_chain[i].ste_next =
1072 &cd->ste_rx_chain[i + 1];
1073 ld->ste_rx_list[i].ste_next =
1074 vtophys(&ld->ste_rx_list[i + 1]);
1075 }
1076 ld->ste_rx_list[i].ste_status = 0;
1077 }
1078
1079 cd->ste_rx_head = &cd->ste_rx_chain[0];
1080
1081 return(0);
1082 }
1083
1084 static void
ste_init_tx_list(struct ste_softc * sc)1085 ste_init_tx_list(struct ste_softc *sc)
1086 {
1087 struct ste_chain_data *cd;
1088 struct ste_list_data *ld;
1089 int i;
1090
1091 cd = &sc->ste_cdata;
1092 ld = sc->ste_ldata;
1093 for (i = 0; i < STE_TX_LIST_CNT; i++) {
1094 cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1095 cd->ste_tx_chain[i].ste_ptr->ste_next = 0;
1096 cd->ste_tx_chain[i].ste_ptr->ste_ctl = 0;
1097 cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]);
1098 if (i == (STE_TX_LIST_CNT - 1))
1099 cd->ste_tx_chain[i].ste_next =
1100 &cd->ste_tx_chain[0];
1101 else
1102 cd->ste_tx_chain[i].ste_next =
1103 &cd->ste_tx_chain[i + 1];
1104 if (i == 0)
1105 cd->ste_tx_chain[i].ste_prev =
1106 &cd->ste_tx_chain[STE_TX_LIST_CNT - 1];
1107 else
1108 cd->ste_tx_chain[i].ste_prev =
1109 &cd->ste_tx_chain[i - 1];
1110 }
1111
1112 cd->ste_tx_prod = 0;
1113 cd->ste_tx_cons = 0;
1114 cd->ste_tx_cnt = 0;
1115
1116 return;
1117 }
1118
/*
 * Initialize the adapter and bring the interface up: program the
 * station address, build the RX/TX rings, configure thresholds and
 * the RX filter, point the chip at the ring memory, enable the MAC
 * and interrupts, and start the stats callout.  Safe to call for
 * reinitialization; it stops the chip first.
 */
static void
ste_init(void *xsc)
{
	struct ste_softc *sc;
	int i;
	struct ifnet *ifp;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	ste_stop(sc);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) == ENOBUFS) {
		if_printf(ifp, "initialization failed: no "
			  "memory for RX buffers\n");
		ste_stop(sc);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 1);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Set up the RX filter. */
	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
	}

	/* Set capture broadcast bit to accept broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	} else {
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
	}

	ste_setmulti(sc);

	/* Load the address of the RX list.  The RX DMA engine must be
	 * stalled (and the stall confirmed via ste_wait) before its
	 * list pointer may be rewritten. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    vtophys(&sc->ste_ldata->ste_rx_list[0]));
	/* NOTE(review): UNSTALL is written twice here (and for TX below);
	 * presumably a chip quirk workaround -- keep as-is. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval (defer until we TX first packet */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/* Load address of the TX list */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);
	/* -1 marks "no previous TX descriptor" for the encap path. */
	sc->ste_tx_prev_idx=-1;

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);

	/* Enable interrupts (clear any pending status first). */
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + EVL_ENCAPLEN);

	ste_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Kick off the once-per-second stats/link callout. */
	callout_reset(&sc->ste_stat_timer, hz, ste_stats_update, sc);
}
1221
/*
 * Stop the adapter: halt the stats callout, mask interrupts, disable the
 * MAC RX/TX paths and counters, stall both DMA engines, hard-reset the
 * chip, and release every mbuf still held by the RX and TX rings.  Safe
 * to call on an already-stopped device (ste_init() does exactly that).
 */
static void
ste_stop(struct ste_softc *sc)
{
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	callout_stop(&sc->ste_stat_timer);

	/* Mask all interrupts before touching the datapath. */
	CSR_WRITE_2(sc, STE_IMR, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	/*
	 * Try really hard to stop the RX engine or under heavy RX
	 * data chip will write into de-allocated memory.
	 */
	ste_reset(sc);

	sc->ste_link = 0;

	/* Free any mbufs still attached to RX descriptors. */
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
		}
	}

	/* Free any mbufs still pending transmission. */
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
		}
	}

	/* Clear the descriptor lists so stale entries can't be re-DMAed. */
	bzero(sc->ste_ldata, sizeof(struct ste_list_data));

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return;
}
1268
1269 static void
ste_reset(struct ste_softc * sc)1270 ste_reset(struct ste_softc *sc)
1271 {
1272 int i;
1273
1274 STE_SETBIT4(sc, STE_ASICCTL,
1275 STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1276 STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1277 STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1278 STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1279 STE_ASICCTL_EXTRESET_RESET);
1280
1281 DELAY(100000);
1282
1283 for (i = 0; i < STE_TIMEOUT; i++) {
1284 if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1285 break;
1286 }
1287
1288 if (i == STE_TIMEOUT)
1289 if_printf(&sc->arpcom.ac_if, "global reset never completed\n");
1290
1291 return;
1292 }
1293
1294 static int
ste_ioctl(struct ifnet * ifp,u_long command,caddr_t data,struct ucred * cr)1295 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
1296 {
1297 struct ste_softc *sc;
1298 struct ifreq *ifr;
1299 struct mii_data *mii;
1300 int error = 0;
1301
1302 sc = ifp->if_softc;
1303 ifr = (struct ifreq *)data;
1304
1305 switch(command) {
1306 case SIOCSIFFLAGS:
1307 if (ifp->if_flags & IFF_UP) {
1308 if (ifp->if_flags & IFF_RUNNING &&
1309 ifp->if_flags & IFF_PROMISC &&
1310 !(sc->ste_if_flags & IFF_PROMISC)) {
1311 STE_SETBIT1(sc, STE_RX_MODE,
1312 STE_RXMODE_PROMISC);
1313 } else if (ifp->if_flags & IFF_RUNNING &&
1314 !(ifp->if_flags & IFF_PROMISC) &&
1315 sc->ste_if_flags & IFF_PROMISC) {
1316 STE_CLRBIT1(sc, STE_RX_MODE,
1317 STE_RXMODE_PROMISC);
1318 }
1319 if (!(ifp->if_flags & IFF_RUNNING)) {
1320 sc->ste_tx_thresh = STE_TXSTART_THRESH;
1321 ste_init(sc);
1322 }
1323 } else {
1324 if (ifp->if_flags & IFF_RUNNING)
1325 ste_stop(sc);
1326 }
1327 sc->ste_if_flags = ifp->if_flags;
1328 error = 0;
1329 break;
1330 case SIOCADDMULTI:
1331 case SIOCDELMULTI:
1332 ste_setmulti(sc);
1333 error = 0;
1334 break;
1335 case SIOCGIFMEDIA:
1336 case SIOCSIFMEDIA:
1337 mii = device_get_softc(sc->ste_miibus);
1338 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1339 break;
1340 default:
1341 error = ether_ioctl(ifp, command, data);
1342 break;
1343 }
1344 return(error);
1345 }
1346
1347 static int
ste_encap(struct ste_softc * sc,struct ste_chain * c,struct mbuf * m_head)1348 ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
1349 {
1350 int frag = 0;
1351 struct ste_frag *f = NULL;
1352 struct mbuf *m;
1353 struct ste_desc *d;
1354 int total_len = 0;
1355
1356 d = c->ste_ptr;
1357 d->ste_ctl = 0;
1358
1359 encap_retry:
1360 for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1361 if (m->m_len != 0) {
1362 if (frag == STE_MAXFRAGS)
1363 break;
1364 total_len += m->m_len;
1365 f = &d->ste_frags[frag];
1366 f->ste_addr = vtophys(mtod(m, vm_offset_t));
1367 f->ste_len = m->m_len;
1368 frag++;
1369 }
1370 }
1371
1372 if (m != NULL) {
1373 struct mbuf *mn;
1374
1375 /*
1376 * We ran out of segments. We have to recopy this
1377 * mbuf chain first. Bail out if we can't get the
1378 * new buffers. Code borrowed from if_fxp.c.
1379 */
1380 MGETHDR(mn, M_NOWAIT, MT_DATA);
1381 if (mn == NULL) {
1382 m_freem(m_head);
1383 return ENOMEM;
1384 }
1385 if (m_head->m_pkthdr.len > MHLEN) {
1386 MCLGET(mn, M_NOWAIT);
1387 if ((mn->m_flags & M_EXT) == 0) {
1388 m_freem(mn);
1389 m_freem(m_head);
1390 return ENOMEM;
1391 }
1392 }
1393 m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(mn, void *));
1394 mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
1395 m_freem(m_head);
1396 m_head = mn;
1397 goto encap_retry;
1398 }
1399
1400 c->ste_mbuf = m_head;
1401 d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
1402 d->ste_ctl = 1;
1403
1404 return(0);
1405 }
1406
/*
 * if_start handler: drain the interface send queue into the TX ring.
 * The very first descriptor after (re)init (ste_tx_prev_idx < 0) must
 * load the TX DMA list pointer and polling period to start the engine;
 * subsequent descriptors are simply chained onto the previous one via
 * its ste_next field.
 */
static void
ste_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct ste_softc *sc;
	struct mbuf *m_head = NULL;
	struct ste_chain *cur_tx = NULL;
	int idx;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);

	sc = ifp->if_softc;

	/* No link: frames would go nowhere, so drop everything queued. */
	if (!sc->ste_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	/* Producer index: next free slot in the TX ring. */
	idx = sc->ste_cdata.ste_tx_prod;

	/* Loop until we hit a slot whose mbuf hasn't been reclaimed yet. */
	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {

		/*
		 * Keep a few descriptors in reserve; presumably so the
		 * producer never fully catches the consumer — TODO confirm
		 * why the margin is exactly 3.
		 */
		if ((STE_TX_LIST_CNT - sc->ste_cdata.ste_tx_cnt) < 3) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		/* ste_encap frees m_head itself on failure. */
		if (ste_encap(sc, cur_tx, m_head) != 0)
			break;

		/* Terminate the hardware list at this descriptor. */
		cur_tx->ste_ptr->ste_next = 0;

		if(sc->ste_tx_prev_idx < 0){
			/* First frame since init: start the TX engine. */
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			/* Load address of the TX list */
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
			ste_wait(sc);

			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
			    vtophys(&sc->ste_ldata->ste_tx_list[0]));

			/* Set TX polling interval to start TX engine */
			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);

			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
			ste_wait(sc);
		}else{
			/* Chain onto the previously queued descriptor. */
			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
			sc->ste_cdata.ste_tx_chain[
			    sc->ste_tx_prev_idx].ste_ptr->ste_next
			    = cur_tx->ste_phys;
		}

		sc->ste_tx_prev_idx=idx;

		/* Feed a copy of the outgoing frame to bpf listeners. */
		BPF_MTAP(ifp, cur_tx->ste_mbuf);

		STE_INC(idx, STE_TX_LIST_CNT);
		sc->ste_cdata.ste_tx_cnt++;
		/* Arm the watchdog: TX must complete within 5 seconds. */
		ifp->if_timer = 5;
		sc->ste_cdata.ste_tx_prod = idx;
	}
}
1478
1479 static void
ste_watchdog(struct ifnet * ifp)1480 ste_watchdog(struct ifnet *ifp)
1481 {
1482 struct ste_softc *sc;
1483
1484 sc = ifp->if_softc;
1485
1486 IFNET_STAT_INC(ifp, oerrors, 1);
1487 if_printf(ifp, "watchdog timeout\n");
1488
1489 ste_txeoc(sc);
1490 ste_txeof(sc);
1491 ste_rxeof(sc);
1492 ste_reset(sc);
1493 ste_init(sc);
1494
1495 if (!ifq_is_empty(&ifp->if_snd))
1496 if_devstart(ifp);
1497 }
1498
1499 static void
ste_shutdown(device_t dev)1500 ste_shutdown(device_t dev)
1501 {
1502 struct ste_softc *sc;
1503
1504 sc = device_get_softc(dev);
1505
1506 ste_stop(sc);
1507
1508 return;
1509 }
1510