1 /* $OpenBSD: if_stge.c,v 1.74 2024/05/24 06:02:57 jsg Exp $ */
2 /* $NetBSD: if_stge.c,v 1.27 2005/05/16 21:35:32 bouyer Exp $ */
3
4 /*-
5 * Copyright (c) 2001 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Device driver for the Sundance Tech. TC9021 10/100/1000
35 * Ethernet controller.
36 */
37
38 #include "bpfilter.h"
39 #include "vlan.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/timeout.h>
44 #include <sys/mbuf.h>
45 #include <sys/ioctl.h>
46 #include <sys/errno.h>
47 #include <sys/device.h>
48 #include <sys/queue.h>
49
50 #include <net/if.h>
51
52 #include <netinet/in.h>
53 #include <netinet/if_ether.h>
54
55 #include <net/if_media.h>
56
57 #if NBPFILTER > 0
58 #include <net/bpf.h>
59 #endif
60
61 #include <machine/bus.h>
62 #include <machine/intr.h>
63
64 #include <dev/mii/miivar.h>
65 #include <dev/mii/mii_bitbang.h>
66
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 #include <dev/pci/pcidevs.h>
70
71 #include <dev/pci/if_stgereg.h>
72
/* ifnet interface functions. */
void	stge_start(struct ifnet *);
void	stge_watchdog(struct ifnet *);
int	stge_ioctl(struct ifnet *, u_long, caddr_t);
int	stge_init(struct ifnet *);
void	stge_stop(struct ifnet *, int);

/* Hardware helpers: reset, receive buffer and EEPROM management. */
void	stge_reset(struct stge_softc *);
void	stge_rxdrain(struct stge_softc *);
int	stge_add_rxbuf(struct stge_softc *, int);
void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
void	stge_tick(void *);

void	stge_stats_update(struct stge_softc *);

/* Program promiscuous/multicast receive filters. */
void	stge_iff(struct stge_softc *);

/* Interrupt service routine and its Tx/Rx helpers. */
int	stge_intr(void *);
void	stge_txintr(struct stge_softc *);
void	stge_rxintr(struct stge_softc *);

/* MII (PHY management) register access and link status change. */
int	stge_mii_readreg(struct device *, int, int);
void	stge_mii_writereg(struct device *, int, int, int);
void	stge_mii_statchg(struct device *);

int	stge_mediachange(struct ifnet *);
void	stge_mediastatus(struct ifnet *, struct ifmediareq *);

/* Autoconfiguration glue. */
int	stge_match(struct device *, void *, void *);
void	stge_attach(struct device *, struct device *, void *);

/*
 * When non-zero, received packets small enough to fit in a header
 * mbuf are copied into one instead of passing the cluster up the
 * stack (see stge_rxintr()).
 */
int	stge_copy_small = 0;

const struct cfattach stge_ca = {
	sizeof(struct stge_softc), stge_match, stge_attach,
};

struct cfdriver stge_cd = {
	NULL, "stge", DV_IFNET
};

/* Bit-bang MII accessor prototypes. */
uint32_t stge_mii_bitbang_read(struct device *);
void	stge_mii_bitbang_write(struct device *, uint32_t);

/* PhyCtrl register bit assignments for the generic MII bit-bang code. */
const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

/*
 * Devices supported by this driver.
 */
const struct pci_matchid stge_devices[] = {
	{ PCI_VENDOR_ANTARES,	PCI_PRODUCT_ANTARES_TC9021 },
	{ PCI_VENDOR_DLINK,	PCI_PRODUCT_DLINK_DGE550T },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_ST1023 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_ST2021 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_TC9021 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_TC9021_ALT },
	{ PCI_VENDOR_TAMARACK,	PCI_PRODUCT_TAMARACK_TC9021 },
	{ PCI_VENDOR_TAMARACK,	PCI_PRODUCT_TAMARACK_TC9021_ALT }
};
141
142 int
stge_match(struct device * parent,void * match,void * aux)143 stge_match(struct device *parent, void *match, void *aux)
144 {
145 return (pci_matchbyid((struct pci_attach_args *)aux, stge_devices,
146 sizeof(stge_devices) / sizeof(stge_devices[0])));
147 }
148
/*
 * stge_attach:
 *
 *	Autoconf attach routine: map the device registers, hook up the
 *	interrupt, allocate and load the control-data DMA area, create
 *	the Tx/Rx buffer DMA maps, read the station address, probe the
 *	PHY, and attach the network interface.  On failure, resources
 *	are released in reverse order via the fail_* labels.
 */
void
stge_attach(struct device *parent, struct device *self, void *aux)
{
	struct stge_softc *sc = (struct stge_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	bus_size_t iosize;
	int ioh_valid, memh_valid;
	int i, rseg, error;

	/* Set up the one-second MII/statistics tick (armed in stge_init()). */
	timeout_set(&sc->sc_timeout, stge_tick, sc);

	sc->sc_rev = PCI_REVISION(pa->pa_class);

	/*
	 * Map the device.
	 */
	ioh_valid = (pci_mapreg_map(pa, STGE_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, &iosize, 0) == 0);
	memh_valid = (pci_mapreg_map(pa, STGE_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, &iosize, 0) == 0);
	/*
	 * NOTE(review): if both calls succeed, iosize holds the size of
	 * the *memory* mapping (the second call overwrote it) and the I/O
	 * mapping is never unmapped -- confirm this is intentional.
	 */

	/* Prefer memory space; fall back to I/O space. */
	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Get it out of power save mode if needed. */
	pci_set_powerstate(pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		goto fail_0;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, stge_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_0;
	}
	printf(": %s", intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct stge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct stge_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct stge_control_data), 1,
	    sizeof(struct stge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct stge_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.  Note that rev B.3
	 * and earlier seem to have a bug regarding multi-fragment
	 * packets.  We need to limit the number of Tx segments on
	 * such chips to 1.
	 */
	for (i = 0; i < STGE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    STGE_JUMBO_FRAMELEN, STGE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < STGE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers. For Sundance 1023 you can only read it
	 * from EEPROM.
	 */
	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_SUNDANCE_ST1023) {
		/* Each 16-bit register holds two address bytes, low byte first. */
		sc->sc_arpcom.ac_enaddr[0] = CSR_READ_2(sc,
		    STGE_StationAddress0) & 0xff;
		sc->sc_arpcom.ac_enaddr[1] = CSR_READ_2(sc,
		    STGE_StationAddress0) >> 8;
		sc->sc_arpcom.ac_enaddr[2] = CSR_READ_2(sc,
		    STGE_StationAddress1) & 0xff;
		sc->sc_arpcom.ac_enaddr[3] = CSR_READ_2(sc,
		    STGE_StationAddress1) >> 8;
		sc->sc_arpcom.ac_enaddr[4] = CSR_READ_2(sc,
		    STGE_StationAddress2) & 0xff;
		sc->sc_arpcom.ac_enaddr[5] = CSR_READ_2(sc,
		    STGE_StationAddress2) >> 8;
		sc->sc_stge1023 = 0;
	} else {
		/* ST1023: read three little-endian words from the EEPROM. */
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = letoh16(myaddr[i]);
		}
		(void)memcpy(sc->sc_arpcom.ac_enaddr, myaddr,
		    sizeof(sc->sc_arpcom.ac_enaddr));
		sc->sc_stge1023 = 1;
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = stge_mii_readreg;
	sc->sc_mii.mii_writereg = stge_mii_writereg;
	sc->sc_mii.mii_statchg = stge_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, stge_mediachange,
	    stge_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	/* No PHY found: fall back to a "none" media entry. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* NOTE(review): redundant -- ifp already points here (set above). */
	ifp = &sc->sc_arpcom.ac_if;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_watchdog = stge_watchdog;
#ifdef STGE_JUMBO
	ifp->if_hardmtu = STGE_JUMBO_MTU;
#endif
	/* One descriptor is always kept unused; see stge_start(). */
	ifq_init_maxlen(&ifp->if_snd, STGE_NTXDESC - 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
#ifdef fake
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;
#endif

#ifdef STGE_CHECKSUM
	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	sc->sc_arpcom.ac_if.if_capabilities |= IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < STGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < STGE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct stge_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
	return;
}
429
430 static void
stge_dma_wait(struct stge_softc * sc)431 stge_dma_wait(struct stge_softc *sc)
432 {
433 int i;
434
435 for (i = 0; i < STGE_TIMEOUT; i++) {
436 delay(2);
437 if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
438 break;
439 }
440
441 if (i == STGE_TIMEOUT)
442 printf("%s: DMA wait timed out\n", sc->sc_dev.dv_xname);
443 }
444
445 /*
446 * stge_start: [ifnet interface function]
447 *
448 * Start packet transmission on the interface.
449 */
450 void
stge_start(struct ifnet * ifp)451 stge_start(struct ifnet *ifp)
452 {
453 struct stge_softc *sc = ifp->if_softc;
454 struct mbuf *m0;
455 struct stge_descsoft *ds;
456 struct stge_tfd *tfd;
457 bus_dmamap_t dmamap;
458 int error, firsttx, nexttx, opending, seg, totlen;
459 uint64_t csum_flags = 0, tfc;
460
461 if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
462 return;
463
464 /*
465 * Remember the previous number of pending transmissions
466 * and the first descriptor we will use.
467 */
468 opending = sc->sc_txpending;
469 firsttx = STGE_NEXTTX(sc->sc_txlast);
470
471 /*
472 * Loop through the send queue, setting up transmit descriptors
473 * until we drain the queue, or use up all available transmit
474 * descriptors.
475 */
476 for (;;) {
477 /*
478 * Grab a packet off the queue.
479 */
480 m0 = ifq_deq_begin(&ifp->if_snd);
481 if (m0 == NULL)
482 break;
483
484 /*
485 * Leave one unused descriptor at the end of the
486 * list to prevent wrapping completely around.
487 */
488 if (sc->sc_txpending == (STGE_NTXDESC - 1)) {
489 ifq_deq_rollback(&ifp->if_snd, m0);
490 break;
491 }
492
493 /*
494 * Get the last and next available transmit descriptor.
495 */
496 nexttx = STGE_NEXTTX(sc->sc_txlast);
497 tfd = &sc->sc_txdescs[nexttx];
498 ds = &sc->sc_txsoft[nexttx];
499
500 dmamap = ds->ds_dmamap;
501
502 /*
503 * Load the DMA map. If this fails, the packet either
504 * didn't fit in the allotted number of segments, or we
505 * were short on resources. For the too-many-segments
506 * case, we simply report an error and drop the packet,
507 * since we can't sanely copy a jumbo packet to a single
508 * buffer.
509 */
510 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
511 BUS_DMA_NOWAIT);
512 if (error) {
513 if (error == EFBIG) {
514 printf("%s: Tx packet consumes too many "
515 "DMA segments (%u), dropping...\n",
516 sc->sc_dev.dv_xname, dmamap->dm_nsegs);
517 ifq_deq_commit(&ifp->if_snd, m0);
518 m_freem(m0);
519 continue;
520 }
521 /*
522 * Short on resources, just stop for now.
523 */
524 ifq_deq_rollback(&ifp->if_snd, m0);
525 break;
526 }
527
528 ifq_deq_commit(&ifp->if_snd, m0);
529
530 /*
531 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
532 */
533
534 /* Sync the DMA map. */
535 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
536 BUS_DMASYNC_PREWRITE);
537
538 /* Initialize the fragment list. */
539 for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) {
540 tfd->tfd_frags[seg].frag_word0 =
541 htole64(FRAG_ADDR(dmamap->dm_segs[seg].ds_addr) |
542 FRAG_LEN(dmamap->dm_segs[seg].ds_len));
543 totlen += dmamap->dm_segs[seg].ds_len;
544 }
545
546 #ifdef STGE_CHECKSUM
547 /*
548 * Initialize checksumming flags in the descriptor.
549 * Byte-swap constants so the compiler can optimize.
550 */
551 if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
552 csum_flags |= TFD_IPChecksumEnable;
553
554 if (m0->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
555 csum_flags |= TFD_TCPChecksumEnable;
556 else if (m0->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
557 csum_flags |= TFD_UDPChecksumEnable;
558 #endif
559
560 /*
561 * Initialize the descriptor and give it to the chip.
562 */
563 tfc = TFD_FrameId(nexttx) | TFD_WordAlign(/*totlen & */3) |
564 TFD_FragCount(seg) | csum_flags;
565 if ((nexttx & STGE_TXINTR_SPACING_MASK) == 0)
566 tfc |= TFD_TxDMAIndicate;
567
568 #if NVLAN > 0
569 /* Check if we have a VLAN tag to insert. */
570 if (m0->m_flags & M_VLANTAG)
571 tfc |= (TFD_VLANTagInsert |
572 TFD_VID(m0->m_pkthdr.ether_vtag));
573 #endif
574
575 tfd->tfd_control = htole64(tfc);
576
577 /* Sync the descriptor. */
578 STGE_CDTXSYNC(sc, nexttx,
579 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
580
581 /*
582 * Kick the transmit DMA logic.
583 */
584 CSR_WRITE_4(sc, STGE_DMACtrl,
585 sc->sc_DMACtrl | DMAC_TxDMAPollNow);
586
587 /*
588 * Store a pointer to the packet so we can free it later.
589 */
590 ds->ds_mbuf = m0;
591
592 /* Advance the tx pointer. */
593 sc->sc_txpending++;
594 sc->sc_txlast = nexttx;
595
596 #if NBPFILTER > 0
597 /*
598 * Pass the packet to any BPF listeners.
599 */
600 if (ifp->if_bpf)
601 bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
602 #endif /* NBPFILTER > 0 */
603 }
604
605 if (sc->sc_txpending == (STGE_NTXDESC - 1)) {
606 /* No more slots left; notify upper layer. */
607 ifq_set_oactive(&ifp->if_snd);
608 }
609
610 if (sc->sc_txpending != opending) {
611 /*
612 * We enqueued packets. If the transmitter was idle,
613 * reset the txdirty pointer.
614 */
615 if (opending == 0)
616 sc->sc_txdirty = firsttx;
617
618 /* Set a watchdog timer in case the chip flakes out. */
619 ifp->if_timer = 5;
620 }
621 }
622
623 /*
624 * stge_watchdog: [ifnet interface function]
625 *
626 * Watchdog timer handler.
627 */
628 void
stge_watchdog(struct ifnet * ifp)629 stge_watchdog(struct ifnet *ifp)
630 {
631 struct stge_softc *sc = ifp->if_softc;
632
633 /*
634 * Sweep up first, since we don't interrupt every frame.
635 */
636 stge_txintr(sc);
637 if (sc->sc_txpending != 0) {
638 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
639 ifp->if_oerrors++;
640
641 (void) stge_init(ifp);
642
643 /* Try to get more packets going. */
644 stge_start(ifp);
645 }
646 }
647
648 /*
649 * stge_ioctl: [ifnet interface function]
650 *
651 * Handle control requests from the operator.
652 */
653 int
stge_ioctl(struct ifnet * ifp,u_long cmd,caddr_t data)654 stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
655 {
656 struct stge_softc *sc = ifp->if_softc;
657 struct ifreq *ifr = (struct ifreq *)data;
658 int s, error = 0;
659
660 s = splnet();
661
662 switch (cmd) {
663 case SIOCSIFADDR:
664 ifp->if_flags |= IFF_UP;
665 if (!(ifp->if_flags & IFF_RUNNING))
666 stge_init(ifp);
667 break;
668
669 case SIOCSIFFLAGS:
670 if (ifp->if_flags & IFF_UP) {
671 if (ifp->if_flags & IFF_RUNNING)
672 error = ENETRESET;
673 else
674 stge_init(ifp);
675 } else {
676 if (ifp->if_flags & IFF_RUNNING)
677 stge_stop(ifp, 1);
678 }
679 break;
680
681 case SIOCSIFMEDIA:
682 case SIOCGIFMEDIA:
683 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
684 break;
685
686 default:
687 error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
688 }
689
690 if (error == ENETRESET) {
691 if (ifp->if_flags & IFF_RUNNING)
692 stge_iff(sc);
693 error = 0;
694 }
695
696 splx(s);
697 return (error);
698 }
699
700 /*
701 * stge_intr:
702 *
703 * Interrupt service routine.
704 */
705 int
stge_intr(void * arg)706 stge_intr(void *arg)
707 {
708 struct stge_softc *sc = arg;
709 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
710 uint32_t txstat;
711 int wantinit;
712 uint16_t isr;
713
714 if ((CSR_READ_2(sc, STGE_IntStatus) & IS_InterruptStatus) == 0)
715 return (0);
716
717 for (wantinit = 0; wantinit == 0;) {
718 isr = CSR_READ_2(sc, STGE_IntStatusAck);
719 if ((isr & sc->sc_IntEnable) == 0)
720 break;
721
722 /* Host interface errors. */
723 if (isr & IS_HostError) {
724 printf("%s: Host interface error\n",
725 sc->sc_dev.dv_xname);
726 wantinit = 1;
727 continue;
728 }
729
730 /* Receive interrupts. */
731 if (isr & (IS_RxDMAComplete|IS_RFDListEnd)) {
732 stge_rxintr(sc);
733 if (isr & IS_RFDListEnd) {
734 printf("%s: receive ring overflow\n",
735 sc->sc_dev.dv_xname);
736 /*
737 * XXX Should try to recover from this
738 * XXX more gracefully.
739 */
740 wantinit = 1;
741 }
742 }
743
744 /* Transmit interrupts. */
745 if (isr & (IS_TxDMAComplete|IS_TxComplete))
746 stge_txintr(sc);
747
748 /* Statistics overflow. */
749 if (isr & IS_UpdateStats)
750 stge_stats_update(sc);
751
752 /* Transmission errors. */
753 if (isr & IS_TxComplete) {
754 for (;;) {
755 txstat = CSR_READ_4(sc, STGE_TxStatus);
756 if ((txstat & TS_TxComplete) == 0)
757 break;
758 if (txstat & TS_TxUnderrun) {
759 sc->sc_txthresh++;
760 if (sc->sc_txthresh > 0x0fff)
761 sc->sc_txthresh = 0x0fff;
762 printf("%s: transmit underrun, new "
763 "threshold: %d bytes\n",
764 sc->sc_dev.dv_xname,
765 sc->sc_txthresh << 5);
766 }
767 if (txstat & TS_MaxCollisions)
768 printf("%s: excessive collisions\n",
769 sc->sc_dev.dv_xname);
770 }
771 wantinit = 1;
772 }
773
774 }
775
776 if (wantinit)
777 stge_init(ifp);
778
779 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
780
781 /* Try to get more packets going. */
782 stge_start(ifp);
783
784 return (1);
785 }
786
787 /*
788 * stge_txintr:
789 *
790 * Helper; handle transmit interrupts.
791 */
792 void
stge_txintr(struct stge_softc * sc)793 stge_txintr(struct stge_softc *sc)
794 {
795 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
796 struct stge_descsoft *ds;
797 uint64_t control;
798 int i;
799
800 ifq_clr_oactive(&ifp->if_snd);
801
802 /*
803 * Go through our Tx list and free mbufs for those
804 * frames which have been transmitted.
805 */
806 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
807 i = STGE_NEXTTX(i), sc->sc_txpending--) {
808 ds = &sc->sc_txsoft[i];
809
810 STGE_CDTXSYNC(sc, i,
811 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
812
813 control = letoh64(sc->sc_txdescs[i].tfd_control);
814 if ((control & TFD_TFDDone) == 0)
815 break;
816
817 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
818 0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
819 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
820 m_freem(ds->ds_mbuf);
821 ds->ds_mbuf = NULL;
822 }
823
824 /* Update the dirty transmit buffer pointer. */
825 sc->sc_txdirty = i;
826
827 /*
828 * If there are no more pending transmissions, cancel the watchdog
829 * timer.
830 */
831 if (sc->sc_txpending == 0)
832 ifp->if_timer = 0;
833 }
834
835 /*
836 * stge_rxintr:
837 *
838 * Helper; handle receive interrupts.
839 */
840 void
stge_rxintr(struct stge_softc * sc)841 stge_rxintr(struct stge_softc *sc)
842 {
843 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
844 struct stge_descsoft *ds;
845 struct mbuf *m, *tailm;
846 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
847 uint64_t status;
848 int i, len;
849
850 for (i = sc->sc_rxptr;; i = STGE_NEXTRX(i)) {
851 ds = &sc->sc_rxsoft[i];
852
853 STGE_CDRXSYNC(sc, i,
854 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
855
856 status = letoh64(sc->sc_rxdescs[i].rfd_status);
857
858 if ((status & RFD_RFDDone) == 0)
859 break;
860
861 if (__predict_false(sc->sc_rxdiscard)) {
862 STGE_INIT_RXDESC(sc, i);
863 if (status & RFD_FrameEnd) {
864 /* Reset our state. */
865 sc->sc_rxdiscard = 0;
866 }
867 continue;
868 }
869
870 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
871 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
872
873 m = ds->ds_mbuf;
874
875 /*
876 * Add a new receive buffer to the ring.
877 */
878 if (stge_add_rxbuf(sc, i) != 0) {
879 /*
880 * Failed, throw away what we've done so
881 * far, and discard the rest of the packet.
882 */
883 ifp->if_ierrors++;
884 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
885 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
886 STGE_INIT_RXDESC(sc, i);
887 if ((status & RFD_FrameEnd) == 0)
888 sc->sc_rxdiscard = 1;
889 m_freem(sc->sc_rxhead);
890 STGE_RXCHAIN_RESET(sc);
891 continue;
892 }
893
894 #ifdef DIAGNOSTIC
895 if (status & RFD_FrameStart) {
896 KASSERT(sc->sc_rxhead == NULL);
897 KASSERT(sc->sc_rxtailp == &sc->sc_rxhead);
898 }
899 #endif
900
901 STGE_RXCHAIN_LINK(sc, m);
902
903 /*
904 * If this is not the end of the packet, keep
905 * looking.
906 */
907 if ((status & RFD_FrameEnd) == 0) {
908 sc->sc_rxlen += m->m_len;
909 continue;
910 }
911
912 /*
913 * Okay, we have the entire packet now...
914 */
915 *sc->sc_rxtailp = NULL;
916 m = sc->sc_rxhead;
917 tailm = sc->sc_rxtail;
918
919 STGE_RXCHAIN_RESET(sc);
920
921 /*
922 * If the packet had an error, drop it. Note we
923 * count the error later in the periodic stats update.
924 */
925 if (status & (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
926 RFD_RxAlignmentError | RFD_RxFCSError |
927 RFD_RxLengthError)) {
928 m_freem(m);
929 continue;
930 }
931
932 /*
933 * No errors.
934 *
935 * Note we have configured the chip to not include
936 * the CRC at the end of the packet.
937 */
938 len = RFD_RxDMAFrameLen(status);
939 tailm->m_len = len - sc->sc_rxlen;
940
941 /*
942 * If the packet is small enough to fit in a
943 * single header mbuf, allocate one and copy
944 * the data into it. This greatly reduces
945 * memory consumption when we receive lots
946 * of small packets.
947 */
948 if (stge_copy_small != 0 && len <= (MHLEN - 2)) {
949 struct mbuf *nm;
950 MGETHDR(nm, M_DONTWAIT, MT_DATA);
951 if (nm == NULL) {
952 ifp->if_ierrors++;
953 m_freem(m);
954 continue;
955 }
956 nm->m_data += 2;
957 nm->m_pkthdr.len = nm->m_len = len;
958 m_copydata(m, 0, len, mtod(nm, caddr_t));
959 m_freem(m);
960 m = nm;
961 }
962
963 /*
964 * Set the incoming checksum information for the packet.
965 */
966 if ((status & RFD_IPDetected) &&
967 (!(status & RFD_IPError)))
968 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
969 if ((status & RFD_TCPDetected) &&
970 (!(status & RFD_TCPError)))
971 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
972 else if ((status & RFD_UDPDetected) &&
973 (!(status & RFD_UDPError)))
974 m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
975
976 #if NVLAN > 0
977 /* Check for VLAN tagged packets. */
978 if (status & RFD_VLANDetected) {
979 m->m_pkthdr.ether_vtag = RFD_TCI(status);
980 m->m_flags |= M_VLANTAG;
981 }
982 #endif
983
984 m->m_pkthdr.len = len;
985
986 ml_enqueue(&ml, m);
987 }
988
989 /* Update the receive pointer. */
990 sc->sc_rxptr = i;
991
992 if_input(ifp, &ml);
993 }
994
995 /*
996 * stge_tick:
997 *
998 * One second timer, used to tick the MII.
999 */
1000 void
stge_tick(void * arg)1001 stge_tick(void *arg)
1002 {
1003 struct stge_softc *sc = arg;
1004 int s;
1005
1006 s = splnet();
1007 mii_tick(&sc->sc_mii);
1008 stge_stats_update(sc);
1009 splx(s);
1010
1011 timeout_add_sec(&sc->sc_timeout, 1);
1012 }
1013
1014 /*
1015 * stge_stats_update:
1016 *
1017 * Read the TC9021 statistics counters.
1018 */
1019 void
stge_stats_update(struct stge_softc * sc)1020 stge_stats_update(struct stge_softc *sc)
1021 {
1022 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1023
1024 (void) CSR_READ_4(sc, STGE_OctetRcvOk);
1025
1026 ifp->if_ierrors +=
1027 (u_int) CSR_READ_2(sc, STGE_FramesLostRxErrors);
1028
1029 (void) CSR_READ_4(sc, STGE_OctetXmtdOk);
1030
1031 ifp->if_collisions +=
1032 CSR_READ_4(sc, STGE_LateCollisions) +
1033 CSR_READ_4(sc, STGE_MultiColFrames) +
1034 CSR_READ_4(sc, STGE_SingleColFrames);
1035
1036 ifp->if_oerrors +=
1037 (u_int) CSR_READ_2(sc, STGE_FramesAbortXSColls) +
1038 (u_int) CSR_READ_2(sc, STGE_FramesWEXDeferal);
1039 }
1040
1041 /*
1042 * stge_reset:
1043 *
1044 * Perform a soft reset on the TC9021.
1045 */
1046 void
stge_reset(struct stge_softc * sc)1047 stge_reset(struct stge_softc *sc)
1048 {
1049 uint32_t ac;
1050 int i;
1051
1052 ac = CSR_READ_4(sc, STGE_AsicCtrl);
1053
1054 /*
1055 * Only assert RstOut if we're fiber. We need GMII clocks
1056 * to be present in order for the reset to complete on fiber
1057 * cards.
1058 */
1059 CSR_WRITE_4(sc, STGE_AsicCtrl,
1060 ac | AC_GlobalReset | AC_RxReset | AC_TxReset |
1061 AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
1062 (sc->sc_usefiber ? AC_RstOut : 0));
1063
1064 delay(50000);
1065
1066 for (i = 0; i < STGE_TIMEOUT; i++) {
1067 delay(5000);
1068 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1069 break;
1070 }
1071
1072 if (i == STGE_TIMEOUT)
1073 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
1074
1075 delay(1000);
1076 }
1077
1078 /*
1079 * stge_init: [ ifnet interface function ]
1080 *
1081 * Initialize the interface. Must be called at splnet().
1082 */
1083 int
stge_init(struct ifnet * ifp)1084 stge_init(struct ifnet *ifp)
1085 {
1086 struct stge_softc *sc = ifp->if_softc;
1087 struct stge_descsoft *ds;
1088 int i, error = 0;
1089
1090 /*
1091 * Cancel any pending I/O.
1092 */
1093 stge_stop(ifp, 0);
1094
1095 /*
1096 * Reset the chip to a known state.
1097 */
1098 stge_reset(sc);
1099
1100 /*
1101 * Initialize the transmit descriptor ring.
1102 */
1103 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1104 for (i = 0; i < STGE_NTXDESC; i++) {
1105 sc->sc_txdescs[i].tfd_next = htole64(
1106 STGE_CDTXADDR(sc, STGE_NEXTTX(i)));
1107 sc->sc_txdescs[i].tfd_control = htole64(TFD_TFDDone);
1108 }
1109 sc->sc_txpending = 0;
1110 sc->sc_txdirty = 0;
1111 sc->sc_txlast = STGE_NTXDESC - 1;
1112
1113 /*
1114 * Initialize the receive descriptor and receive job
1115 * descriptor rings.
1116 */
1117 for (i = 0; i < STGE_NRXDESC; i++) {
1118 ds = &sc->sc_rxsoft[i];
1119 if (ds->ds_mbuf == NULL) {
1120 if ((error = stge_add_rxbuf(sc, i)) != 0) {
1121 printf("%s: unable to allocate or map rx "
1122 "buffer %d, error = %d\n",
1123 sc->sc_dev.dv_xname, i, error);
1124 /*
1125 * XXX Should attempt to run with fewer receive
1126 * XXX buffers instead of just failing.
1127 */
1128 stge_rxdrain(sc);
1129 goto out;
1130 }
1131 } else
1132 STGE_INIT_RXDESC(sc, i);
1133 }
1134 sc->sc_rxptr = 0;
1135 sc->sc_rxdiscard = 0;
1136 STGE_RXCHAIN_RESET(sc);
1137
1138 /* Set the station address. */
1139 if (sc->sc_stge1023) {
1140 CSR_WRITE_2(sc, STGE_StationAddress0,
1141 sc->sc_arpcom.ac_enaddr[0] | sc->sc_arpcom.ac_enaddr[1] << 8);
1142 CSR_WRITE_2(sc, STGE_StationAddress1,
1143 sc->sc_arpcom.ac_enaddr[2] | sc->sc_arpcom.ac_enaddr[3] << 8);
1144 CSR_WRITE_2(sc, STGE_StationAddress2,
1145 sc->sc_arpcom.ac_enaddr[4] | sc->sc_arpcom.ac_enaddr[5] << 8);
1146 } else {
1147 for (i = 0; i < ETHER_ADDR_LEN; i++)
1148 CSR_WRITE_1(sc, STGE_StationAddress0 + i,
1149 sc->sc_arpcom.ac_enaddr[i]);
1150 }
1151
1152 /*
1153 * Set the statistics masks. Disable all the RMON stats,
1154 * and disable selected stats in the non-RMON stats registers.
1155 */
1156 CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
1157 CSR_WRITE_4(sc, STGE_StatisticsMask,
1158 (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
1159 (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
1160 (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
1161 (1U << 21));
1162
1163 /* Program promiscuous mode and multicast filters. */
1164 stge_iff(sc);
1165
1166 /*
1167 * Give the transmit and receive ring to the chip.
1168 */
1169 CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0); /* NOTE: 32-bit DMA */
1170 CSR_WRITE_4(sc, STGE_TFDListPtrLo,
1171 STGE_CDTXADDR(sc, sc->sc_txdirty));
1172
1173 CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0); /* NOTE: 32-bit DMA */
1174 CSR_WRITE_4(sc, STGE_RFDListPtrLo,
1175 STGE_CDRXADDR(sc, sc->sc_rxptr));
1176
1177 /*
1178 * Initialize the Tx auto-poll period. It's OK to make this number
1179 * large (255 is the max, but we use 127) -- we explicitly kick the
1180 * transmit engine when there's actually a packet.
1181 */
1182 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
1183
1184 /* ..and the Rx auto-poll period. */
1185 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 64);
1186
1187 /* Initialize the Tx start threshold. */
1188 CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
1189
1190 /* RX DMA thresholds, from linux */
1191 CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
1192 CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
1193
1194 /* Rx early threshold, from Linux */
1195 CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
1196
1197 /* Tx DMA thresholds, from Linux */
1198 CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
1199 CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
1200
1201 /*
1202 * Initialize the Rx DMA interrupt control register. We
1203 * request an interrupt after every incoming packet, but
1204 * defer it for 32us (64 * 512 ns). When the number of
1205 * interrupts pending reaches 8, we stop deferring the
1206 * interrupt, and signal it immediately.
1207 */
1208 CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
1209 RDIC_RxFrameCount(8) | RDIC_RxDMAWaitTime(512));
1210
1211 /*
1212 * Initialize the interrupt mask.
1213 */
1214 sc->sc_IntEnable = IS_HostError | IS_TxComplete | IS_UpdateStats |
1215 IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
1216 CSR_WRITE_2(sc, STGE_IntStatus, 0xffff);
1217 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1218
1219 /*
1220 * Configure the DMA engine.
1221 * XXX Should auto-tune TxBurstLimit.
1222 */
1223 CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl |
1224 DMAC_TxBurstLimit(3));
1225
1226 /*
1227 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
1228 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
1229 * in the Rx FIFO.
1230 */
1231 CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
1232 CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);
1233
1234 /*
1235 * Set the maximum frame size.
1236 */
1237 #ifdef STGE_JUMBO
1238 CSR_WRITE_2(sc, STGE_MaxFrameSize, STGE_JUMBO_FRAMELEN);
1239 #else
1240 CSR_WRITE_2(sc, STGE_MaxFrameSize, ETHER_MAX_LEN);
1241 #endif
1242
1243 /*
1244 * Initialize MacCtrl -- do it before setting the media,
1245 * as setting the media will actually program the register.
1246 *
1247 * Note: We have to poke the IFS value before poking
1248 * anything else.
1249 */
1250 sc->sc_MACCtrl = MC_IFSSelect(0);
1251 CSR_WRITE_4(sc, STGE_MACCtrl, sc->sc_MACCtrl);
1252
1253 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1254 sc->sc_MACCtrl |= MC_AutoVLANuntagging;
1255
1256 sc->sc_MACCtrl |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
1257
1258 if (sc->sc_rev >= 6) { /* >= B.2 */
1259 /* Multi-frag frame bug work-around. */
1260 CSR_WRITE_2(sc, STGE_DebugCtrl,
1261 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);
1262
1263 /* Tx Poll Now bug work-around. */
1264 CSR_WRITE_2(sc, STGE_DebugCtrl,
1265 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
1266
1267 /* Rx Poll Now bug work-around. */
1268 CSR_WRITE_2(sc, STGE_DebugCtrl,
1269 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
1270 }
1271
1272 /*
1273 * Set the current media.
1274 */
1275 mii_mediachg(&sc->sc_mii);
1276
1277 /*
1278 * Start the one second MII clock.
1279 */
1280 timeout_add_sec(&sc->sc_timeout, 1);
1281
1282 /*
1283 * ...all done!
1284 */
1285 ifp->if_flags |= IFF_RUNNING;
1286 ifq_clr_oactive(&ifp->if_snd);
1287
1288 out:
1289 if (error)
1290 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1291 return (error);
1292 }
1293
1294 /*
1295 * stge_drain:
1296 *
1297 * Drain the receive queue.
1298 */
1299 void
stge_rxdrain(struct stge_softc * sc)1300 stge_rxdrain(struct stge_softc *sc)
1301 {
1302 struct stge_descsoft *ds;
1303 int i;
1304
1305 for (i = 0; i < STGE_NRXDESC; i++) {
1306 ds = &sc->sc_rxsoft[i];
1307 if (ds->ds_mbuf != NULL) {
1308 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1309 ds->ds_mbuf->m_next = NULL;
1310 m_freem(ds->ds_mbuf);
1311 ds->ds_mbuf = NULL;
1312 }
1313 }
1314 }
1315
1316 /*
1317 * stge_stop: [ ifnet interface function ]
1318 *
1319 * Stop transmission on the interface.
1320 */
1321 void
stge_stop(struct ifnet * ifp,int disable)1322 stge_stop(struct ifnet *ifp, int disable)
1323 {
1324 struct stge_softc *sc = ifp->if_softc;
1325 struct stge_descsoft *ds;
1326 int i;
1327
1328 /*
1329 * Stop the one second clock.
1330 */
1331 timeout_del(&sc->sc_timeout);
1332
1333 /*
1334 * Mark the interface down and cancel the watchdog timer.
1335 */
1336 ifp->if_flags &= ~IFF_RUNNING;
1337 ifq_clr_oactive(&ifp->if_snd);
1338 ifp->if_timer = 0;
1339
1340 /* Down the MII. */
1341 mii_down(&sc->sc_mii);
1342
1343 /*
1344 * Disable interrupts.
1345 */
1346 CSR_WRITE_2(sc, STGE_IntEnable, 0);
1347
1348 /*
1349 * Stop receiver, transmitter, and stats update.
1350 */
1351 CSR_WRITE_4(sc, STGE_MACCtrl,
1352 MC_StatisticsDisable | MC_TxDisable | MC_RxDisable);
1353
1354 /*
1355 * Stop the transmit and receive DMA.
1356 */
1357 stge_dma_wait(sc);
1358 CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
1359 CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
1360 CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
1361 CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);
1362
1363 /*
1364 * Release any queued transmit buffers.
1365 */
1366 for (i = 0; i < STGE_NTXDESC; i++) {
1367 ds = &sc->sc_txsoft[i];
1368 if (ds->ds_mbuf != NULL) {
1369 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1370 m_freem(ds->ds_mbuf);
1371 ds->ds_mbuf = NULL;
1372 }
1373 }
1374
1375 if (disable)
1376 stge_rxdrain(sc);
1377 }
1378
1379 static int
stge_eeprom_wait(struct stge_softc * sc)1380 stge_eeprom_wait(struct stge_softc *sc)
1381 {
1382 int i;
1383
1384 for (i = 0; i < STGE_TIMEOUT; i++) {
1385 delay(1000);
1386 if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
1387 return (0);
1388 }
1389 return (1);
1390 }
1391
1392 /*
1393 * stge_read_eeprom:
1394 *
1395 * Read data from the serial EEPROM.
1396 */
1397 void
stge_read_eeprom(struct stge_softc * sc,int offset,uint16_t * data)1398 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
1399 {
1400
1401 if (stge_eeprom_wait(sc))
1402 printf("%s: EEPROM failed to come ready\n",
1403 sc->sc_dev.dv_xname);
1404
1405 CSR_WRITE_2(sc, STGE_EepromCtrl,
1406 EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
1407 if (stge_eeprom_wait(sc))
1408 printf("%s: EEPROM read timed out\n",
1409 sc->sc_dev.dv_xname);
1410 *data = CSR_READ_2(sc, STGE_EepromData);
1411 }
1412
1413 /*
1414 * stge_add_rxbuf:
1415 *
1416 * Add a receive buffer to the indicated descriptor.
1417 */
1418 int
stge_add_rxbuf(struct stge_softc * sc,int idx)1419 stge_add_rxbuf(struct stge_softc *sc, int idx)
1420 {
1421 struct stge_descsoft *ds = &sc->sc_rxsoft[idx];
1422 struct mbuf *m;
1423 int error;
1424
1425 MGETHDR(m, M_DONTWAIT, MT_DATA);
1426 if (m == NULL)
1427 return (ENOBUFS);
1428
1429 MCLGET(m, M_DONTWAIT);
1430 if ((m->m_flags & M_EXT) == 0) {
1431 m_freem(m);
1432 return (ENOBUFS);
1433 }
1434
1435 m->m_data = m->m_ext.ext_buf + 2;
1436 m->m_len = MCLBYTES - 2;
1437
1438 if (ds->ds_mbuf != NULL)
1439 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1440
1441 ds->ds_mbuf = m;
1442
1443 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1444 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1445 if (error) {
1446 printf("%s: can't load rx DMA map %d, error = %d\n",
1447 sc->sc_dev.dv_xname, idx, error);
1448 panic("stge_add_rxbuf"); /* XXX */
1449 }
1450
1451 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1452 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1453
1454 STGE_INIT_RXDESC(sc, idx);
1455
1456 return (0);
1457 }
1458
1459 /*
1460 * stge_iff:
1461 *
1462 * Set up the receive filter.
1463 */
1464 void
stge_iff(struct stge_softc * sc)1465 stge_iff(struct stge_softc *sc)
1466 {
1467 struct arpcom *ac = &sc->sc_arpcom;
1468 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1469 struct ether_multi *enm;
1470 struct ether_multistep step;
1471 uint32_t crc;
1472 uint32_t mchash[2];
1473
1474 memset(mchash, 0, sizeof(mchash));
1475 ifp->if_flags &= ~IFF_ALLMULTI;
1476
1477 /*
1478 * Always accept broadcast packets.
1479 * Always accept frames destined to our station address.
1480 */
1481 sc->sc_ReceiveMode = RM_ReceiveBroadcast | RM_ReceiveUnicast;
1482
1483 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1484 ifp->if_flags |= IFF_ALLMULTI;
1485 if (ifp->if_flags & IFF_PROMISC)
1486 sc->sc_ReceiveMode |= RM_ReceiveAllFrames;
1487 else
1488 sc->sc_ReceiveMode |= RM_ReceiveMulticast;
1489 } else {
1490 /*
1491 * Set up the multicast address filter by passing all
1492 * multicast addresses through a CRC generator, and then
1493 * using the low-order 6 bits as an index into the 64 bit
1494 * multicast hash table. The high order bits select the
1495 * register, while the rest of the bits select the bit
1496 * within the register.
1497 */
1498 sc->sc_ReceiveMode |= RM_ReceiveMulticastHash;
1499
1500 ETHER_FIRST_MULTI(step, ac, enm);
1501 while (enm != NULL) {
1502 crc = ether_crc32_be(enm->enm_addrlo,
1503 ETHER_ADDR_LEN);
1504
1505 /* Just want the 6 least significant bits. */
1506 crc &= 0x3f;
1507
1508 /* Set the corresponding bit in the hash table. */
1509 mchash[crc >> 5] |= 1 << (crc & 0x1f);
1510
1511 ETHER_NEXT_MULTI(step, enm);
1512 }
1513 }
1514
1515 CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
1516 CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
1517 CSR_WRITE_2(sc, STGE_ReceiveMode, sc->sc_ReceiveMode);
1518 }
1519
1520 /*
1521 * stge_mii_readreg: [mii interface function]
1522 *
1523 * Read a PHY register on the MII of the TC9021.
1524 */
1525 int
stge_mii_readreg(struct device * self,int phy,int reg)1526 stge_mii_readreg(struct device *self, int phy, int reg)
1527 {
1528
1529 return (mii_bitbang_readreg(self, &stge_mii_bitbang_ops, phy, reg));
1530 }
1531
1532 /*
1533 * stge_mii_writereg: [mii interface function]
1534 *
1535 * Write a PHY register on the MII of the TC9021.
1536 */
1537 void
stge_mii_writereg(struct device * self,int phy,int reg,int val)1538 stge_mii_writereg(struct device *self, int phy, int reg, int val)
1539 {
1540
1541 mii_bitbang_writereg(self, &stge_mii_bitbang_ops, phy, reg, val);
1542 }
1543
1544 /*
1545 * stge_mii_statchg: [mii interface function]
1546 *
1547 * Callback from MII layer when media changes.
1548 */
1549 void
stge_mii_statchg(struct device * self)1550 stge_mii_statchg(struct device *self)
1551 {
1552 struct stge_softc *sc = (struct stge_softc *) self;
1553 struct mii_data *mii = &sc->sc_mii;
1554
1555 sc->sc_MACCtrl &= ~(MC_DuplexSelect | MC_RxFlowControlEnable |
1556 MC_TxFlowControlEnable);
1557
1558 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
1559 sc->sc_MACCtrl |= MC_DuplexSelect;
1560
1561 if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
1562 sc->sc_MACCtrl |= MC_RxFlowControlEnable;
1563 if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
1564 sc->sc_MACCtrl |= MC_TxFlowControlEnable;
1565
1566 CSR_WRITE_4(sc, STGE_MACCtrl, sc->sc_MACCtrl);
1567 }
1568
1569 /*
1570 * sste_mii_bitbang_read: [mii bit-bang interface function]
1571 *
1572 * Read the MII serial port for the MII bit-bang module.
1573 */
1574 uint32_t
stge_mii_bitbang_read(struct device * self)1575 stge_mii_bitbang_read(struct device *self)
1576 {
1577 struct stge_softc *sc = (void *) self;
1578
1579 return (CSR_READ_1(sc, STGE_PhyCtrl));
1580 }
1581
1582 /*
1583 * stge_mii_bitbang_write: [mii big-bang interface function]
1584 *
1585 * Write the MII serial port for the MII bit-bang module.
1586 */
1587 void
stge_mii_bitbang_write(struct device * self,uint32_t val)1588 stge_mii_bitbang_write(struct device *self, uint32_t val)
1589 {
1590 struct stge_softc *sc = (void *) self;
1591
1592 CSR_WRITE_1(sc, STGE_PhyCtrl, val | sc->sc_PhyCtrl);
1593 }
1594
1595 /*
1596 * stge_mediastatus: [ifmedia interface function]
1597 *
1598 * Get the current interface media status.
1599 */
1600 void
stge_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)1601 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1602 {
1603 struct stge_softc *sc = ifp->if_softc;
1604
1605 mii_pollstat(&sc->sc_mii);
1606 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1607 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1608 }
1609
1610 /*
1611 * stge_mediachange: [ifmedia interface function]
1612 *
1613 * Set hardware to newly-selected media.
1614 */
1615 int
stge_mediachange(struct ifnet * ifp)1616 stge_mediachange(struct ifnet *ifp)
1617 {
1618 struct stge_softc *sc = ifp->if_softc;
1619
1620 if (ifp->if_flags & IFF_UP)
1621 mii_mediachg(&sc->sc_mii);
1622 return (0);
1623 }
1624