/*	$OpenBSD: aic6915.c,v 1.25 2023/11/10 15:51:20 bluhm Exp $	*/
/*	$NetBSD: aic6915.c,v 1.15 2005/12/24 20:27:29 perry Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915.h>

void	sf_start(struct ifnet *);
void	sf_watchdog(struct ifnet *);
int	sf_ioctl(struct ifnet *, u_long, caddr_t);
int	sf_init(struct ifnet *);
void	sf_stop(struct ifnet *, int);

void	sf_txintr(struct sf_softc *);
void	sf_rxintr(struct sf_softc *);
void	sf_stats_update(struct sf_softc *);

void	sf_reset(struct sf_softc *);
void	sf_macreset(struct sf_softc *);
void	sf_rxdrain(struct sf_softc *);
int	sf_add_rxbuf(struct sf_softc *, int);
uint8_t	sf_read_eeprom(struct sf_softc *, int);
void	sf_set_filter(struct sf_softc *);

int	sf_mii_read(struct device *, int, int);
void	sf_mii_write(struct device *, int, int, int);
void	sf_mii_statchg(struct device *);

void	sf_tick(void *);

int	sf_mediachange(struct ifnet *);
void	sf_mediastatus(struct ifnet *, struct ifmediareq *);

uint32_t sf_reg_read(struct sf_softc *, bus_addr_t);
void	sf_reg_write(struct sf_softc *, bus_addr_t, uint32_t);

void	sf_set_filter_perfect(struct sf_softc *, int, uint8_t *);
void	sf_set_filter_hash(struct sf_softc *, uint8_t *);

struct cfdriver sf_cd = {
	NULL, "sf", DV_IFNET
};

#define sf_funcreg_read(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define sf_funcreg_write(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

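/*
 * sf_reg_read:
 *
 *	Read a chip register.  When the chip is I/O mapped, only a small
 *	window of the register space is directly visible, so registers
 *	are reached indirectly: the offset is written to
 *	SF_IndirectIoAccess and the data transferred through
 *	SF_IndirectIoDataPort.
 */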
uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

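/*
 * sf_reg_write:
 *
 *	Write a chip register, using the same indirect access window
 *	as sf_reg_read() when the chip is I/O mapped.
 */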
void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define sf_genreg_read(sc, reg) \
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define sf_genreg_write(sc, reg, val) \
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	timeout_set(&sc->sc_mii_timeout, sf_tick, sc);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			printf("%s: unable to sub-region functional "
			    "registers, error = %d\n", sc->sc_dev.dv_xname,
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
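	/*
	 * The address is stored starting at EEPROM offset 15 in
	 * reverse byte order, so read it from the far end back.
	 */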
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf(", address %s\n", ether_sprintf(enaddr));

#ifdef DEBUG
	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);
#endif

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, sf_mediachange,
	    sf_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	ifq_init_maxlen(&ifp->if_snd, SF_NTXDESC_MASK);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		m0 = ifq_deq_begin(&ifp->if_snd);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				ifq_deq_rollback(&ifp->if_snd, m0);
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					ifq_deq_rollback(&ifp->if_snd, m0);
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				ifq_deq_rollback(&ifp->if_snd, m0);
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				m_freem(m);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		ifq_deq_commit(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
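		/*
		 * This is a type-0 descriptor (struct sf_txdesc0):
		 * word 0 carries the ID, the CRC-enable bit, and the
		 * frame length; word 1 the fragment count; then one
		 * (address, length) pair per DMA segment.
		 */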
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifq_set_oactive(&ifp->if_snd);
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
sf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct sf_softc *sc = (struct sf_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			sf_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ((ifp->if_flags ^ sc->sc_flags) &
			     IFF_PROMISC)) {
				sf_set_filter(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					sf_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sf_stop(ifp, 1);
		}
		sc->sc_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				printf("%s: WARNING: DMA error\n",
				    sc->sc_dev.dv_xname);
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
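				/*
				 * The threshold is counted in units of
				 * 16 bytes; raise it one step and
				 * program the new value into both the
				 * frame CSR and the Tx FIFO threshold.
				 */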
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
#ifdef DEBUG
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->sc_txthresh * 16);
#endif
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
				    sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_arpcom.ac_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_arpcom.ac_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifq_clr_oactive(&ifp->if_snd);

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = letoh32(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			printf("%s: Tx queue mismatch, index %d\n",
			    sc->sc_dev.dv_xname, txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	    CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = letoh32(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifndef __STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __STRICT_ALIGNMENT */

		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	    CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	timeout_add_sec(&sc->sc_mii_timeout, 1);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t *p;
	u_int i;

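	/*
	 * struct sf_stats mirrors the chip's contiguous block of 32-bit
	 * statistics registers, so fill it by walking the block word by
	 * word, clearing each counter as it is read so that it can't
	 * wrap between updates.
	 */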
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		    PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
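	/*
	 * Program completion queue 1 for type-3 entries, the
	 * full-information layout that struct sf_rcd_full describes.
	 */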
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	timeout_add_sec(&sc->sc_mii_timeout, 1);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

 out:
	if (error) {
		ifp->if_flags &= ~IFF_RUNNING;
		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	timeout_del(&sc->sc_mii_timeout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
	sc->sc_txpending = 0;

	if (disable)
		sf_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

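	/*
	 * EEPROM bytes are visible four at a time through 32-bit
	 * registers, so fetch the word holding the requested byte
	 * and shift the right octet down.
	 */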
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("sf_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

void
sf_set_filter_perfect(struct sf_softc *sc, int slot, uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

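	/*
	 * Each perfect-filter slot is 0x10 bytes wide and takes the
	 * address as three 16-bit words, written byte-reversed: the
	 * last two address bytes land in the first word.
	 */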
	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

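	/*
	 * The top 9 bits of the big-endian CRC select one of 512 hash
	 * bits: the upper 5 pick one of 32 registers (each on a 0x10
	 * byte stride), the lower 4 the bit within that register.
	 */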
	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
void
sf_set_filter(struct sf_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, LLADDR(ifp->if_sadl));

	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ac, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
int
sf_mii_read(struct device *self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

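	/*
	 * Each (phy, reg) pair has its own memory-mapped MII register;
	 * poll it until the data-valid bit reports a completed cycle.
	 */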
	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
void
sf_mii_write(struct device *self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		    MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
void
sf_mii_statchg(struct device *self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;

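	/*
	 * Pick the duplex setting and a matching back-to-back
	 * inter-packet gap, write MacConfig1, reset the MAC, and
	 * then program the new IPG.
	 */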
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}

/*
 * sf_mediastatus:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request current media status.
 */
void
sf_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sf_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * sf_mediachange:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request new media setting.
 */
int
sf_mediachange(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}