/* $OpenBSD: if_bce.c,v 1.57 2024/08/31 16:23:09 deraadt Exp $ */
/* $NetBSD: if_bce.c,v 1.3 2003/09/29 01:53:02 mrg Exp $ */

/*
 * Copyright (c) 2003 Clifford Wright. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Broadcom BCM440x 10/100 ethernet (broadcom.com)
 * SiliconBackplane is technology from Sonics, Inc. (sonicsinc.com)
 *
 * Cliff Wright cliff@snipe444.org
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/if_bcereg.h>

#include <uvm/uvm.h>

/* ring descriptor */
struct bce_dma_slot {
	u_int32_t ctrl;
	u_int32_t addr;
};
#define CTRL_BC_MASK	0x1fff		/* buffer byte count */
#define CTRL_EOT	0x10000000	/* end of descriptor table */
#define CTRL_IOC	0x20000000	/* interrupt on completion */
#define CTRL_EOF	0x40000000	/* end of frame */
#define CTRL_SOF	0x80000000	/* start of frame */

#define BCE_RXBUF_LEN	(MCLBYTES - 4)
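
/*
 * Addresses handed to the chip's DMA engine must be in the Sonics
 * backplane address space, where PCI memory is visible through a
 * window based at 0x40000000; that constant is therefore added to
 * every buffer and ring address given to the hardware below (the
 * "MAGIC" offsets).
 */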

/* Packet status is returned in a pre-packet header */
struct rx_pph {
	u_int16_t len;
	u_int16_t flags;
	u_int16_t pad[12];
};

#define BCE_PREPKT_HEADER_SIZE	30
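
/*
 * The chip DMAs the rx_pph status header above into the first
 * BCE_PREPKT_HEADER_SIZE bytes of each receive buffer (the offset is
 * programmed into BCE_DMA_RXCTL in bce_init()); the frame data follows.
 */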

/* packet status flags bits */
#define RXF_NO		0x8	/* odd number of nibbles */
#define RXF_RXER	0x4	/* receive symbol error */
#define RXF_CRC		0x2	/* crc error */
#define RXF_OV		0x1	/* fifo overflow */

/* number of descriptors used in a ring */
#define BCE_NRXDESC	64
#define BCE_NTXDESC	64

#define BCE_TIMEOUT	100	/* # of 10us delays for mii read/write */

struct bce_softc {
	struct device		bce_dev;
	bus_space_tag_t		bce_btag;
	bus_space_handle_t	bce_bhandle;
	bus_dma_tag_t		bce_dmatag;
	struct arpcom		bce_ac;		/* interface info */
	void			*bce_intrhand;
	struct pci_attach_args	bce_pa;
	struct mii_data		bce_mii;
	u_int32_t		bce_phy;	/* eeprom indicated phy */
	struct bce_dma_slot	*bce_rx_ring;	/* receive ring */
	struct bce_dma_slot	*bce_tx_ring;	/* transmit ring */
	caddr_t			bce_data;
	bus_dmamap_t		bce_ring_map;
	bus_dmamap_t		bce_rxdata_map;
	bus_dmamap_t		bce_txdata_map;
	u_int32_t		bce_intmask;	/* current intr mask */
	u_int32_t		bce_rxin;	/* last rx descriptor seen */
	u_int32_t		bce_txin;	/* last tx descriptor seen */
	int			bce_txsfree;	/* no. tx slots available */
	int			bce_txsnext;	/* next available tx slot */
	struct timeout		bce_timeout;
};

int	bce_probe(struct device *, void *, void *);
void	bce_attach(struct device *, struct device *, void *);
int	bce_activate(struct device *, int);
int	bce_ioctl(struct ifnet *, u_long, caddr_t);
void	bce_start(struct ifnet *);
void	bce_watchdog(struct ifnet *);
int	bce_intr(void *);
void	bce_rxintr(struct bce_softc *);
void	bce_txintr(struct bce_softc *);
int	bce_init(struct ifnet *);
void	bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
void	bce_add_rxbuf(struct bce_softc *, int);
void	bce_stop(struct ifnet *);
void	bce_reset(struct bce_softc *);
void	bce_iff(struct ifnet *);
int	bce_mii_read(struct device *, int, int);
void	bce_mii_write(struct device *, int, int, int);
void	bce_statchg(struct device *);
int	bce_mediachange(struct ifnet *);
void	bce_mediastatus(struct ifnet *, struct ifmediareq *);
void	bce_tick(void *);

#ifdef BCE_DEBUG
#define DPRINTF(x)	do {		\
	if (bcedebug)			\
		printf x;		\
} while (/* CONSTCOND */ 0)
#define DPRINTFN(n,x)	do {		\
	if (bcedebug >= (n))		\
		printf x;		\
} while (/* CONSTCOND */ 0)
int	bcedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

const struct cfattach bce_ca = {
	sizeof(struct bce_softc), bce_probe, bce_attach, NULL, bce_activate
};
struct cfdriver bce_cd = {
	NULL, "bce", DV_IFNET
};

const struct pci_matchid bce_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
};

int
bce_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
	    nitems(bce_devices)));
}

void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	caddr_t kva;
	bus_dma_segment_t seg;
	int rseg;
	struct ifnet *ifp;
	pcireg_t memtype;
	bus_addr_t memaddr;
	bus_size_t memsize;
	int pmreg;
	pcireg_t pmode;
	int error;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
		printf(": unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n",
			    pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);
	if (sc->bce_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);

	/* Create the data DMA region and maps. */
	if ((sc->bce_data = (caddr_t)uvm_km_kmemalloc_pla(kernel_map,
	    uvm.kernel_object, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, 0,
	    UVM_KMF_NOWAIT, 0, (paddr_t)(0x40000000 - 1), 0, 0, 1)) == NULL) {
		printf(": unable to alloc space for packet data\n");
		return;
	}
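
	/*
	 * Like the descriptor rings allocated below, the data pages must
	 * lie in the first 1GB of PCI address space (hence the
	 * 0x40000000 - 1 limit above), so that adding the backplane DMA
	 * offset cannot carry out of the window.
	 */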

	/* create a dma map for the RX data buffers */
	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NRXDESC * MCLBYTES,
	    1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->bce_rxdata_map))) {
		printf(": unable to create rx data DMA map, error = %d\n",
		    error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		return;
	}

	/* connect the rx data buffers to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data,
	    BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) {
		printf(": unable to load rx data DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		return;
	}

	/* create a dma map for the TX data buffers */
	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NTXDESC * MCLBYTES,
	    1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->bce_txdata_map))) {
		printf(": unable to create tx data DMA map, error = %d\n",
		    error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		return;
	}

	/* connect the tx data buffers to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_txdata_map,
	    sc->bce_data + BCE_NRXDESC * MCLBYTES,
	    BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) {
		printf(": unable to load tx data DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		return;
	}

	/*
	 * Allocate DMA-safe memory for the ring descriptors.
	 * The receive and transmit rings cannot share the same 4k space,
	 * although both are allocated at once here.
	 *
	 * XXX PAGE_SIZE per ring is wasteful; we only need 1KB + 1KB,
	 * but the restriction above forces each ring into its own page.
	 */
	if ((error = bus_dmamem_alloc_range(sc->bce_dmatag, 2 * PAGE_SIZE,
	    PAGE_SIZE, 2 * PAGE_SIZE, &seg, 1, &rseg, BUS_DMA_NOWAIT,
	    (bus_addr_t)0, (bus_addr_t)0x3fffffff))) {
		printf(": unable to alloc space for ring descriptors, "
		    "error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf(": unable to map DMA buffers, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1,
	    2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf(": unable to load ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in softc */
	sc->bce_rx_ring = (struct bce_dma_slot *)kva;
	sc->bce_tx_ring = (struct bce_dma_slot *)(kva + PAGE_SIZE);

	/* Set up ifnet structure */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* MAC address */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));

	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */

	/* enable traffic meter led mode */
	bce_mii_write((struct device *) sc, 1, 27,	/* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bce_timeout, bce_tick, sc);
}

int
bce_activate(struct device *self, int act)
{
	struct bce_softc *sc = (struct bce_softc *)self;
	struct ifnet *ifp = &sc->bce_ac.ac_if;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			bce_stop(ifp);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP) {
			bce_init(ifp);
			bce_start(ifp);
		}
		break;
	}
	return (0);
}

/* handle media and ethernet requests */
int
bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			bce_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				bce_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bce_stop(ifp);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->bce_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			bce_iff(ifp);
		error = 0;
	}

	splx(s);
	return (error);
}

/* Start packet transmission on the interface. */
void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	u_int32_t ctrl;
	int txstart;
	int txsfree;
	int newpkts = 0;

	/*
	 * Do not start another transmission if the interface is not
	 * running, or if output is already stalled waiting for more
	 * descriptors (tx slots) to come free.
	 */
	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	/* determine number of descriptors available */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;
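
	/*
	 * One slot is always left unused so that a completely full ring
	 * can be told apart from an empty one; hence the "- 1" in the
	 * calculations above.
	 */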

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {

		/* Grab a packet off the queue. */
		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		/*
		 * copy mbuf chain into DMA memory buffer.
		 */
		m_copydata(m0, 0, m0->m_pkthdr.len, sc->bce_data +
		    (sc->bce_txsnext + BCE_NRXDESC) * MCLBYTES);
		ctrl = m0->m_pkthdr.len & CTRL_BC_MASK;
		ctrl |= CTRL_SOF | CTRL_EOF | CTRL_IOC;

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
		/* mbuf no longer needed */
		m_freem(m0);

		/* Sync the data DMA map. */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
		    sc->bce_txsnext * MCLBYTES, MCLBYTES,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;

		if (sc->bce_txsnext == BCE_NTXDESC - 1)
			ctrl |= CTRL_EOT;
		sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
		sc->bce_tx_ring[sc->bce_txsnext].addr =
		    htole32(sc->bce_txdata_map->dm_segs[0].ds_addr +
		    sc->bce_txsnext * MCLBYTES + 0x40000000); /* MAGIC */
		if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
			sc->bce_txsnext = 0;
		else
			sc->bce_txsnext++;
		txsfree--;

		/* sync descriptors being used */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
		    sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
		    sizeof(struct bce_dma_slot),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));
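
		/*
		 * The descriptor pointer register holds the offset just
		 * past the last descriptor the chip may process, which is
		 * why the already-advanced bce_txsnext is written above.
		 */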

		newpkts++;
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifq_set_oactive(&ifp->if_snd);
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/* Watchdog timer handler. */
void
bce_watchdog(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->bce_dev.dv_xname);
	ifp->if_oerrors++;

	(void) bce_init(ifp);

	/* Try to get more packets going. */
	bce_start(ifp);
}

int
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u_int32_t intstatus;
	int wantinit;
	int handled = 0;

	sc = xsc;
	ifp = &sc->bce_ac.ac_if;

	for (wantinit = 0; wantinit == 0;) {
		intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_INT_STS);

		/* ignore interrupts that are not ours, or are unsolicited */
		intstatus &= sc->bce_intmask;
		if (intstatus == 0)
			break;

		handled = 1;

		/* Ack interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
		    intstatus);

		/* Receive interrupts. */
		if (intstatus & I_RI)
			bce_rxintr(sc);
		/* Transmit interrupts. */
		if (intstatus & I_XI)
			bce_txintr(sc);
		/* Error interrupts */
		if (intstatus & ~(I_RI | I_XI)) {
			if (intstatus & I_XU)
				printf("%s: transmit fifo underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_RO) {
				printf("%s: receive fifo overflow\n",
				    sc->bce_dev.dv_xname);
				ifp->if_ierrors++;
			}
			if (intstatus & I_RU)
				printf("%s: receive descriptor underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_DE)
				printf("%s: descriptor protocol error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PD)
				printf("%s: data error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PC)
				printf("%s: descriptor error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_TO)
				printf("%s: general purpose timeout\n",
				    sc->bce_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			bce_init(ifp);
		/* Try to get more packets going. */
		bce_start(ifp);
	}
	return (handled);
}

/* Receive interrupt handler */
void
bce_rxintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->bce_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct rx_pph *pph;
	struct mbuf *m;
	int curr;
	int len;
	int i;

	/* get pointer to active receive slot */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
	    & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NRXDESC)
		curr = BCE_NRXDESC - 1;

	/*
	 * Process packets up to, but not including, the one the chip
	 * is currently working on.
	 */
	for (i = sc->bce_rxin; i != curr; i = (i + 1) % BCE_NRXDESC) {
		/* complete any post dma memory ops on packet */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map,
		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTREAD);

		/*
		 * If the packet had an error, simply recycle the buffer,
		 * resetting the len and flags.
		 */
		pph = (struct rx_pph *)(sc->bce_data + i * MCLBYTES);
		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
			ifp->if_ierrors++;
			pph->len = 0;
			pph->flags = 0;
			continue;
		}
		/* receive the packet */
		len = pph->len;
		if (len == 0)
			continue;	/* no packet if empty */
		pph->len = 0;
		pph->flags = 0;

		/*
		 * The chip includes the CRC with every packet.  Trim
		 * it off here.
		 */
		len -= ETHER_CRC_LEN;

		m = m_devget(sc->bce_data + i * MCLBYTES +
		    BCE_PREPKT_HEADER_SIZE, len, ETHER_ALIGN);

		ml_enqueue(&ml, m);

		/* re-check current in case it changed */
		curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS) & RS_CD_MASK) /
		    sizeof(struct bce_dma_slot);
		if (curr >= BCE_NRXDESC)
			curr = BCE_NRXDESC - 1;
	}

	if_input(ifp, &ml);

	sc->bce_rxin = curr;
}

/* Transmit interrupt handler */
void
bce_txintr(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->bce_ac.ac_if;
	int curr;
	int i;

	ifq_clr_oactive(&ifp->if_snd);

	/*
	 * Go through the Tx ring and sync the buffers of frames that
	 * have been transmitted (their mbufs were already freed in
	 * bce_start()).
	 */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_DMA_TXSTATUS) & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NTXDESC)
		curr = BCE_NTXDESC - 1;
	for (i = sc->bce_txin; i != curr; i = (i + 1) % BCE_NTXDESC) {
		/* do any post dma memory ops on transmit data */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTWRITE);
	}
	sc->bce_txin = curr;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->bce_txsnext == sc->bce_txin)
		ifp->if_timer = 0;
}

/* initialize the interface */
int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	u_int32_t reg_win;
	int i;

	/* Cancel any pending I/O. */
	bce_stop(ifp);

	/* enable pci interrupts, bursts, and prefetch */

	/* remap the pci registers to the Sonics config registers */

	/* save the current map, so it can be restored */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* set register window to Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);

	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);

	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);

	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;

	/* enable crc32 generation and set proper LED modes */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CRC32_ENAB | BCE_EMC_LED);

	/* reset or clear powerdown control bit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
	    ~BCE_EMC_PDOWN);

	/* setup DMA interrupt control */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
	    1 << 24);	/* MAGIC */

	/* program promiscuous mode and multicast filters */
	bce_iff(ifp);

	/* set max frame length, account for possible VLAN tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);

	/* enable transmit */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000); /* MAGIC */

	/*
	 * Give the receive ring to the chip, and
	 * start the receive DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
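	/*
	 * The header size shifted into the receive-offset field above
	 * tells the DMA engine to leave room at the head of each buffer
	 * for the rx_pph status header.
	 */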
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000); /* MAGIC */

	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++)
		bce_add_rxbuf(sc, i);

	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);

	/* start the receive dma */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));
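	/*
	 * As on the transmit side, the descriptor pointer is exclusive:
	 * writing the offset of the end of the ring makes every receive
	 * descriptor available to the chip.
	 */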

	/* set media */
	mii_mediachg(&sc->bce_mii);

	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	timeout_add_sec(&sc->bce_timeout, 1);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}

/* add a mac address to packet filter */
void
bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
{
	int i;
	u_int32_t rval;

	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
	    mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
	    mac[0] << 8 | mac[1] | 0x10000);	/* MAGIC */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
	    idx << 16 | 8);	/* MAGIC */
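	/*
	 * This looks like a CAM write: bit 16 of BCE_FILT_HI appears to
	 * mark the entry valid, while BCE_FILT_CTL selects the table
	 * index in its upper half and the low bits trigger the write.
	 */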
	/* wait for write to complete */
	for (i = 0; i < 100; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL);
		if (!(rval & 0x80000000))	/* MAGIC */
			break;
		delay(10);
	}
	if (i == 100) {
		printf("%s: timed out writing pkt filter ctl\n",
		    sc->bce_dev.dv_xname);
	}
}

/* Add a receive buffer to the indicated descriptor. */
void
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
	struct bce_dma_slot *bced = &sc->bce_rx_ring[idx];

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, idx * MCLBYTES,
	    MCLBYTES, BUS_DMASYNC_PREREAD);

	/* clear the pre-packet status header */
	*(u_int32_t *)(sc->bce_data + idx * MCLBYTES) = 0;
	bced->addr = htole32(sc->bce_rxdata_map->dm_segs[0].ds_addr +
	    idx * MCLBYTES + 0x40000000);
	if (idx != (BCE_NRXDESC - 1))
		bced->ctrl = htole32(BCE_RXBUF_LEN);
	else
		bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
	    sizeof(struct bce_dma_slot) * idx,
	    sizeof(struct bce_dma_slot),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/* Stop transmission on the interface */
void
bce_stop(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;
	u_int32_t val;

	/* Stop the 1 second timer */
	timeout_del(&sc->bce_timeout);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->bce_mii);

	/* Disable interrupts. */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
	sc->bce_intmask = 0;
	delay(10);

	/* Disable emac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
	for (i = 0; i < 200; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_ENET_CTL);
		if (!(val & EC_ED))
			break;
		delay(10);
	}

	/* Stop the DMA */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
	delay(10);
}

/* reset the chip */
void
bce_reset(struct bce_softc *sc)
{
	u_int32_t val;
	u_int32_t sbval;
	int i;

	/* if SB core is up */
	sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
		    0);

		/* disable emac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ED);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ED))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out disabling ethernet mac\n",
			    sc->bce_dev.dv_xname);

		/* reset the dma engines */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
		    0);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS);
		/* if error on receive, wait to go idle */
		if (val & RS_ERROR) {
			for (i = 0; i < 100; i++) {
				val = bus_space_read_4(sc->bce_btag,
				    sc->bce_bhandle, BCE_DMA_RXSTATUS);
				if (val & RS_DMA_IDLE)
					break;
				delay(10);
			}
			if (i == 100)
				printf("%s: receive dma did not go idle after"
				    " error\n", sc->bce_dev.dv_xname);
		}
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS, 0);

		/* reset ethernet mac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out resetting ethernet mac\n",
			    sc->bce_dev.dv_xname);
	} else {
		u_int32_t reg_win;

		/* remap the pci registers to the Sonics config registers */

		/* save the current map, so it can be restored */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* set register window to Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBINTVEC) | SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
		    reg_win);
	}

	/* disable SB core if not in reset */
	if (!(sbval & SBTML_RESET)) {

		/* set the reject bit */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, reject did not set\n",
			    sc->bce_dev.dv_xname);
		/* wait until busy is clear */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & 0x4))
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, busy did not clear\n",
			    sc->bce_dev.dv_xname);
		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}
	/* enable clock */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & 1)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,
		    0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* leave clock enabled */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* initialize MDC preamble, frequency */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d);	/* MAGIC */

	/* enable phy; this differs for internal and external phys */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
	if (!(val & BCE_DC_IP)) {
		/* select external phy */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_EP);
	} else if (val & BCE_DC_ER) {	/* internal, clear reset bit if on */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
		    val & ~BCE_DC_ER);
		delay(100);
	}
}

/* Set up the receive filter. */
void
bce_iff(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct arpcom *ac = &sc->bce_ac;
	u_int32_t rxctl;

	rxctl = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL);
	rxctl &= ~(ERC_AM | ERC_DB | ERC_PE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* disable the filter */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 0);

	/* add our own address */
	bce_add_mac(sc, ac->ac_enaddr, 0);

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxctl |= ERC_PE;
		else
			rxctl |= ERC_AM;
	}

	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, rxctl);

	/* enable the filter */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL) | 1);
}

/* Read a PHY register on the MII. */
int
bce_mii_read(struct device *self, int phy, int reg)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	int i;
	u_int32_t val;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Read the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg)); /* MAGIC */
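	/*
	 * The word written above is a standard clause 22 MII management
	 * frame: start bits in 31:30, opcode in 29:28, PHY and register
	 * addresses below those, turnaround bits in 17:16, and data in
	 * the low 16 bits.
	 */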

	for (i = 0; i < BCE_TIMEOUT; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (val & BCE_MIINTR)
			break;
		delay(10);
	}
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		printf("%s: PHY read timed out reading phy %d, reg %d, val = "
		    "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
		return (0);
	}
	return (val & BCE_MICOMM_DATA);
}

/* Write a PHY register on the MII */
void
bce_mii_write(struct device *self, int phy, int reg, int val)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	int i;
	u_int32_t rval;

	/* clear mii_int */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
	    BCE_MIINTR);

	/* Write the PHY register */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |	/* MAGIC */
	    BCE_MIPHY(phy) | BCE_MIREG(reg));

	/* wait for write to complete */
	for (i = 0; i < BCE_TIMEOUT; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_MI_STS);
		if (rval & BCE_MIINTR)
			break;
		delay(10);
	}
	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
	if (i == BCE_TIMEOUT) {
		printf("%s: PHY timed out writing phy %d, reg %d, val "
		    "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
	}
}

/* sync hardware duplex mode to software state */
void
bce_statchg(struct device *self)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	u_int32_t reg;

	/* if needed, change register to match duplex mode */
	reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
	if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg | EXC_FD);
	else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
		    reg & ~EXC_FD);

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff); /* MAGIC */
	/* enable traffic meter led mode (register 27, as in bce_attach()) */
	bce_mii_write((struct device *) sc, 1, 27,	/* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6)); /* MAGIC */
}

/* Set hardware to newly-selected media */
int
bce_mediachange(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->bce_mii);
	return (0);
}

/* Get the current interface media status */
void
bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bce_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->bce_mii);
	ifmr->ifm_active = sc->bce_mii.mii_media_active;
	ifmr->ifm_status = sc->bce_mii.mii_media_status;
}

/* One second timer, checks link status */
void
bce_tick(void *v)
{
	struct bce_softc *sc = v;
	int s;

	s = splnet();
	mii_tick(&sc->bce_mii);
	splx(s);

	timeout_add_sec(&sc->bce_timeout, 1);
}