xref: /openbsd/sys/dev/pci/if_bce.c (revision d89ec533)
1 /* $OpenBSD: if_bce.c,v 1.53 2020/07/10 13:22:20 patrick Exp $ */
2 /* $NetBSD: if_bce.c,v 1.3 2003/09/29 01:53:02 mrg Exp $	 */
3 
4 /*
5  * Copyright (c) 2003 Clifford Wright. All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Broadcom BCM440x 10/100 ethernet (broadcom.com)
33  * SiliconBackplane is technology from Sonics, Inc.(sonicsinc.com)
34  *
35  * Cliff Wright cliff@snipe444.org
36  */
37 
38 #include "bpfilter.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/timeout.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/device.h>
48 #include <sys/socket.h>
49 
50 #include <net/if.h>
51 #include <net/if_media.h>
52 
53 #include <netinet/in.h>
54 #include <netinet/if_ether.h>
55 #if NBPFILTER > 0
56 #include <net/bpf.h>
57 #endif
58 
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
61 #include <dev/pci/pcidevs.h>
62 
63 #include <dev/mii/mii.h>
64 #include <dev/mii/miivar.h>
65 #include <dev/mii/miidevs.h>
66 
67 #include <dev/pci/if_bcereg.h>
68 
69 #include <uvm/uvm.h>
70 
/*
 * Ring descriptor as consumed by the BCM440x DMA engine: one
 * little-endian control word (CTRL_* flags | byte count) plus the
 * DMA address of the packet buffer.
 */
struct bce_dma_slot {
	u_int32_t ctrl;		/* CTRL_* flags and buffer byte count */
	u_int32_t addr;		/* buffer DMA address (callers add a 0x40000000 bias) */
};
76 #define CTRL_BC_MASK	0x1fff	/* buffer byte count */
77 #define CTRL_EOT	0x10000000	/* end of descriptor table */
78 #define CTRL_IOC	0x20000000	/* interrupt on completion */
79 #define CTRL_EOF	0x40000000	/* end of frame */
80 #define CTRL_SOF	0x80000000	/* start of frame */
81 
82 #define BCE_RXBUF_LEN	(MCLBYTES - 4)
83 
/*
 * Packet status is returned in a pre-packet header that the chip
 * writes at the start of each receive buffer; the frame data itself
 * begins BCE_PREPKT_HEADER_SIZE bytes in.
 */
struct rx_pph {
	u_int16_t len;		/* frame length reported by the chip */
	u_int16_t flags;	/* RXF_* receive status bits */
	u_int16_t pad[12];	/* padding up to the packet data */
};
90 
91 #define	BCE_PREPKT_HEADER_SIZE		30
92 
93 /* packet status flags bits */
94 #define RXF_NO				0x8	/* odd number of nibbles */
95 #define RXF_RXER			0x4	/* receive symbol error */
96 #define RXF_CRC				0x2	/* crc error */
97 #define RXF_OV				0x1	/* fifo overflow */
98 
99 /* number of descriptors used in a ring */
100 #define BCE_NRXDESC		64
101 #define BCE_NTXDESC		64
102 
103 #define BCE_TIMEOUT		100	/* # 10us for mii read/write */
104 
/* per-device driver state */
struct bce_softc {
	struct device		bce_dev;
	bus_space_tag_t		bce_btag;
	bus_space_handle_t	bce_bhandle;
	bus_dma_tag_t		bce_dmatag;
	struct arpcom		bce_ac;		/* interface info */
	void			*bce_intrhand;
	struct pci_attach_args	bce_pa;
	struct mii_data		bce_mii;
	u_int32_t		bce_phy;	/* eeprom indicated phy */
	struct bce_dma_slot	*bce_rx_ring;	/* receive ring */
	struct bce_dma_slot	*bce_tx_ring;	/* transmit ring */
	caddr_t			bce_data;	/* rx then tx packet buffers, MCLBYTES each */
	bus_dmamap_t		bce_ring_map;	/* map covering both descriptor rings */
	bus_dmamap_t		bce_rxdata_map;	/* map for the rx packet buffers */
	bus_dmamap_t		bce_txdata_map;	/* map for the tx packet buffers */
	u_int32_t		bce_intmask;	/* current intr mask */
	u_int32_t		bce_rxin;	/* last rx descriptor seen */
	u_int32_t		bce_txin;	/* last tx descriptor seen */
	int			bce_txsfree;	/* no. tx slots available */
	int			bce_txsnext;	/* next available tx slot */
	struct timeout		bce_timeout;	/* periodic bce_tick timer */
};
128 
129 int	bce_probe(struct device *, void *, void *);
130 void	bce_attach(struct device *, struct device *, void *);
131 int	bce_activate(struct device *, int);
132 int	bce_ioctl(struct ifnet *, u_long, caddr_t);
133 void	bce_start(struct ifnet *);
134 void	bce_watchdog(struct ifnet *);
135 int	bce_intr(void *);
136 void	bce_rxintr(struct bce_softc *);
137 void	bce_txintr(struct bce_softc *);
138 int	bce_init(struct ifnet *);
139 void	bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
140 void	bce_add_rxbuf(struct bce_softc *, int);
141 void	bce_stop(struct ifnet *);
142 void	bce_reset(struct bce_softc *);
143 void	bce_iff(struct ifnet *);
144 int	bce_mii_read(struct device *, int, int);
145 void	bce_mii_write(struct device *, int, int, int);
146 void	bce_statchg(struct device *);
147 int	bce_mediachange(struct ifnet *);
148 void	bce_mediastatus(struct ifnet *, struct ifmediareq *);
149 void	bce_tick(void *);
150 
151 #ifdef BCE_DEBUG
152 #define DPRINTF(x)	do {		\
153 	if (bcedebug)			\
154 		printf x;		\
155 } while (/* CONSTCOND */ 0)
156 #define DPRINTFN(n,x)	do {		\
157 	if (bcedebug >= (n))		\
158 		printf x;		\
159 } while (/* CONSTCOND */ 0)
160 int	bcedebug = 0;
161 #else
162 #define DPRINTF(x)
163 #define DPRINTFN(n,x)
164 #endif
165 
/* autoconf(9) glue */
struct cfattach bce_ca = {
	sizeof(struct bce_softc), bce_probe, bce_attach, NULL, bce_activate
};
struct cfdriver bce_cd = {
	NULL, "bce", DV_IFNET
};

/* PCI IDs of the supported BCM440x variants */
const struct pci_matchid bce_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
};
178 
179 int
180 bce_probe(struct device *parent, void *match, void *aux)
181 {
182 	return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
183 	    nitems(bce_devices)));
184 }
185 
186 void
187 bce_attach(struct device *parent, struct device *self, void *aux)
188 {
189 	struct bce_softc *sc = (struct bce_softc *) self;
190 	struct pci_attach_args *pa = aux;
191 	pci_chipset_tag_t pc = pa->pa_pc;
192 	pci_intr_handle_t ih;
193 	const char *intrstr = NULL;
194 	caddr_t kva;
195 	bus_dma_segment_t seg;
196 	int rseg;
197 	struct ifnet *ifp;
198 	pcireg_t memtype;
199 	bus_addr_t memaddr;
200 	bus_size_t memsize;
201 	int pmreg;
202 	pcireg_t pmode;
203 	int error;
204 
205 	sc->bce_pa = *pa;
206 	sc->bce_dmatag = pa->pa_dmat;
207 
208 	/*
209 	 * Map control/status registers.
210 	 */
211 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
212 	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
213 	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
214 		printf(": unable to find mem space\n");
215 		return;
216 	}
217 
218 	/* Get it out of power save mode if needed. */
219 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
220 		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
221 		if (pmode == 3) {
222 			/*
223 			 * The card has lost all configuration data in
224 			 * this state, so punt.
225 			 */
226 			printf(": unable to wake up from power state D3\n");
227 			return;
228 		}
229 		if (pmode != 0) {
230 			printf(": waking up from power state D%d\n",
231 			    pmode);
232 			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
233 		}
234 	}
235 
236 	if (pci_intr_map(pa, &ih)) {
237 		printf(": couldn't map interrupt\n");
238 		return;
239 	}
240 
241 	intrstr = pci_intr_string(pc, ih);
242 	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
243 	    self->dv_xname);
244 	if (sc->bce_intrhand == NULL) {
245 		printf(": couldn't establish interrupt");
246 		if (intrstr != NULL)
247 			printf(" at %s", intrstr);
248 		printf("\n");
249 		return;
250 	}
251 
252 	/* reset the chip */
253 	bce_reset(sc);
254 
255 	/* Create the data DMA region and maps. */
256 	if ((sc->bce_data = (caddr_t)uvm_km_kmemalloc_pla(kernel_map,
257 	    uvm.kernel_object, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, 0,
258 	    UVM_KMF_NOWAIT, 0, (paddr_t)(0x40000000 - 1), 0, 0, 1)) == NULL) {
259 		printf(": unable to alloc space for ring");
260 		return;
261 	}
262 
263 	/* create a dma map for the RX ring */
264 	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NRXDESC * MCLBYTES,
265 	    1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
266 	    &sc->bce_rxdata_map))) {
267 		printf(": unable to create ring DMA map, error = %d\n", error);
268 		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
269 		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
270 		return;
271 	}
272 
273 	/* connect the ring space to the dma map */
274 	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data,
275 	    BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) {
276 		printf(": unable to load rx ring DMA map\n");
277 		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
278 		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
279 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
280 		return;
281 	}
282 
283 	/* create a dma map for the TX ring */
284 	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NTXDESC * MCLBYTES,
285 	    1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
286 	    &sc->bce_txdata_map))) {
287 		printf(": unable to create ring DMA map, error = %d\n", error);
288 		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
289 		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
290 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
291 		return;
292 	}
293 
294 	/* connect the ring space to the dma map */
295 	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_txdata_map,
296 	    sc->bce_data + BCE_NRXDESC * MCLBYTES,
297 	    BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) {
298 		printf(": unable to load tx ring DMA map\n");
299 		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
300 		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
301 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
302 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
303 		return;
304 	}
305 
306 
307 	/*
308 	 * Allocate DMA-safe memory for ring descriptors.
309 	 * The receive, and transmit rings can not share the same
310 	 * 4k space, however both are allocated at once here.
311 	 */
312 	/*
313 	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
314 	 * due to the limition above. ??
315 	 */
316 	if ((error = bus_dmamem_alloc_range(sc->bce_dmatag, 2 * PAGE_SIZE,
317 	    PAGE_SIZE, 2 * PAGE_SIZE, &seg, 1, &rseg, BUS_DMA_NOWAIT,
318 	    (bus_addr_t)0, (bus_addr_t)0x3fffffff))) {
319 		printf(": unable to alloc space for ring descriptors, "
320 		    "error = %d\n", error);
321 		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
322 		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
323 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
324 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
325 		return;
326 	}
327 
328 	/* map ring space to kernel */
329 	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
330 	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
331 		printf(": unable to map DMA buffers, error = %d\n", error);
332 		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
333 		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
334 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
335 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
336 		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
337 		return;
338 	}
339 
340 	/* create a dma map for the ring */
341 	if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1,
342 	    2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map))) {
343 		printf(": unable to create ring DMA map, error = %d\n", error);
344 		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
345 		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
346 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
347 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
348 		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
349 		return;
350 	}
351 
352 	/* connect the ring space to the dma map */
353 	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
354 	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
355 		printf(": unable to load ring DMA map\n");
356 		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
357 		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
358 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
359 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
360 		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
361 		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
362 		return;
363 	}
364 
365 	/* save the ring space in softc */
366 	sc->bce_rx_ring = (struct bce_dma_slot *)kva;
367 	sc->bce_tx_ring = (struct bce_dma_slot *)(kva + PAGE_SIZE);
368 
369 	/* Set up ifnet structure */
370 	ifp = &sc->bce_ac.ac_if;
371 	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
372 	ifp->if_softc = sc;
373 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
374 	ifp->if_ioctl = bce_ioctl;
375 	ifp->if_start = bce_start;
376 	ifp->if_watchdog = bce_watchdog;
377 
378 	ifp->if_capabilities = IFCAP_VLAN_MTU;
379 
380 	/* MAC address */
381 	sc->bce_ac.ac_enaddr[0] =
382 	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
383 	sc->bce_ac.ac_enaddr[1] =
384 	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
385 	sc->bce_ac.ac_enaddr[2] =
386 	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
387 	sc->bce_ac.ac_enaddr[3] =
388 	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
389 	sc->bce_ac.ac_enaddr[4] =
390 	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
391 	sc->bce_ac.ac_enaddr[5] =
392 	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);
393 
394 	printf(": %s, address %s\n", intrstr,
395 	    ether_sprintf(sc->bce_ac.ac_enaddr));
396 
397 	/* Initialize our media structures and probe the MII. */
398 	sc->bce_mii.mii_ifp = ifp;
399 	sc->bce_mii.mii_readreg = bce_mii_read;
400 	sc->bce_mii.mii_writereg = bce_mii_write;
401 	sc->bce_mii.mii_statchg = bce_statchg;
402 	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
403 	    bce_mediastatus);
404 	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
405 	    MII_OFFSET_ANY, 0);
406 	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
407 		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
408 		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
409 	} else
410 		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);
411 
412 	/* get the phy */
413 	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
414 	    BCE_PHY) & 0x1f;
415 
416 	/*
417 	 * Enable activity led.
418 	 * XXX This should be in a phy driver, but not currently.
419 	 */
420 	bce_mii_write((struct device *) sc, 1, 26,	 /* MAGIC */
421 	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	 /* MAGIC */
422 
423 	/* enable traffic meter led mode */
424 	bce_mii_write((struct device *) sc, 1, 27,	 /* MAGIC */
425 	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	 /* MAGIC */
426 
427 	/* Attach the interface */
428 	if_attach(ifp);
429 	ether_ifattach(ifp);
430 
431 	timeout_set(&sc->bce_timeout, bce_tick, sc);
432 }
433 
434 int
435 bce_activate(struct device *self, int act)
436 {
437 	struct bce_softc *sc = (struct bce_softc *)self;
438 	struct ifnet *ifp = &sc->bce_ac.ac_if;
439 
440 	switch (act) {
441 	case DVACT_SUSPEND:
442 		if (ifp->if_flags & IFF_RUNNING)
443 			bce_stop(ifp);
444 		break;
445 	case DVACT_RESUME:
446 		if (ifp->if_flags & IFF_UP) {
447 			bce_init(ifp);
448 			bce_start(ifp);
449 		}
450 		break;
451 	}
452 
453 	return (0);
454 }
455 
/*
 * Handle interface ioctls: address assignment, up/down transitions,
 * media selection, and the generic ethernet requests.  Runs at splnet.
 */
int
bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bce_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			bce_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* already running: only the rx filter needs updating */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				bce_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bce_stop(ifp);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->bce_ac, cmd, data);
	}

	/* ENETRESET means only the promisc/multicast filter changed */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			bce_iff(ifp);
		error = 0;
	}

	splx(s);
	return error;
}
503 
/* Start packet transmission on the interface. */
void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	u_int32_t ctrl;
	int txstart;
	int txsfree;
	int newpkts = 0;

	/*
	 * do not start another if currently transmitting, and more
	 * descriptors(tx slots) are needed for next packet.
	 */
	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	/* determine number of descriptors available (one slot kept empty) */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {

		/* Grab a packet off the queue. */
		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		/*
		 * Copy the mbuf chain into the per-slot DMA buffer;
		 * tx buffers start after the BCE_NRXDESC rx buffers in
		 * bce_data, so each packet uses exactly one descriptor.
		 */
		m_copydata(m0, 0, m0->m_pkthdr.len, sc->bce_data +
		    (sc->bce_txsnext + BCE_NRXDESC) * MCLBYTES);
		ctrl = m0->m_pkthdr.len & CTRL_BC_MASK;
		ctrl |= CTRL_SOF | CTRL_EOF | CTRL_IOC;

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
		/* mbuf no longer needed, data was copied above */
		m_freem(m0);

		/* Sync the data DMA map (offset is relative to the tx map). */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
		    sc->bce_txsnext * MCLBYTES, MCLBYTES, BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;

		/* the last ring entry also marks end-of-table for the chip */
		if (sc->bce_txsnext == BCE_NTXDESC - 1)
			ctrl |= CTRL_EOT;
		sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
		sc->bce_tx_ring[sc->bce_txsnext].addr =
		    htole32(sc->bce_txdata_map->dm_segs[0].ds_addr +
		    sc->bce_txsnext * MCLBYTES + 0x40000000);	/* MAGIC */
		if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
			sc->bce_txsnext = 0;
		else
			sc->bce_txsnext++;
		txsfree--;

		/* sync descriptors being used (tx ring is in the second page) */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
		    sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
		    sizeof(struct bce_dma_slot),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip by advancing the tail pointer. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifq_set_oactive(&ifp->if_snd);
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
596 
597 /* Watchdog timer handler. */
598 void
599 bce_watchdog(struct ifnet *ifp)
600 {
601 	struct bce_softc *sc = ifp->if_softc;
602 
603 	printf("%s: device timeout\n", sc->bce_dev.dv_xname);
604 	ifp->if_oerrors++;
605 
606 	(void) bce_init(ifp);
607 
608 	/* Try to get more packets going. */
609 	bce_start(ifp);
610 }
611 
/*
 * Interrupt handler: loop acking and servicing rx/tx completions
 * until no masked causes remain; a chip error schedules a reinit.
 */
int
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u_int32_t intstatus;
	int wantinit;
	int handled = 0;

	sc = xsc;
	ifp = &sc->bce_ac.ac_if;


	for (wantinit = 0; wantinit == 0;) {
		intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_INT_STS);

		/* ignore if not ours, or unsolicited interrupts */
		intstatus &= sc->bce_intmask;
		if (intstatus == 0)
			break;

		handled = 1;

		/* Ack interrupt (write-1-to-clear, before servicing) */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
		    intstatus);

		/* Receive interrupts. */
		if (intstatus & I_RI)
			bce_rxintr(sc);
		/* Transmit interrupts. */
		if (intstatus & I_XI)
			bce_txintr(sc);
		/* Error interrupts: report each cause, then reinit the chip */
		if (intstatus & ~(I_RI | I_XI)) {
			if (intstatus & I_XU)
				printf("%s: transmit fifo underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_RO) {
				printf("%s: receive fifo overflow\n",
				    sc->bce_dev.dv_xname);
				ifp->if_ierrors++;
			}
			if (intstatus & I_RU)
				printf("%s: receive descriptor underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_DE)
				printf("%s: descriptor protocol error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PD)
				printf("%s: data error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PC)
				printf("%s: descriptor error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_TO)
				printf("%s: general purpose timeout\n",
				    sc->bce_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			bce_init(ifp);
		/* Try to get more packets going. */
		bce_start(ifp);
	}
	return (handled);
}
683 
684 /* Receive interrupt handler */
685 void
686 bce_rxintr(struct bce_softc *sc)
687 {
688 	struct ifnet *ifp = &sc->bce_ac.ac_if;
689 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
690 	struct rx_pph *pph;
691 	struct mbuf *m;
692 	int curr;
693 	int len;
694 	int i;
695 
696 	/* get pointer to active receive slot */
697 	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
698 	    & RS_CD_MASK;
699 	curr = curr / sizeof(struct bce_dma_slot);
700 	if (curr >= BCE_NRXDESC)
701 		curr = BCE_NRXDESC - 1;
702 
703 	/* process packets up to but not current packet being worked on */
704 	for (i = sc->bce_rxin; i != curr; i = (i + 1) % BCE_NRXDESC) {
705 		/* complete any post dma memory ops on packet */
706 		bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map,
707 		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTREAD);
708 
709 		/*
710 		 * If the packet had an error, simply recycle the buffer,
711 		 * resetting the len, and flags.
712 		 */
713 		pph = (struct rx_pph *)(sc->bce_data + i * MCLBYTES);
714 		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
715 			ifp->if_ierrors++;
716 			pph->len = 0;
717 			pph->flags = 0;
718 			continue;
719 		}
720 		/* receive the packet */
721 		len = pph->len;
722 		if (len == 0)
723 			continue;	/* no packet if empty */
724 		pph->len = 0;
725 		pph->flags = 0;
726 
727  		/*
728 		 * The chip includes the CRC with every packet.  Trim
729 		 * it off here.
730 		 */
731 		len -= ETHER_CRC_LEN;
732 
733 		m = m_devget(sc->bce_data + i * MCLBYTES +
734 		    BCE_PREPKT_HEADER_SIZE, len, ETHER_ALIGN);
735 
736 		ml_enqueue(&ml, m);
737 
738 		/* re-check current in case it changed */
739 		curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
740 		    BCE_DMA_RXSTATUS) & RS_CD_MASK) /
741 		    sizeof(struct bce_dma_slot);
742 		if (curr >= BCE_NRXDESC)
743 			curr = BCE_NRXDESC - 1;
744 	}
745 
746 	if_input(ifp, &ml);
747 
748 	sc->bce_rxin = curr;
749 }
750 
/*
 * Transmit interrupt handler.  The mbufs were already freed when the
 * frames were copied into the DMA buffers in bce_start(), so all that
 * remains is to sync the completed buffers and advance bce_txin.
 */
void
bce_txintr(struct bce_softc *sc)
{
	struct ifnet   *ifp = &sc->bce_ac.ac_if;
	int curr;
	int i;

	/* slots are being reclaimed, so output may proceed again */
	ifq_clr_oactive(&ifp->if_snd);

	/*
	 * Go through the Tx list and finish post-DMA handling of
	 * frames which have been transmitted.
	 */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_DMA_TXSTATUS) & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NTXDESC)
		curr = BCE_NTXDESC - 1;
	for (i = sc->bce_txin; i != curr; i = (i + 1) % BCE_NTXDESC) {
		/* do any post dma memory ops on transmit data */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTWRITE);
	}
	sc->bce_txin = curr;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer
	 */
	if (sc->bce_txsnext == sc->bce_txin)
		ifp->if_timer = 0;
}
784 
/*
 * Initialize the interface: program the PCI/Sonics glue, reset the
 * chip, set up both DMA rings, program the rx filter and enable the
 * MAC.  Also used to recover after errors and watchdog timeouts.
 */
int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	u_int32_t reg_win;
	int i;

	/* Cancel any pending I/O. */
	bce_stop(ifp);

	/* enable pci interrupts, bursts, and prefetch */

	/* remap the pci registers to the Sonics config registers */

	/* save the current map, so it can be restored */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* set register window to Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);

	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);

	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);

	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;

	/* enable crc32 generation and set proper LED modes */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CRC32_ENAB | BCE_EMC_LED);

	/* reset or clear powerdown control bit  */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
	    ~BCE_EMC_PDOWN);

	/* setup DMA interrupt control */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24);	/* MAGIC */

	/* program promiscuous mode and multicast filters */
	bce_iff(ifp);

	/* set max frame length, account for possible VLAN tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);

	/* enable transmit; tx ring occupies the second descriptor page */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000);	/* MAGIC */

	/*
	 * Give the receive ring to the chip, and
	 * start the receive DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive; chip prepends BCE_PREPKT_HEADER_SIZE status bytes */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);		/* MAGIC */

	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++)
		bce_add_rxbuf(sc, i);

	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);

	/* start the receive dma */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));

	/* set media */
	mii_mediachg(&sc->bce_mii);

	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	timeout_add_sec(&sc->bce_timeout, 1);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
905 
/*
 * Add a mac address to the chip's packet filter.
 * mac is a 6-byte ethernet address; idx selects the filter table slot.
 */
void
bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
{
	int i;
	u_int32_t rval;

	/* load the low 4 and high 2 bytes of the address */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
	    mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
	    mac[0] << 8 | mac[1] | 0x10000);	/* MAGIC */
	/* kick off the write to the selected slot */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
	    idx << 16 | 8);	/* MAGIC */
	/* wait for write to complete (poll up to 100 * 10us) */
	for (i = 0; i < 100; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL);
		if (!(rval & 0x80000000))	/* MAGIC: presumably busy bit */
			break;
		delay(10);
	}
	if (i == 100) {
		printf("%s: timed out writing pkt filter ctl\n",
		   sc->bce_dev.dv_xname);
	}
}
932 
/* Add (or recycle) a receive buffer for the indicated descriptor. */
void
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
	struct bce_dma_slot *bced = &sc->bce_rx_ring[idx];

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, idx * MCLBYTES,
	    MCLBYTES, BUS_DMASYNC_PREREAD);

	/* clear the start of the pre-packet header so stale status is gone */
	*(u_int32_t *)(sc->bce_data + idx * MCLBYTES) = 0;
	bced->addr = htole32(sc->bce_rxdata_map->dm_segs[0].ds_addr +
	    idx * MCLBYTES + 0x40000000);	/* MAGIC: same bias as tx side */
	if (idx != (BCE_NRXDESC - 1))
		bced->ctrl = htole32(BCE_RXBUF_LEN);
	else
		/* the last descriptor also marks the end of the table */
		bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
	    sizeof(struct bce_dma_slot) * idx,
	    sizeof(struct bce_dma_slot),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

}
956 
/*
 * Stop transmission on the interface: cancel timers, mask interrupts,
 * disable the MAC (waiting for it to quiesce) and halt both DMA engines.
 */
void
bce_stop(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;
	u_int32_t val;

	/* Stop the 1 second timer */
	timeout_del(&sc->bce_timeout);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->bce_mii);

	/* Disable interrupts. */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
	sc->bce_intmask = 0;
	delay(10);

	/* Disable emac, then poll until the disable request clears */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
	for (i = 0; i < 200; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_ENET_CTL);
		if (!(val & EC_ED))
			break;
		delay(10);
	}

	/* Stop the DMA */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
	delay(10);
}
996 
/*
 * Reset the chip: quiesce the MAC and DMA engines, reset the
 * SiliconBackplane core, clear error state, and re-enable the clock
 * and PHY.  The register write ordering and delays below follow the
 * documented Sonics backplane reset sequence — do not reorder.
 */
void
bce_reset(struct bce_softc *sc)
{
	u_int32_t val;
	u_int32_t sbval;
	int i;

	/* if SB core is up */
	sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
		/* Core is running: quiesce it before resetting. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
		    0);

		/* disable emac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ED);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ED))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out disabling ethernet mac\n",
			    sc->bce_dev.dv_xname);

		/* reset the dma engines */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
		    0);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS);
		/* if error on receive, wait to go idle */
		if (val & RS_ERROR) {
			for (i = 0; i < 100; i++) {
				val = bus_space_read_4(sc->bce_btag,
				    sc->bce_bhandle, BCE_DMA_RXSTATUS);
				if (val & RS_DMA_IDLE)
					break;
				delay(10);
			}
			if (i == 100)
				printf("%s: receive dma did not go idle after"
				    " error\n", sc->bce_dev.dv_xname);
		}
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		   BCE_DMA_RXSTATUS, 0);

		/* reset ethernet mac */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		/* wait for the self-clearing reset bit to drop (~2ms max) */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out resetting ethernet mac\n",
			    sc->bce_dev.dv_xname);
	} else {
		/* Core not up yet: do first-time backplane setup instead. */
		u_int32_t reg_win;

		/* remap the pci registers to the Sonics config registers */

		/* save the current map, so it can be restored */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* set register window to Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBINTVEC) | SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
		    reg_win);
	}

	/* disable SB core if not in reset */
	if (!(sbval & SBTML_RESET)) {

		/* set the reject bit */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, reject did not set\n",
			    sc->bce_dev.dv_xname);
		/* wait until busy is clear */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & 0x4))
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, busy did not clear\n",
			    sc->bce_dev.dv_xname);
		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		/* read back to flush the posted write before the delay */
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}
	/* enable clock */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & 1)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,
		    0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* leave clock enabled */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* initialize MDC preamble, frequency */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d);	/* MAGIC */

	/* enable phy, differs for internal, and external */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
	if (!(val & BCE_DC_IP)) {
		/* select external phy */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_EP);
	} else if (val & BCE_DC_ER) {	/* internal, clear reset bit if on */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
		    val & ~BCE_DC_ER);
		delay(100);
	}
}
1168 
1169 /* Set up the receive filter. */
1170 void
1171 bce_iff(struct ifnet *ifp)
1172 {
1173 	struct bce_softc *sc = ifp->if_softc;
1174 	struct arpcom *ac = &sc->bce_ac;
1175 	u_int32_t rxctl;
1176 
1177 	rxctl = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL);
1178 	rxctl &= ~(ERC_AM | ERC_DB | ERC_PE);
1179 	ifp->if_flags |= IFF_ALLMULTI;
1180 
1181 	/* disable the filter */
1182 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 0);
1183 
1184 	/* add our own address */
1185 	bce_add_mac(sc, ac->ac_enaddr, 0);
1186 
1187 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
1188 		ifp->if_flags |= IFF_ALLMULTI;
1189 		if (ifp->if_flags & IFF_PROMISC)
1190 			rxctl |= ERC_PE;
1191 		else
1192 			rxctl |= ERC_AM;
1193 	}
1194 
1195 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, rxctl);
1196 
1197 	/* enable the filter */
1198 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
1199 	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL) | 1);
1200 }
1201 
1202 /* Read a PHY register on the MII. */
1203 int
1204 bce_mii_read(struct device *self, int phy, int reg)
1205 {
1206 	struct bce_softc *sc = (struct bce_softc *) self;
1207 	int i;
1208 	u_int32_t val;
1209 
1210 	/* clear mii_int */
1211 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1212 	    BCE_MIINTR);
1213 
1214 	/* Read the PHY register */
1215 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1216 	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
1217 	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg));	/* MAGIC */
1218 
1219 	for (i = 0; i < BCE_TIMEOUT; i++) {
1220 		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1221 		    BCE_MI_STS);
1222 		if (val & BCE_MIINTR)
1223 			break;
1224 		delay(10);
1225 	}
1226 	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1227 	if (i == BCE_TIMEOUT) {
1228 		printf("%s: PHY read timed out reading phy %d, reg %d, val = "
1229 		    "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1230 		return (0);
1231 	}
1232 	return (val & BCE_MICOMM_DATA);
1233 }
1234 
1235 /* Write a PHY register on the MII */
1236 void
1237 bce_mii_write(struct device *self, int phy, int reg, int val)
1238 {
1239 	struct bce_softc *sc = (struct bce_softc *) self;
1240 	int i;
1241 	u_int32_t rval;
1242 
1243 	/* clear mii_int */
1244 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1245 	    BCE_MIINTR);
1246 
1247 	/* Write the PHY register */
1248 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1249 	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
1250 	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |	/* MAGIC */
1251 	    BCE_MIPHY(phy) | BCE_MIREG(reg));
1252 
1253 	/* wait for write to complete */
1254 	for (i = 0; i < BCE_TIMEOUT; i++) {
1255 		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1256 		    BCE_MI_STS);
1257 		if (rval & BCE_MIINTR)
1258 			break;
1259 		delay(10);
1260 	}
1261 	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1262 	if (i == BCE_TIMEOUT) {
1263 		printf("%s: PHY timed out writing phy %d, reg %d, val "
1264 		    "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1265 	}
1266 }
1267 
1268 /* sync hardware duplex mode to software state */
1269 void
1270 bce_statchg(struct device *self)
1271 {
1272 	struct bce_softc *sc = (struct bce_softc *) self;
1273 	u_int32_t reg;
1274 
1275 	/* if needed, change register to match duplex mode */
1276 	reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
1277 	if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
1278 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1279 		    reg | EXC_FD);
1280 	else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
1281 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1282 		    reg & ~EXC_FD);
1283 
1284 	/*
1285 	 * Enable activity led.
1286 	 * XXX This should be in a phy driver, but not currently.
1287 	 */
1288 	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
1289 	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	/* MAGIC */
1290 	/* enable traffic meter led mode */
1291 	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
1292 	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	/* MAGIC */
1293 }
1294 
1295 /* Set hardware to newly-selected media */
1296 int
1297 bce_mediachange(struct ifnet *ifp)
1298 {
1299 	struct bce_softc *sc = ifp->if_softc;
1300 
1301 	if (ifp->if_flags & IFF_UP)
1302 		mii_mediachg(&sc->bce_mii);
1303 	return (0);
1304 }
1305 
1306 /* Get the current interface media status */
1307 void
1308 bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1309 {
1310 	struct bce_softc *sc = ifp->if_softc;
1311 
1312 	mii_pollstat(&sc->bce_mii);
1313 	ifmr->ifm_active = sc->bce_mii.mii_media_active;
1314 	ifmr->ifm_status = sc->bce_mii.mii_media_status;
1315 }
1316 
1317 /* One second timer, checks link status */
1318 void
1319 bce_tick(void *v)
1320 {
1321 	struct bce_softc *sc = v;
1322 	int s;
1323 
1324 	s = splnet();
1325 	mii_tick(&sc->bce_mii);
1326 	splx(s);
1327 
1328 	timeout_add_sec(&sc->bce_timeout, 1);
1329 }
1330