xref: /openbsd/sys/dev/pci/if_bce.c (revision 8529ddd3)
1 /* $OpenBSD: if_bce.c,v 1.45 2015/04/13 08:45:48 mpi Exp $ */
2 /* $NetBSD: if_bce.c,v 1.3 2003/09/29 01:53:02 mrg Exp $	 */
3 
4 /*
5  * Copyright (c) 2003 Clifford Wright. All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Broadcom BCM440x 10/100 ethernet (broadcom.com)
33  * SiliconBackplane is technology from Sonics, Inc.(sonicsinc.com)
34  *
35  * Cliff Wright cliff@snipe444.org
36  */
37 
38 #include "bpfilter.h"
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/timeout.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/device.h>
48 #include <sys/socket.h>
49 
50 #include <net/if.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 
54 #include <netinet/in.h>
55 #include <netinet/if_ether.h>
56 #if NBPFILTER > 0
57 #include <net/bpf.h>
58 #endif
59 
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pcidevs.h>
63 
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
66 #include <dev/mii/miidevs.h>
67 
68 #include <dev/pci/if_bcereg.h>
69 
70 #include <uvm/uvm.h>
71 
/*
 * ring descriptor, shared with the chip; both fields are stored
 * little-endian (see the htole32() calls at the use sites)
 */
struct bce_dma_slot {
	u_int32_t ctrl;		/* CTRL_* flags | buffer byte count */
	u_int32_t addr;		/* 32-bit PCI bus address of the buffer */
};
#define CTRL_BC_MASK	0x1fff	/* buffer byte count */
#define CTRL_EOT	0x10000000	/* end of descriptor table */
#define CTRL_IOC	0x20000000	/* interrupt on completion */
#define CTRL_EOF	0x40000000	/* end of frame */
#define CTRL_SOF	0x80000000	/* start of frame */

/* usable rx buffer size within one MCLBYTES slot */
#define BCE_RXBUF_LEN	(MCLBYTES - 4)
84 
/* Packet status is returned in a pre-packet header */
struct rx_pph {
	u_int16_t len;		/* frame length, written by the chip */
	u_int16_t flags;	/* RXF_* receive status bits */
	u_int16_t pad[12];	/* pads header to BCE_PREPKT_HEADER_SIZE */
};

/* bytes of chip-written header preceding each received frame */
#define	BCE_PREPKT_HEADER_SIZE		30

/* packet status flags bits */
#define RXF_NO				0x8	/* odd number of nibbles */
#define RXF_RXER			0x4	/* receive symbol error */
#define RXF_CRC				0x2	/* crc error */
#define RXF_OV				0x1	/* fifo overflow */

/* number of descriptors used in a ring */
#define BCE_NRXDESC		64
#define BCE_NTXDESC		64

#define BCE_TIMEOUT		100	/* # 10us for mii read/write */
105 
/* per-device driver state */
struct bce_softc {
	struct device		bce_dev;	/* generic autoconf device */
	bus_space_tag_t		bce_btag;	/* register space tag */
	bus_space_handle_t	bce_bhandle;	/* register space handle */
	bus_dma_tag_t		bce_dmatag;	/* DMA tag from PCI bus */
	struct arpcom		bce_ac;		/* interface info */
	void			*bce_intrhand;	/* interrupt handler cookie */
	struct pci_attach_args	bce_pa;		/* saved PCI attach args */
	struct mii_data		bce_mii;	/* MII / media state */
	u_int32_t		bce_phy;	/* eeprom indicated phy */
	struct bce_dma_slot	*bce_rx_ring;	/* receive ring */
	struct bce_dma_slot	*bce_tx_ring;	/* transmit ring */
	caddr_t			bce_data;	/* rx then tx packet buffers */
	bus_dmamap_t		bce_ring_map;	/* map covering both rings */
	bus_dmamap_t		bce_rxdata_map;	/* map for rx data area */
	bus_dmamap_t		bce_txdata_map;	/* map for tx data area */
	u_int32_t		bce_intmask;	/* current intr mask */
	u_int32_t		bce_rxin;	/* last rx descriptor seen */
	u_int32_t		bce_txin;	/* last tx descriptor seen */
	int			bce_txsfree;	/* no. tx slots available */
	int			bce_txsnext;	/* next available tx slot */
	struct timeout		bce_timeout;	/* one-second tick timer */
};
129 
130 int	bce_probe(struct device *, void *, void *);
131 void	bce_attach(struct device *, struct device *, void *);
132 int	bce_activate(struct device *, int);
133 int	bce_ioctl(struct ifnet *, u_long, caddr_t);
134 void	bce_start(struct ifnet *);
135 void	bce_watchdog(struct ifnet *);
136 int	bce_intr(void *);
137 void	bce_rxintr(struct bce_softc *);
138 void	bce_txintr(struct bce_softc *);
139 int	bce_init(struct ifnet *);
140 void	bce_add_mac(struct bce_softc *, u_int8_t *, unsigned long);
141 void	bce_add_rxbuf(struct bce_softc *, int);
142 void	bce_stop(struct ifnet *);
143 void	bce_reset(struct bce_softc *);
144 void	bce_iff(struct ifnet *);
145 int	bce_mii_read(struct device *, int, int);
146 void	bce_mii_write(struct device *, int, int, int);
147 void	bce_statchg(struct device *);
148 int	bce_mediachange(struct ifnet *);
149 void	bce_mediastatus(struct ifnet *, struct ifmediareq *);
150 void	bce_tick(void *);
151 
152 #ifdef BCE_DEBUG
153 #define DPRINTF(x)	do {		\
154 	if (bcedebug)			\
155 		printf x;		\
156 } while (/* CONSTCOND */ 0)
157 #define DPRINTFN(n,x)	do {		\
158 	if (bcedebug >= (n))		\
159 		printf x;		\
160 } while (/* CONSTCOND */ 0)
161 int	bcedebug = 0;
162 #else
163 #define DPRINTF(x)
164 #define DPRINTFN(n,x)
165 #endif
166 
/* autoconf attachment glue */
struct cfattach bce_ca = {
	sizeof(struct bce_softc), bce_probe, bce_attach, NULL, bce_activate
};
/* driver definition: network interface device class */
struct cfdriver bce_cd = {
	NULL, "bce", DV_IFNET
};

/* PCI IDs of the supported BCM440x variants */
const struct pci_matchid bce_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B1 }
};
179 
180 int
181 bce_probe(struct device *parent, void *match, void *aux)
182 {
183 	return (pci_matchbyid((struct pci_attach_args *)aux, bce_devices,
184 	    nitems(bce_devices)));
185 }
186 
/*
 * Attach: map the chip's registers, wake it from power-save, hook up
 * the interrupt, allocate the DMA data area and descriptor rings,
 * read the station address and attach the ethernet interface and MII.
 * On any failure the routine prints a diagnostic, unwinds what it
 * allocated so far, and returns without attaching.
 */
void
bce_attach(struct device *parent, struct device *self, void *aux)
{
	struct bce_softc *sc = (struct bce_softc *) self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	caddr_t kva;
	bus_dma_segment_t seg;
	int rseg;
	struct ifnet *ifp;
	pcireg_t memtype;
	bus_addr_t memaddr;
	bus_size_t memsize;
	int pmreg;
	pcireg_t pmode;
	int error;

	sc->bce_pa = *pa;
	sc->bce_dmatag = pa->pa_dmat;

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BCE_PCI_BAR0);
	if (pci_mapreg_map(pa, BCE_PCI_BAR0, memtype, 0, &sc->bce_btag,
	    &sc->bce_bhandle, &memaddr, &memsize, 0)) {
		printf(": unable to find mem space\n");
		return;
	}

	/* Get it out of power save mode if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf(": unable to wake up from power state D3\n");
			return;
		}
		if (pmode != 0) {
			printf(": waking up from power state D%d\n",
			    pmode);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->bce_intrhand = pci_intr_establish(pc, ih, IPL_NET, bce_intr, sc,
	    self->dv_xname);
	if (sc->bce_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* reset the chip */
	bce_reset(sc);

	/*
	 * Create the data DMA region and maps.  The rx buffers come
	 * first, then the tx buffers, one MCLBYTES slot per descriptor.
	 * The region is constrained below 1GB physical; the chip's DMA
	 * addresses are offset by 0x40000000 at the use sites.
	 */
	/* XXX(review): this error message lacks a trailing newline */
	if ((sc->bce_data = (caddr_t)uvm_km_kmemalloc_pla(kernel_map,
	    uvm.kernel_object, (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES, 0,
	    UVM_KMF_NOWAIT, 0, (paddr_t)(0x40000000 - 1), 0, 0, 1)) == NULL) {
		printf(": unable to alloc space for ring");
		return;
	}

	/* create a dma map for the RX ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NRXDESC * MCLBYTES,
	    1, BCE_NRXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->bce_rxdata_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_rxdata_map, sc->bce_data,
	    BCE_NRXDESC * MCLBYTES, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT)) {
		printf(": unable to load rx ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		return;
	}

	/* create a dma map for the TX ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, BCE_NTXDESC * MCLBYTES,
	    1, BCE_NTXDESC * MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->bce_txdata_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		return;
	}

	/* connect the ring space to the dma map; tx area follows the rx area */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_txdata_map,
	    sc->bce_data + BCE_NRXDESC * MCLBYTES,
	    BCE_NTXDESC * MCLBYTES, NULL, BUS_DMA_WRITE | BUS_DMA_NOWAIT)) {
		printf(": unable to load tx ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		return;
	}


	/*
	 * Allocate DMA-safe memory for ring descriptors.
	 * The receive, and transmit rings can not share the same
	 * 4k space, however both are allocated at once here.
	 */
	/*
	 * XXX PAGE_SIZE is wasteful; we only need 1KB + 1KB, but
	 * due to the limitation above. ??
	 */
	if ((error = bus_dmamem_alloc_range(sc->bce_dmatag, 2 * PAGE_SIZE,
	    PAGE_SIZE, 2 * PAGE_SIZE, &seg, 1, &rseg, BUS_DMA_NOWAIT,
	    (bus_addr_t)0, (bus_addr_t)0x3fffffff))) {
		printf(": unable to alloc space for ring descriptors, "
		    "error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		return;
	}

	/* map ring space to kernel */
	if ((error = bus_dmamem_map(sc->bce_dmatag, &seg, rseg,
	    2 * PAGE_SIZE, &kva, BUS_DMA_NOWAIT))) {
		printf(": unable to map DMA buffers, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* create a dma map for the ring */
	if ((error = bus_dmamap_create(sc->bce_dmatag, 2 * PAGE_SIZE, 1,
	    2 * PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->bce_ring_map))) {
		printf(": unable to create ring DMA map, error = %d\n", error);
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* connect the ring space to the dma map */
	if (bus_dmamap_load(sc->bce_dmatag, sc->bce_ring_map, kva,
	    2 * PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf(": unable to load ring DMA map\n");
		uvm_km_free(kernel_map, (vaddr_t)sc->bce_data,
		    (BCE_NTXDESC + BCE_NRXDESC) * MCLBYTES);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_rxdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_txdata_map);
		bus_dmamap_destroy(sc->bce_dmatag, sc->bce_ring_map);
		bus_dmamem_free(sc->bce_dmatag, &seg, rseg);
		return;
	}

	/* save the ring space in softc: rx ring in page 0, tx ring in page 1 */
	sc->bce_rx_ring = (struct bce_dma_slot *)kva;
	sc->bce_tx_ring = (struct bce_dma_slot *)(kva + PAGE_SIZE);

	/* Set up ifnet structure */
	ifp = &sc->bce_ac.ac_if;
	strlcpy(ifp->if_xname, sc->bce_dev.dv_xname, IF_NAMESIZE);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_watchdog = bce_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* MAC address is read one byte at a time from chip registers */
	sc->bce_ac.ac_enaddr[0] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET0);
	sc->bce_ac.ac_enaddr[1] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET1);
	sc->bce_ac.ac_enaddr[2] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET2);
	sc->bce_ac.ac_enaddr[3] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET3);
	sc->bce_ac.ac_enaddr[4] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET4);
	sc->bce_ac.ac_enaddr[5] =
	    bus_space_read_1(sc->bce_btag, sc->bce_bhandle, BCE_ENET5);

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->bce_ac.ac_enaddr));

	/* Initialize our media structures and probe the MII. */
	sc->bce_mii.mii_ifp = ifp;
	sc->bce_mii.mii_readreg = bce_mii_read;
	sc->bce_mii.mii_writereg = bce_mii_write;
	sc->bce_mii.mii_statchg = bce_statchg;
	ifmedia_init(&sc->bce_mii.mii_media, 0, bce_mediachange,
	    bce_mediastatus);
	mii_attach(&sc->bce_dev, &sc->bce_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	/* fall back to a fixed "none" medium if no PHY was found */
	if (LIST_FIRST(&sc->bce_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&sc->bce_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/* get the phy */
	sc->bce_phy = bus_space_read_1(sc->bce_btag, sc->bce_bhandle,
	    BCE_PHY) & 0x1f;

	/*
	 * Enable activity led.
	 * XXX This should be in a phy driver, but not currently.
	 */
	bce_mii_write((struct device *) sc, 1, 26,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	 /* MAGIC */

	/* enable traffic meter led mode */
	bce_mii_write((struct device *) sc, 1, 27,	 /* MAGIC */
	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	 /* MAGIC */

	/* Attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->bce_timeout, bce_tick, sc);
}
435 
436 int
437 bce_activate(struct device *self, int act)
438 {
439 	struct bce_softc *sc = (struct bce_softc *)self;
440 	struct ifnet *ifp = &sc->bce_ac.ac_if;
441 
442 	switch (act) {
443 	case DVACT_SUSPEND:
444 		if (ifp->if_flags & IFF_RUNNING)
445 			bce_stop(ifp);
446 		break;
447 	case DVACT_RESUME:
448 		if (ifp->if_flags & IFF_UP) {
449 			bce_init(ifp);
450 			bce_start(ifp);
451 		}
452 		break;
453 	}
454 
455 	return (0);
456 }
457 
458 /* handle media, and ethernet requests */
459 int
460 bce_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
461 {
462 	struct bce_softc *sc = ifp->if_softc;
463 	struct ifaddr *ifa = (struct ifaddr *) data;
464 	struct ifreq *ifr = (struct ifreq *) data;
465 	int s, error = 0;
466 
467 	s = splnet();
468 
469 	switch (cmd) {
470 	case SIOCSIFADDR:
471 		ifp->if_flags |= IFF_UP;
472 		if (!(ifp->if_flags & IFF_RUNNING))
473 			bce_init(ifp);
474 		if (ifa->ifa_addr->sa_family == AF_INET)
475 			arp_ifinit(&sc->bce_ac, ifa);
476 		break;
477 
478 	case SIOCSIFFLAGS:
479 		if (ifp->if_flags & IFF_UP) {
480 			if (ifp->if_flags & IFF_RUNNING)
481 				error = ENETRESET;
482 			else
483 				bce_init(ifp);
484 		} else {
485 			if (ifp->if_flags & IFF_RUNNING)
486 				bce_stop(ifp);
487 		}
488 		break;
489 
490 	case SIOCSIFMEDIA:
491 	case SIOCGIFMEDIA:
492 		error = ifmedia_ioctl(ifp, ifr, &sc->bce_mii.mii_media, cmd);
493 		break;
494 
495 	default:
496 		error = ether_ioctl(ifp, &sc->bce_ac, cmd, data);
497 	}
498 
499 	if (error == ENETRESET) {
500 		if (ifp->if_flags & IFF_RUNNING)
501 			bce_iff(ifp);
502 		error = 0;
503 	}
504 
505 	splx(s);
506 	return error;
507 }
508 
/*
 * Start packet transmission on the interface.  Each outgoing frame is
 * copied into its own MCLBYTES slot of the pre-allocated tx data area,
 * so every packet consumes exactly one descriptor.
 */
void
bce_start(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	u_int32_t ctrl;
	int txstart;
	int txsfree;
	int newpkts = 0;

	/*
	 * do not start another if currently transmitting, and more
	 * descriptors(tx slots) are needed for next packet.
	 */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * determine number of descriptors available; one slot is always
	 * left unused so a full ring can be told apart from an empty one
	 */
	if (sc->bce_txsnext >= sc->bce_txin)
		txsfree = BCE_NTXDESC - 1 + sc->bce_txin - sc->bce_txsnext;
	else
		txsfree = sc->bce_txin - sc->bce_txsnext - 1;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (txsfree > 0) {

		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * copy mbuf chain into DMA memory buffer.  The tx slots
		 * follow the BCE_NRXDESC rx slots in bce_data.
		 */
		m_copydata(m0, 0, m0->m_pkthdr.len, sc->bce_data +
		    (sc->bce_txsnext + BCE_NRXDESC) * MCLBYTES);
		ctrl = m0->m_pkthdr.len & CTRL_BC_MASK;
		ctrl |= CTRL_SOF | CTRL_EOF | CTRL_IOC;

		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
		/* mbuf no longer needed; the data was copied above */
		m_freem(m0);

		/*
		 * Sync the data DMA map.  bce_txdata_map was loaded at
		 * bce_data + BCE_NRXDESC * MCLBYTES, so the offset within
		 * the map is just bce_txsnext * MCLBYTES.
		 */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
		    sc->bce_txsnext * MCLBYTES, MCLBYTES, BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor(s). */
		txstart = sc->bce_txsnext;

		/* last ring entry must also terminate the descriptor table */
		if (sc->bce_txsnext == BCE_NTXDESC - 1)
			ctrl |= CTRL_EOT;
		sc->bce_tx_ring[sc->bce_txsnext].ctrl = htole32(ctrl);
		/*
		 * NOTE(review): the 0x40000000 offset appears to be the
		 * SB-to-PCI translation window base -- confirm against
		 * the chip documentation.
		 */
		sc->bce_tx_ring[sc->bce_txsnext].addr =
		    htole32(sc->bce_txdata_map->dm_segs[0].ds_addr +
		    sc->bce_txsnext * MCLBYTES + 0x40000000);	/* MAGIC */
		if (sc->bce_txsnext + 1 > BCE_NTXDESC - 1)
			sc->bce_txsnext = 0;
		else
			sc->bce_txsnext++;
		txsfree--;

		/* sync descriptors being used (tx ring lives in page 1) */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
		    sizeof(struct bce_dma_slot) * txstart + PAGE_SIZE,
		    sizeof(struct bce_dma_slot),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_DPTR,
		    sc->bce_txsnext * sizeof(struct bce_dma_slot));

		newpkts++;
	}
	if (txsfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
	if (newpkts) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
604 
605 /* Watchdog timer handler. */
606 void
607 bce_watchdog(struct ifnet *ifp)
608 {
609 	struct bce_softc *sc = ifp->if_softc;
610 
611 	printf("%s: device timeout\n", sc->bce_dev.dv_xname);
612 	ifp->if_oerrors++;
613 
614 	(void) bce_init(ifp);
615 
616 	/* Try to get more packets going. */
617 	bce_start(ifp);
618 }
619 
/*
 * Interrupt handler: service and acknowledge pending causes in a loop
 * until the (masked) status register reads clear.  Any error cause
 * terminates the loop and forces a full chip re-initialization.
 * Returns nonzero iff the interrupt was ours.
 */
int
bce_intr(void *xsc)
{
	struct bce_softc *sc;
	struct ifnet *ifp;
	u_int32_t intstatus;
	int wantinit;
	int handled = 0;

	sc = xsc;
	ifp = &sc->bce_ac.ac_if;


	for (wantinit = 0; wantinit == 0;) {
		intstatus = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_INT_STS);

		/* ignore if not ours, or unsolicited interrupts */
		intstatus &= sc->bce_intmask;
		if (intstatus == 0)
			break;

		handled = 1;

		/* Ack interrupt (write-1-to-clear) */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_STS,
		    intstatus);

		/* Receive interrupts. */
		if (intstatus & I_RI)
			bce_rxintr(sc);
		/* Transmit interrupts. */
		if (intstatus & I_XI)
			bce_txintr(sc);
		/* Error interrupts */
		if (intstatus & ~(I_RI | I_XI)) {
			if (intstatus & I_XU)
				printf("%s: transmit fifo underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_RO) {
				printf("%s: receive fifo overflow\n",
				    sc->bce_dev.dv_xname);
				ifp->if_ierrors++;
			}
			if (intstatus & I_RU)
				printf("%s: receive descriptor underflow\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_DE)
				printf("%s: descriptor protocol error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PD)
				printf("%s: data error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_PC)
				printf("%s: descriptor error\n",
				    sc->bce_dev.dv_xname);
			if (intstatus & I_TO)
				printf("%s: general purpose timeout\n",
				    sc->bce_dev.dv_xname);
			/* leave the loop and re-init the chip below */
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			bce_init(ifp);
		/* Try to get more packets going. */
		bce_start(ifp);
	}
	return (handled);
}
691 
692 /* Receive interrupt handler */
693 void
694 bce_rxintr(struct bce_softc *sc)
695 {
696 	struct ifnet *ifp = &sc->bce_ac.ac_if;
697 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
698 	struct rx_pph *pph;
699 	struct mbuf *m;
700 	int curr;
701 	int len;
702 	int i;
703 
704 	/* get pointer to active receive slot */
705 	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXSTATUS)
706 	    & RS_CD_MASK;
707 	curr = curr / sizeof(struct bce_dma_slot);
708 	if (curr >= BCE_NRXDESC)
709 		curr = BCE_NRXDESC - 1;
710 
711 	/* process packets up to but not current packet being worked on */
712 	for (i = sc->bce_rxin; i != curr; i = (i + 1) % BCE_NRXDESC) {
713 		/* complete any post dma memory ops on packet */
714 		bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map,
715 		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTREAD);
716 
717 		/*
718 		 * If the packet had an error, simply recycle the buffer,
719 		 * resetting the len, and flags.
720 		 */
721 		pph = (struct rx_pph *)(sc->bce_data + i * MCLBYTES);
722 		if (pph->flags & (RXF_NO | RXF_RXER | RXF_CRC | RXF_OV)) {
723 			ifp->if_ierrors++;
724 			pph->len = 0;
725 			pph->flags = 0;
726 			continue;
727 		}
728 		/* receive the packet */
729 		len = pph->len;
730 		if (len == 0)
731 			continue;	/* no packet if empty */
732 		pph->len = 0;
733 		pph->flags = 0;
734 
735  		/*
736 		 * The chip includes the CRC with every packet.  Trim
737 		 * it off here.
738 		 */
739 		len -= ETHER_CRC_LEN;
740 
741 		m = m_devget(sc->bce_data + i * MCLBYTES +
742 		    BCE_PREPKT_HEADER_SIZE, len, ETHER_ALIGN);
743 		ifp->if_ipackets++;
744 
745 		ml_enqueue(&ml, m);
746 
747 		/* re-check current in case it changed */
748 		curr = (bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
749 		    BCE_DMA_RXSTATUS) & RS_CD_MASK) /
750 		    sizeof(struct bce_dma_slot);
751 		if (curr >= BCE_NRXDESC)
752 			curr = BCE_NRXDESC - 1;
753 	}
754 
755 	if_input(ifp, &ml);
756 
757 	sc->bce_rxin = curr;
758 }
759 
/*
 * Transmit interrupt handler: reclaim completed tx slots.  Note that
 * the mbufs were already freed in bce_start() right after the copy
 * into the DMA area, so only the dma sync and accounting happen here.
 */
void
bce_txintr(struct bce_softc *sc)
{
	struct ifnet   *ifp = &sc->bce_ac.ac_if;
	int curr;
	int i;

	/* slots were freed, so the output queue may be refilled */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and account for the frames which
	 * have been transmitted.
	 */
	curr = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_DMA_TXSTATUS) & RS_CD_MASK;
	curr = curr / sizeof(struct bce_dma_slot);
	if (curr >= BCE_NTXDESC)
		curr = BCE_NTXDESC - 1;
	for (i = sc->bce_txin; i != curr; i = (i + 1) % BCE_NTXDESC) {
		/* do any post dma memory ops on transmit data */
		bus_dmamap_sync(sc->bce_dmatag, sc->bce_txdata_map,
		    i * MCLBYTES, MCLBYTES, BUS_DMASYNC_POSTWRITE);
		ifp->if_opackets++;
	}
	sc->bce_txin = curr;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer
	 */
	if (sc->bce_txsnext == sc->bce_txin)
		ifp->if_timer = 0;
}
794 
/*
 * Initialize the interface: stop any pending I/O, program the Sonics
 * backplane for PCI interrupts and bursts, reset the chip, set up both
 * DMA rings and filters, and start the receiver and MAC.  Always
 * returns 0.
 */
int
bce_init(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	u_int32_t reg_win;
	int i;

	/* Cancel any pending I/O. */
	bce_stop(ifp);

	/* enable pci interrupts, bursts, and prefetch */

	/* remap the pci registers to the Sonics config registers */

	/* save the current map, so it can be restored */
	reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
	    BCE_REG_WIN);

	/* set register window to Sonics registers */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    BCE_SONICS_WIN);

	/* enable SB to PCI interrupt */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC) |
	    SBIV_ENET0);

	/* enable prefetch and bursts for sonics-to-pci translation 2 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2) |
	    SBTOPCI_PREF | SBTOPCI_BURST);

	/* restore to ethernet register space */
	pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
	    reg_win);

	/* Reset the chip to a known state. */
	bce_reset(sc);

	/* Initialize transmit descriptors */
	memset(sc->bce_tx_ring, 0, BCE_NTXDESC * sizeof(struct bce_dma_slot));
	sc->bce_txsnext = 0;
	sc->bce_txin = 0;

	/* enable crc32 generation and set proper LED modes */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) |
	    BCE_EMC_CRC32_ENAB | BCE_EMC_LED);

	/* reset or clear powerdown control bit  */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MACCTL) &
	    ~BCE_EMC_PDOWN);

	/*
	 * setup DMA interrupt control
	 * NOTE(review): 1 << 24 is undocumented here -- presumably an
	 * interrupt pacing/lazy-interrupt setting; confirm with the
	 * chip documentation.
	 */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL, 1 << 24);	/* MAGIC */

	/* program promiscuous mode and multicast filters */
	bce_iff(ifp);

	/* set max frame length, account for possible VLAN tag */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_MAX,
	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* set tx watermark */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_WATER, 56);

	/* enable transmit; the tx ring lives in page 1 of the ring memory */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + PAGE_SIZE + 0x40000000);	/* MAGIC */

	/*
	 * Give the receive ring to the chip, and
	 * start the receive DMA engine.
	 */
	sc->bce_rxin = 0;

	/* clear the rx descriptor ring */
	memset(sc->bce_rx_ring, 0, BCE_NRXDESC * sizeof(struct bce_dma_slot));
	/* enable receive; the offset field tells the chip how much
	 * pre-packet header to prepend to each frame */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL,
	    BCE_PREPKT_HEADER_SIZE << 1 | XC_XE);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXADDR,
	    sc->bce_ring_map->dm_segs[0].ds_addr + 0x40000000);		/* MAGIC */

	/* Initialize receive descriptors */
	for (i = 0; i < BCE_NRXDESC; i++)
		bce_add_rxbuf(sc, i);

	/* Enable interrupts */
	sc->bce_intmask =
	    I_XI | I_RI | I_XU | I_RO | I_RU | I_DE | I_PD | I_PC | I_TO;
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK,
	    sc->bce_intmask);

	/* start the receive dma */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXDPTR,
	    BCE_NRXDESC * sizeof(struct bce_dma_slot));

	/* set media */
	mii_mediachg(&sc->bce_mii);

	/* turn on the ethernet mac */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_ENET_CTL) | EC_EE);

	/* start timer */
	timeout_add_sec(&sc->bce_timeout, 1);

	/* mark as running, and no outputs active */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
915 
/*
 * Add a mac address to the chip's packet filter at table index idx,
 * then poll (up to 100 * 10us) for the filter write to complete.
 */
void
bce_add_mac(struct bce_softc *sc, u_int8_t *mac, unsigned long idx)
{
	int i;
	u_int32_t rval;

	/* low register holds the last four bytes of the address */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_LOW,
	    mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]);
	/* high register: first two bytes; 0x10000 presumably marks the
	 * entry valid -- confirm with the chip documentation */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_HI,
	    mac[0] << 8 | mac[1] | 0x10000);	/* MAGIC */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
	    idx << 16 | 8);	/* MAGIC */
	/* wait for write to complete */
	for (i = 0; i < 100; i++) {
		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_FILT_CTL);
		if (!(rval & 0x80000000))	/* MAGIC */
			break;
		delay(10);
	}
	if (i == 100) {
		printf("%s: timed out writing pkt filter ctl\n",
		   sc->bce_dev.dv_xname);
	}
}
942 
/*
 * Add a receive buffer to the indicated descriptor: point the slot at
 * its fixed MCLBYTES region of the rx data area and arm it for DMA.
 */
void
bce_add_rxbuf(struct bce_softc *sc, int idx)
{
	struct bce_dma_slot *bced = &sc->bce_rx_ring[idx];

	bus_dmamap_sync(sc->bce_dmatag, sc->bce_rxdata_map, idx * MCLBYTES,
	    MCLBYTES, BUS_DMASYNC_PREREAD);

	/* clear the pre-packet header's len and flags words */
	*(u_int32_t *)(sc->bce_data + idx * MCLBYTES) = 0;
	bced->addr = htole32(sc->bce_rxdata_map->dm_segs[0].ds_addr +
	    idx * MCLBYTES + 0x40000000);
	if (idx != (BCE_NRXDESC - 1))
		bced->ctrl = htole32(BCE_RXBUF_LEN);
	else
		/* last slot also terminates the descriptor table */
		bced->ctrl = htole32(BCE_RXBUF_LEN | CTRL_EOT);

	/* push the descriptor update out to the chip-visible ring */
	bus_dmamap_sync(sc->bce_dmatag, sc->bce_ring_map,
	    sizeof(struct bce_dma_slot) * idx,
	    sizeof(struct bce_dma_slot),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

}
966 
/*
 * Stop transmission on the interface: cancel timers, mask interrupts,
 * disable the emac (polling up to 200 * 10us for it to quiesce) and
 * halt both DMA engines.
 */
void
bce_stop(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;
	u_int32_t val;

	/* Stop the 1 second timer */
	timeout_del(&sc->bce_timeout);

	/* Mark the interface down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->bce_mii);

	/* Disable interrupts. */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_INT_MASK, 0);
	sc->bce_intmask = 0;
	delay(10);

	/* Disable emac; EC_ED reads back set until the disable completes */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL, EC_ED);
	for (i = 0; i < 200; i++) {
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_ENET_CTL);
		if (!(val & EC_ED))
			break;
		delay(10);
	}
	/* NOTE(review): unlike bce_reset(), a poll timeout is silent here */

	/* Stop the DMA */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_RXCTL, 0);
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL, 0);
	delay(10);
}
1005 
/*
 * Reset the chip: take the SiliconBackplane (SB) core through a full
 * reset sequence, clear sticky error state, set up the MDC clock, and
 * re-enable the PHY.  Safe to call whether or not the core is up.
 */
void
bce_reset(struct bce_softc *sc)
{
	u_int32_t val;
	u_int32_t sbval;
	int i;

	/* if SB core is up (clocked, not in reset or reject) */
	sbval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
	    BCE_SBTMSTATELOW);
	if ((sbval & (SBTML_RESET | SBTML_REJ | SBTML_CLK)) == SBTML_CLK) {
		/* core is live: quiesce DMA interrupts first */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMAI_CTL,
		    0);

		/* disable emac and wait (up to ~2ms) for it to acknowledge */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ED);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ED))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out disabling ethernet mac\n",
			    sc->bce_dev.dv_xname);

		/* reset the dma engines */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DMA_TXCTL,
		    0);
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_DMA_RXSTATUS);
		/* if error on receive, wait for the engine to go idle */
		if (val & RS_ERROR) {
			for (i = 0; i < 100; i++) {
				val = bus_space_read_4(sc->bce_btag,
				    sc->bce_bhandle, BCE_DMA_RXSTATUS);
				if (val & RS_DMA_IDLE)
					break;
				delay(10);
			}
			if (i == 100)
				printf("%s: receive dma did not go idle after"
				    " error\n", sc->bce_dev.dv_xname);
		}
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		   BCE_DMA_RXSTATUS, 0);

		/* reset ethernet mac and wait for the self-clearing bit */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_ES);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_ENET_CTL);
			if (!(val & EC_ES))
				break;
			delay(10);
		}
		if (i == 200)
			printf("%s: timed out resetting ethernet mac\n",
			    sc->bce_dev.dv_xname);
	} else {
		u_int32_t reg_win;

		/*
		 * Core not up: temporarily remap the PCI register window to
		 * the Sonics config registers to set up the backplane, then
		 * restore the window afterwards.
		 */

		/* save the current map, so it can be restored */
		reg_win = pci_conf_read(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN);
		/* set register window to Sonics registers */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag,
		    BCE_REG_WIN, BCE_SONICS_WIN);

		/* enable SB to PCI interrupt */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBINTVEC,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBINTVEC) | SBIV_ENET0);

		/* enable prefetch and bursts for sonics-to-pci translation 2 */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SPCI_TR2,
		    bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SPCI_TR2) | SBTOPCI_PREF | SBTOPCI_BURST);

		/* restore to ethernet register space */
		pci_conf_write(sc->bce_pa.pa_pc, sc->bce_pa.pa_tag, BCE_REG_WIN,
		    reg_win);
	}

	/* disable SB core if not in reset */
	if (!(sbval & SBTML_RESET)) {

		/* set reject so the backplane stops issuing transactions */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_CLK);
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATELOW);
			if (val & SBTML_REJ)
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, reject did not set\n",
			    sc->bce_dev.dv_xname);
		/* wait until busy (bit 2 of SBTMSTATEHI) is clear */
		for (i = 0; i < 200; i++) {
			val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
			    BCE_SBTMSTATEHI);
			if (!(val & 0x4))
				break;
			delay(1);
		}
		if (i == 200)
			printf("%s: while resetting core, busy did not clear\n",
			    sc->bce_dev.dv_xname);
		/* set reset and reject while enabling the clocks */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW,
		    SBTML_FGC | SBTML_CLK | SBTML_REJ | SBTML_RESET);
		/* readback presumably flushes the posted write -- confirm */
		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW);
		delay(10);
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle,
		    BCE_SBTMSTATELOW, SBTML_REJ | SBTML_RESET);
		delay(1);
	}
	/* enable clock while still holding the core in reset */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK | SBTML_RESET);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* clear any error bits that may be on */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI);
	if (val & 1)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATEHI,
		    0);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE);
	if (val & SBIM_ERRORBITS)
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBIMSTATE,
		    val & ~SBIM_ERRORBITS);

	/* clear reset and allow it to propagate throughout the core */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_FGC | SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* leave clock enabled */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW,
	    SBTML_CLK);
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_SBTMSTATELOW);
	delay(1);

	/* initialize MDC preamble, frequency */
	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_CTL, 0x8d);	/* MAGIC */

	/* enable phy, differs for internal, and external */
	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL);
	if (!(val & BCE_DC_IP)) {
		/* select external phy */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_ENET_CTL,
		    EC_EP);
	} else if (val & BCE_DC_ER) {	/* internal, clear reset bit if on */
		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_DEVCTL,
		    val & ~BCE_DC_ER);
		delay(100);
	}
}
1177 
1178 /* Set up the receive filter. */
1179 void
1180 bce_iff(struct ifnet *ifp)
1181 {
1182 	struct bce_softc *sc = ifp->if_softc;
1183 	struct arpcom *ac = &sc->bce_ac;
1184 	u_int32_t rxctl;
1185 
1186 	rxctl = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL);
1187 	rxctl &= ~(ERC_AM | ERC_DB | ERC_PE);
1188 	ifp->if_flags |= IFF_ALLMULTI;
1189 
1190 	/* disable the filter */
1191 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL, 0);
1192 
1193 	/* add our own address */
1194 	bce_add_mac(sc, ac->ac_enaddr, 0);
1195 
1196 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multicnt > 0) {
1197 		ifp->if_flags |= IFF_ALLMULTI;
1198 		if (ifp->if_flags & IFF_PROMISC)
1199 			rxctl |= ERC_PE;
1200 		else
1201 			rxctl |= ERC_AM;
1202 	}
1203 
1204 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_RX_CTL, rxctl);
1205 
1206 	/* enable the filter */
1207 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL,
1208 	    bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_FILT_CTL) | 1);
1209 }
1210 
1211 /* Read a PHY register on the MII. */
1212 int
1213 bce_mii_read(struct device *self, int phy, int reg)
1214 {
1215 	struct bce_softc *sc = (struct bce_softc *) self;
1216 	int i;
1217 	u_int32_t val;
1218 
1219 	/* clear mii_int */
1220 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1221 	    BCE_MIINTR);
1222 
1223 	/* Read the PHY register */
1224 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1225 	    (MII_COMMAND_READ << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
1226 	    (MII_COMMAND_ACK << 16) | BCE_MIPHY(phy) | BCE_MIREG(reg));	/* MAGIC */
1227 
1228 	for (i = 0; i < BCE_TIMEOUT; i++) {
1229 		val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1230 		    BCE_MI_STS);
1231 		if (val & BCE_MIINTR)
1232 			break;
1233 		delay(10);
1234 	}
1235 	val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1236 	if (i == BCE_TIMEOUT) {
1237 		printf("%s: PHY read timed out reading phy %d, reg %d, val = "
1238 		    "0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1239 		return (0);
1240 	}
1241 	return (val & BCE_MICOMM_DATA);
1242 }
1243 
1244 /* Write a PHY register on the MII */
1245 void
1246 bce_mii_write(struct device *self, int phy, int reg, int val)
1247 {
1248 	struct bce_softc *sc = (struct bce_softc *) self;
1249 	int i;
1250 	u_int32_t rval;
1251 
1252 	/* clear mii_int */
1253 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_STS,
1254 	    BCE_MIINTR);
1255 
1256 	/* Write the PHY register */
1257 	bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM,
1258 	    (MII_COMMAND_WRITE << 28) | (MII_COMMAND_START << 30) |	/* MAGIC */
1259 	    (MII_COMMAND_ACK << 16) | (val & BCE_MICOMM_DATA) |	/* MAGIC */
1260 	    BCE_MIPHY(phy) | BCE_MIREG(reg));
1261 
1262 	/* wait for write to complete */
1263 	for (i = 0; i < BCE_TIMEOUT; i++) {
1264 		rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle,
1265 		    BCE_MI_STS);
1266 		if (rval & BCE_MIINTR)
1267 			break;
1268 		delay(10);
1269 	}
1270 	rval = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_MI_COMM);
1271 	if (i == BCE_TIMEOUT) {
1272 		printf("%s: PHY timed out writing phy %d, reg %d, val "
1273 		    "= 0x%08x\n", sc->bce_dev.dv_xname, phy, reg, val);
1274 	}
1275 }
1276 
1277 /* sync hardware duplex mode to software state */
1278 void
1279 bce_statchg(struct device *self)
1280 {
1281 	struct bce_softc *sc = (struct bce_softc *) self;
1282 	u_int32_t reg;
1283 
1284 	/* if needed, change register to match duplex mode */
1285 	reg = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL);
1286 	if (sc->bce_mii.mii_media_active & IFM_FDX && !(reg & EXC_FD))
1287 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1288 		    reg | EXC_FD);
1289 	else if (!(sc->bce_mii.mii_media_active & IFM_FDX) && reg & EXC_FD)
1290 		bus_space_write_4(sc->bce_btag, sc->bce_bhandle, BCE_TX_CTL,
1291 		    reg & ~EXC_FD);
1292 
1293 	/*
1294 	 * Enable activity led.
1295 	 * XXX This should be in a phy driver, but not currently.
1296 	 */
1297 	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
1298 	    bce_mii_read((struct device *) sc, 1, 26) & 0x7fff);	/* MAGIC */
1299 	/* enable traffic meter led mode */
1300 	bce_mii_write((struct device *) sc, 1, 26,	/* MAGIC */
1301 	    bce_mii_read((struct device *) sc, 1, 27) | (1 << 6));	/* MAGIC */
1302 }
1303 
1304 /* Set hardware to newly-selected media */
1305 int
1306 bce_mediachange(struct ifnet *ifp)
1307 {
1308 	struct bce_softc *sc = ifp->if_softc;
1309 
1310 	if (ifp->if_flags & IFF_UP)
1311 		mii_mediachg(&sc->bce_mii);
1312 	return (0);
1313 }
1314 
1315 /* Get the current interface media status */
1316 void
1317 bce_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1318 {
1319 	struct bce_softc *sc = ifp->if_softc;
1320 
1321 	mii_pollstat(&sc->bce_mii);
1322 	ifmr->ifm_active = sc->bce_mii.mii_media_active;
1323 	ifmr->ifm_status = sc->bce_mii.mii_media_status;
1324 }
1325 
1326 /* One second timer, checks link status */
1327 void
1328 bce_tick(void *v)
1329 {
1330 	struct bce_softc *sc = v;
1331 	int s;
1332 
1333 	s = splnet();
1334 	mii_tick(&sc->bce_mii);
1335 	splx(s);
1336 
1337 	timeout_add_sec(&sc->bce_timeout, 1);
1338 }
1339