1 /*	$OpenBSD: if_cas.c,v 1.53 2020/07/10 13:26:37 patrick Exp $	*/
2 
3 /*
4  *
5  * Copyright (C) 2007 Mark Kettenis.
6  * Copyright (C) 2001 Eduardo Horvath.
7  * All rights reserved.
8  *
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  */
32 
33 /*
34  * Driver for Sun Cassini ethernet controllers.
35  *
36  * There are basically two variants of this chip: Cassini and
37  * Cassini+.  We can distinguish between the two by revision: 0x10 and
38  * up are Cassini+.  The most important difference is that Cassini+
39  * has a second RX descriptor ring.  Cassini+ will not work without
40  * configuring that second ring.  However, since we don't use it, we
41  * don't actually fill the descriptors, and only hand off the first
42  * four to the chip.
43  */
44 
45 #include "bpfilter.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/timeout.h>
50 #include <sys/mbuf.h>
51 #include <sys/syslog.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58 #include <sys/endian.h>
59 #include <sys/atomic.h>
60 
61 #include <net/if.h>
62 #include <net/if_media.h>
63 
64 #include <netinet/in.h>
65 #include <netinet/if_ether.h>
66 
67 #if NBPFILTER > 0
68 #include <net/bpf.h>
69 #endif
70 
71 #include <machine/bus.h>
72 #include <machine/intr.h>
73 
74 #include <dev/mii/mii.h>
75 #include <dev/mii/miivar.h>
76 
77 #include <dev/pci/if_casreg.h>
78 #include <dev/pci/if_casvar.h>
79 
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcireg.h>
82 #include <dev/pci/pcidevs.h>
83 
84 #ifdef __sparc64__
85 #include <dev/ofw/openfirm.h>
86 #endif
87 
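/* Register polls per cas_bitwait() call, 100us apart (about one second). */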
88 #define TRIES	10000
89 
90 struct cfdriver cas_cd = {
91 	NULL, "cas", DV_IFNET
92 };
93 
94 int	cas_match(struct device *, void *, void *);
95 void	cas_attach(struct device *, struct device *, void *);
96 int	cas_pci_enaddr(struct cas_softc *, struct pci_attach_args *);
97 
98 struct cfattach cas_ca = {
99 	sizeof(struct cas_softc), cas_match, cas_attach
100 };
101 
102 void		cas_config(struct cas_softc *);
103 void		cas_start(struct ifnet *);
104 void		cas_stop(struct ifnet *, int);
105 int		cas_ioctl(struct ifnet *, u_long, caddr_t);
106 void		cas_tick(void *);
107 void		cas_watchdog(struct ifnet *);
108 int		cas_init(struct ifnet *);
109 void		cas_init_regs(struct cas_softc *);
110 int		cas_ringsize(int);
111 int		cas_cringsize(int);
112 int		cas_meminit(struct cas_softc *);
113 void		cas_mifinit(struct cas_softc *);
114 int		cas_bitwait(struct cas_softc *, bus_space_handle_t, int,
115 		    u_int32_t, u_int32_t);
116 void		cas_reset(struct cas_softc *);
117 int		cas_reset_rx(struct cas_softc *);
118 int		cas_reset_tx(struct cas_softc *);
119 int		cas_disable_rx(struct cas_softc *);
120 int		cas_disable_tx(struct cas_softc *);
121 void		cas_rxdrain(struct cas_softc *);
122 int		cas_add_rxbuf(struct cas_softc *, int idx);
123 void		cas_iff(struct cas_softc *);
124 int		cas_encap(struct cas_softc *, struct mbuf *, int *);
125 
126 /* MII methods & callbacks */
127 int		cas_mii_readreg(struct device *, int, int);
128 void		cas_mii_writereg(struct device *, int, int, int);
129 void		cas_mii_statchg(struct device *);
130 int		cas_pcs_readreg(struct device *, int, int);
131 void		cas_pcs_writereg(struct device *, int, int, int);
132 
133 int		cas_mediachange(struct ifnet *);
134 void		cas_mediastatus(struct ifnet *, struct ifmediareq *);
135 
136 int		cas_eint(struct cas_softc *, u_int);
137 int		cas_rint(struct cas_softc *);
138 int		cas_tint(struct cas_softc *, u_int32_t);
139 int		cas_pint(struct cas_softc *);
140 int		cas_intr(void *);
141 
142 #ifdef CAS_DEBUG
143 #define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
144 				printf x
145 #else
146 #define	DPRINTF(sc, x)	/* nothing */
147 #endif
148 
149 const struct pci_matchid cas_pci_devices[] = {
150 	{ PCI_VENDOR_SUN, PCI_PRODUCT_SUN_CASSINI },
151 	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_SATURN }
152 };
153 
154 int
155 cas_match(struct device *parent, void *cf, void *aux)
156 {
157 	return (pci_matchbyid((struct pci_attach_args *)aux, cas_pci_devices,
158 	    nitems(cas_pci_devices)));
159 }
160 
161 #define	PROMHDR_PTR_DATA	0x18
162 #define	PROMDATA_PTR_VPD	0x08
163 #define	PROMDATA_DATA2		0x0a
164 
165 static const u_int8_t cas_promhdr[] = { 0x55, 0xaa };
166 static const u_int8_t cas_promdat_sun[] = {
167 	'P', 'C', 'I', 'R',
168 	PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8,
169 	PCI_PRODUCT_SUN_CASSINI & 0xff, PCI_PRODUCT_SUN_CASSINI >> 8
170 };
171 static const u_int8_t cas_promdat_ns[] = {
172 	'P', 'C', 'I', 'R',
173 	PCI_VENDOR_NS & 0xff, PCI_VENDOR_NS >> 8,
174 	PCI_PRODUCT_NS_SATURN & 0xff, PCI_PRODUCT_NS_SATURN >> 8
175 };
176 
177 static const u_int8_t cas_promdat2[] = {
178 	0x18, 0x00,			/* structure length */
179 	0x00,				/* structure revision */
180 	0x00,				/* interface revision */
181 	PCI_SUBCLASS_NETWORK_ETHERNET,	/* subclass code */
182 	PCI_CLASS_NETWORK		/* class code */
183 };
184 
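/*
 * cas_pci_enaddr:
 *
 *	Fetch the MAC address from the "local-mac-address" property in the
 *	VPD of the card's expansion ROM; the PCI device number is added to
 *	the last octet.  Returns 0 on success, -1 on failure.
 */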
185 int
186 cas_pci_enaddr(struct cas_softc *sc, struct pci_attach_args *pa)
187 {
188 	struct pci_vpd_largeres *res;
189 	struct pci_vpd *vpd;
190 	bus_space_handle_t romh;
191 	bus_space_tag_t romt;
192 	bus_size_t romsize = 0;
193 	u_int8_t buf[32], *desc;
194 	pcireg_t address;
195 	int dataoff, vpdoff, len;
196 	int rv = -1;
197 
198 	if (pci_mapreg_map(pa, PCI_ROM_REG, PCI_MAPREG_TYPE_MEM, 0,
199 	    &romt, &romh, 0, &romsize, 0))
200 		return (-1);
201 
202 	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
203 	address |= PCI_ROM_ENABLE;
204 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);
205 
206 	bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf));
207 	if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr)))
208 		goto fail;
209 
210 	dataoff = buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
211 	if (dataoff < 0x1c)
212 		goto fail;
213 
214 	bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
215 	if ((bcmp(buf, cas_promdat_sun, sizeof(cas_promdat_sun)) &&
216 	    bcmp(buf, cas_promdat_ns, sizeof(cas_promdat_ns))) ||
217 	    bcmp(buf + PROMDATA_DATA2, cas_promdat2, sizeof(cas_promdat2)))
218 		goto fail;
219 
220 	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
221 	if (vpdoff < 0x1c)
222 		goto fail;
223 
224 next:
225 	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
226 	if (!PCI_VPDRES_ISLARGE(buf[0]))
227 		goto fail;
228 
229 	res = (struct pci_vpd_largeres *)buf;
230 	vpdoff += sizeof(*res);
231 
232 	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
233 	switch (PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
234 	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
235 		/* Skip identifier string. */
236 		vpdoff += len;
237 		goto next;
238 
239 	case PCI_VPDRES_TYPE_VPD:
240 		while (len > 0) {
241 			bus_space_read_region_1(romt, romh, vpdoff,
242 			     buf, sizeof(buf));
243 
244 			vpd = (struct pci_vpd *)buf;
245 			vpdoff += sizeof(*vpd) + vpd->vpd_len;
246 			len -= sizeof(*vpd) + vpd->vpd_len;
247 
248 			/*
249 			 * We're looking for an "Enhanced" VPD...
250 			 */
251 			if (vpd->vpd_key0 != 'Z')
252 				continue;
253 
254 			desc = buf + sizeof(*vpd);
255 
256 			/*
257 			 * ...which is an instance property...
258 			 */
259 			if (desc[0] != 'I')
260 				continue;
261 			desc += 3;
262 
263 			/*
264 			 * ...that's a byte array with the proper
265 			 * length for a MAC address...
266 			 */
267 			if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN)
268 				continue;
269 			desc += 2;
270 
271 			/*
272 			 * ...named "local-mac-address".
273 			 */
274 			if (strcmp(desc, "local-mac-address") != 0)
275 				continue;
276 			desc += strlen("local-mac-address") + 1;
277 
278 			bcopy(desc, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
279 			sc->sc_arpcom.ac_enaddr[5] += pa->pa_device;
280 			rv = 0;
281 		}
282 		break;
283 
284 	default:
285 		goto fail;
286 	}
287 
288  fail:
289 	if (romsize != 0)
290 		bus_space_unmap(romt, romh, romsize);
291 
292 	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
293 	address &= ~PCI_ROM_ENABLE;
294 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);
295 
296 	return (rv);
297 }
298 
299 void
300 cas_attach(struct device *parent, struct device *self, void *aux)
301 {
302 	struct pci_attach_args *pa = aux;
303 	struct cas_softc *sc = (void *)self;
304 	pci_intr_handle_t ih;
305 #ifdef __sparc64__
306 	/* XXX the following declarations should be elsewhere */
307 	extern void myetheraddr(u_char *);
308 #endif
309 	const char *intrstr = NULL;
310 	bus_size_t size;
311 	int gotenaddr = 0;
312 
313 	sc->sc_rev = PCI_REVISION(pa->pa_class);
314 	sc->sc_dmatag = pa->pa_dmat;
315 
316 #define PCI_CAS_BASEADDR	0x10
317 	if (pci_mapreg_map(pa, PCI_CAS_BASEADDR, PCI_MAPREG_TYPE_MEM, 0,
318 	    &sc->sc_memt, &sc->sc_memh, NULL, &size, 0) != 0) {
319 		printf(": can't map registers\n");
320 		return;
321 	}
322 
323 	if (cas_pci_enaddr(sc, pa) == 0)
324 		gotenaddr = 1;
325 
326 #ifdef __sparc64__
327 	if (!gotenaddr) {
328 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
329 		    sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN) <= 0)
330 			myetheraddr(sc->sc_arpcom.ac_enaddr);
331 		gotenaddr = 1;
332 	}
333 #endif
334 #ifdef __powerpc__
335 	if (!gotenaddr) {
336 		pci_ether_hw_addr(pa->pa_pc, sc->sc_arpcom.ac_enaddr);
337 		gotenaddr = 1;
338 	}
339 #endif
340 
341 	sc->sc_burst = 16;	/* XXX */
342 
343 	if (pci_intr_map(pa, &ih) != 0) {
344 		printf(": couldn't map interrupt\n");
345 		bus_space_unmap(sc->sc_memt, sc->sc_memh, size);
346 		return;
347 	}
348 	intrstr = pci_intr_string(pa->pa_pc, ih);
349 	sc->sc_ih = pci_intr_establish(pa->pa_pc,
350 	    ih, IPL_NET | IPL_MPSAFE, cas_intr, sc, self->dv_xname);
351 	if (sc->sc_ih == NULL) {
352 		printf(": couldn't establish interrupt");
353 		if (intrstr != NULL)
354 			printf(" at %s", intrstr);
355 		printf("\n");
356 		bus_space_unmap(sc->sc_memt, sc->sc_memh, size);
357 		return;
358 	}
359 
360 	printf(": %s", intrstr);
361 
362 	/*
363 	 * Call the main configuration routine.
364 	 */
365 	cas_config(sc);
366 }
367 
368 /*
369  * cas_config:
370  *
371  *	Attach a Cassini interface to the system.
372  */
373 void
374 cas_config(struct cas_softc *sc)
375 {
376 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
377 	struct mii_data *mii = &sc->sc_mii;
378 	struct mii_softc *child;
379 	int i, error;
380 
381 	/* Make sure the chip is stopped. */
382 	ifp->if_softc = sc;
383 	cas_reset(sc);
384 
385 	/*
386 	 * Allocate the control data structures, and create and load the
387 	 * DMA map for it.
388 	 */
389 	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
390 	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
391 	    1, &sc->sc_cdnseg, BUS_DMA_ZERO)) != 0) {
392 		printf("\n%s: unable to allocate control data, error = %d\n",
393 		    sc->sc_dev.dv_xname, error);
394 		goto fail_0;
395 	}
396 
397 	/* XXX should map this in with correct endianness */
398 	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
399 	    sizeof(struct cas_control_data), (caddr_t *)&sc->sc_control_data,
400 	    BUS_DMA_COHERENT)) != 0) {
401 		printf("\n%s: unable to map control data, error = %d\n",
402 		    sc->sc_dev.dv_xname, error);
403 		goto fail_1;
404 	}
405 
406 	if ((error = bus_dmamap_create(sc->sc_dmatag,
407 	    sizeof(struct cas_control_data), 1,
408 	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
409 		printf("\n%s: unable to create control data DMA map, "
410 		    "error = %d\n", sc->sc_dev.dv_xname, error);
411 		goto fail_2;
412 	}
413 
414 	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
415 	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
416 	    0)) != 0) {
417 		printf("\n%s: unable to load control data DMA map, error = %d\n",
418 		    sc->sc_dev.dv_xname, error);
419 		goto fail_3;
420 	}
421 
422 	/*
423 	 * Create the receive buffer DMA maps.
424 	 */
425 	for (i = 0; i < CAS_NRXDESC; i++) {
426 		bus_dma_segment_t seg;
427 		caddr_t kva;
428 		int rseg;
429 
430 		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
431 		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
432 			printf("\n%s: unable to alloc rx DMA mem %d, "
433 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
434 			goto fail_5;
435 		}
436 		sc->sc_rxsoft[i].rxs_dmaseg = seg;
437 
438 		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
439 		    CAS_PAGE_SIZE, &kva, BUS_DMA_NOWAIT)) != 0) {
440 			printf("\n%s: unable to map rx DMA mem %d, "
441 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
442 			goto fail_5;
443 		}
444 		sc->sc_rxsoft[i].rxs_kva = kva;
445 
446 		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
447 		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
448 			printf("\n%s: unable to create rx DMA map %d, "
449 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
450 			goto fail_5;
451 		}
452 
453 		if ((error = bus_dmamap_load(sc->sc_dmatag,
454 		   sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
455 		   BUS_DMA_NOWAIT)) != 0) {
456 			printf("\n%s: unable to load rx DMA map %d, "
457 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
458 			goto fail_5;
459 		}
460 	}
461 
462 	/*
463 	 * Create the transmit buffer DMA maps.
464 	 */
465 	for (i = 0; i < CAS_NTXDESC; i++) {
466 		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
467 		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
468 		    &sc->sc_txd[i].sd_map)) != 0) {
469 			printf("\n%s: unable to create tx DMA map %d, "
470 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
471 			goto fail_6;
472 		}
473 		sc->sc_txd[i].sd_mbuf = NULL;
474 	}
475 
476 	/*
477 	 * From this point forward, the attachment cannot fail.  A failure
478 	 * before this point releases all resources that may have been
479 	 * allocated.
480 	 */
481 
482 	/* Announce ourselves. */
483 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
484 
485 	/* RX FIFO size is fixed at 16KB. */
486 	sc->sc_rxfifosize = 16 * 1024;
487 
488 	/* Initialize ifnet structure. */
489 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
490 	ifp->if_softc = sc;
491 	ifp->if_flags =
492 	    IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
493 	ifp->if_start = cas_start;
494 	ifp->if_ioctl = cas_ioctl;
495 	ifp->if_watchdog = cas_watchdog;
496 	ifq_set_maxlen(&ifp->if_snd, CAS_NTXDESC - 1);
497 
498 	ifp->if_capabilities = IFCAP_VLAN_MTU;
499 
500 	/* Initialize ifmedia structures and MII info */
501 	mii->mii_ifp = ifp;
502 	mii->mii_readreg = cas_mii_readreg;
503 	mii->mii_writereg = cas_mii_writereg;
504 	mii->mii_statchg = cas_mii_statchg;
505 
506 	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);
507 
508 	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);
509 
510 	cas_mifinit(sc);
511 
512 	if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
513 		sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
514 		bus_space_write_4(sc->sc_memt, sc->sc_memh,
515 	            CAS_MIF_CONFIG, sc->sc_mif_config);
516 	}
517 
518 	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
519 	    MII_OFFSET_ANY, 0);
520 
521 	child = LIST_FIRST(&mii->mii_phys);
522 	if (child == NULL &&
523 	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0|CAS_MIF_CONFIG_MDI1)) {
524 		/*
525 		 * Try the external PCS SERDES if we didn't find any
526 		 * MII devices.
527 		 */
528 		bus_space_write_4(sc->sc_memt, sc->sc_memh,
529 		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);
530 
531 		bus_space_write_4(sc->sc_memt, sc->sc_memh,
532 		     CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);
533 
534 		mii->mii_readreg = cas_pcs_readreg;
535 		mii->mii_writereg = cas_pcs_writereg;
536 
537 		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
538 		    MII_OFFSET_ANY, MIIF_NOISOLATE);
539 	}
540 
541 	child = LIST_FIRST(&mii->mii_phys);
542 	if (child == NULL) {
543 		/* No PHY attached */
544 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
545 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
546 	} else {
547 		/*
548 		 * Walk along the list of attached MII devices and
549 		 * establish an `MII instance' to `phy number'
550 		 * mapping. We'll use this mapping in media change
551 		 * requests to determine which phy to use to program
552 		 * the MIF configuration register.
553 		 */
554 		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
555 			/*
556 			 * Note: we support just two PHYs: the built-in
557 			 * internal device and an external one on the MII
558 			 * connector.
559 			 */
560 			if (child->mii_phy > 1 || child->mii_inst > 1) {
561 				printf("%s: cannot accommodate MII device %s"
562 				       " at phy %d, instance %lld\n",
563 				       sc->sc_dev.dv_xname,
564 				       child->mii_dev.dv_xname,
565 				       child->mii_phy, child->mii_inst);
566 				continue;
567 			}
568 
569 			sc->sc_phys[child->mii_inst] = child->mii_phy;
570 		}
571 
572 		/*
573 		 * XXX - we can really do the following ONLY if the
574 		 * phy indeed has the auto negotiation capability!!
575 		 */
576 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
577 	}
578 
579 	/* Attach the interface. */
580 	if_attach(ifp);
581 	ether_ifattach(ifp);
582 
583 	timeout_set(&sc->sc_tick_ch, cas_tick, sc);
584 	return;
585 
586 	/*
587 	 * Free any resources we've allocated during the failed attach
588 	 * attempt.  Do this in reverse order and fall through.
589 	 */
590  fail_6:
591 	for (i = 0; i < CAS_NTXDESC; i++) {
592 		if (sc->sc_txd[i].sd_map != NULL)
593 			bus_dmamap_destroy(sc->sc_dmatag,
594 			    sc->sc_txd[i].sd_map);
595 	}
596  fail_5:
597 	for (i = 0; i < CAS_NRXDESC; i++) {
598 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
599 			bus_dmamap_destroy(sc->sc_dmatag,
600 			    sc->sc_rxsoft[i].rxs_dmamap);
601 	}
602 	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
603  fail_3:
604 	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
605  fail_2:
606 	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
607 	    sizeof(struct cas_control_data));
608  fail_1:
609 	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
610  fail_0:
611 	return;
612 }
613 
614 
615 void
616 cas_tick(void *arg)
617 {
618 	struct cas_softc *sc = arg;
619 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
620 	bus_space_tag_t t = sc->sc_memt;
621 	bus_space_handle_t mac = sc->sc_memh;
622 	int s;
623 	u_int32_t v;
624 
625 	/* unload collision counters */
626 	v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT) +
627 	    bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT);
628 	ifp->if_collisions += v +
629 	    bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT) +
630 	    bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT);
631 	ifp->if_oerrors += v;
632 
633 	/* read error counters */
634 	ifp->if_ierrors +=
635 	    bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT) +
636 	    bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR) +
637 	    bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT) +
638 	    bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL);
639 
640 	/* clear the hardware counters */
641 	bus_space_write_4(t, mac, CAS_MAC_NORM_COLL_CNT, 0);
642 	bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0);
643 	bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0);
644 	bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0);
645 	bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0);
646 	bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0);
647 	bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0);
648 	bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0);
649 
650 	s = splnet();
651 	mii_tick(&sc->sc_mii);
652 	splx(s);
653 
654 	timeout_add_sec(&sc->sc_tick_ch, 1);
655 }
656 
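/*
 * cas_bitwait:
 *
 *	Poll register `r' until the bits in `clr' read back as zero and the
 *	bits in `set' read back as one, or until TRIES polls have elapsed.
 *	Returns 1 on success, 0 on timeout.
 */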
657 int
658 cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r,
659     u_int32_t clr, u_int32_t set)
660 {
661 	int i;
662 	u_int32_t reg;
663 
664 	for (i = TRIES; i--; DELAY(100)) {
665 		reg = bus_space_read_4(sc->sc_memt, h, r);
666 		if ((reg & clr) == 0 && (reg & set) == set)
667 			return (1);
668 	}
669 
670 	return (0);
671 }
672 
673 void
674 cas_reset(struct cas_softc *sc)
675 {
676 	bus_space_tag_t t = sc->sc_memt;
677 	bus_space_handle_t h = sc->sc_memh;
678 	int s;
679 
680 	s = splnet();
681 	DPRINTF(sc, ("%s: cas_reset\n", sc->sc_dev.dv_xname));
682 	cas_reset_rx(sc);
683 	cas_reset_tx(sc);
684 
685 	/* Do a full reset */
686 	bus_space_write_4(t, h, CAS_RESET,
687 	    CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS);
688 	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
689 		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
690 	splx(s);
691 }
692 
693 
694 /*
695  * cas_rxdrain:
696  *
697  *	Drain the receive queue.
698  */
699 void
700 cas_rxdrain(struct cas_softc *sc)
701 {
702 	/* Nothing to do yet. */
703 }
704 
705 /*
706  * Reset the whole thing.
707  */
708 void
709 cas_stop(struct ifnet *ifp, int disable)
710 {
711 	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
712 	struct cas_sxd *sd;
713 	u_int32_t i;
714 
715 	DPRINTF(sc, ("%s: cas_stop\n", sc->sc_dev.dv_xname));
716 
717 	timeout_del(&sc->sc_tick_ch);
718 
719 	/*
720 	 * Mark the interface down and cancel the watchdog timer.
721 	 */
722 	ifp->if_flags &= ~IFF_RUNNING;
723 	ifq_clr_oactive(&ifp->if_snd);
724 	ifp->if_timer = 0;
725 
726 	mii_down(&sc->sc_mii);
727 
728 	cas_reset_rx(sc);
729 	cas_reset_tx(sc);
730 
731 	intr_barrier(sc->sc_ih);
732 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
733 
734 	/*
735 	 * Release any queued transmit buffers.
736 	 */
737 	for (i = 0; i < CAS_NTXDESC; i++) {
738 		sd = &sc->sc_txd[i];
739 		if (sd->sd_mbuf != NULL) {
740 			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
741 			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
742 			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
743 			m_freem(sd->sd_mbuf);
744 			sd->sd_mbuf = NULL;
745 		}
746 	}
747 	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;
748 
749 	if (disable)
750 		cas_rxdrain(sc);
751 }
752 
753 
754 /*
755  * Reset the receiver
756  */
757 int
758 cas_reset_rx(struct cas_softc *sc)
759 {
760 	bus_space_tag_t t = sc->sc_memt;
761 	bus_space_handle_t h = sc->sc_memh;
762 
763 	/*
764 	 * Resetting while DMA is in progress can cause a bus hang, so we
765 	 * disable DMA first.
766 	 */
767 	cas_disable_rx(sc);
768 	bus_space_write_4(t, h, CAS_RX_CONFIG, 0);
769 	/* Wait till it finishes */
770 	if (!cas_bitwait(sc, h, CAS_RX_CONFIG, 1, 0))
771 		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
772 	/* Wait 5ms extra. */
773 	delay(5000);
774 
775 	/* Finally, reset the ERX */
776 	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX);
777 	/* Wait till it finishes */
778 	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX, 0)) {
779 		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
780 		return (1);
781 	}
782 	return (0);
783 }
784 
785 
786 /*
787  * Reset the transmitter
788  */
789 int
790 cas_reset_tx(struct cas_softc *sc)
791 {
792 	bus_space_tag_t t = sc->sc_memt;
793 	bus_space_handle_t h = sc->sc_memh;
794 
795 	/*
796 	 * Resetting while DMA is in progress can cause a bus hang, so we
797 	 * disable DMA first.
798 	 */
799 	cas_disable_tx(sc);
800 	bus_space_write_4(t, h, CAS_TX_CONFIG, 0);
801 	/* Wait till it finishes */
802 	if (!cas_bitwait(sc, h, CAS_TX_CONFIG, 1, 0))
803 		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
804 	/* Wait 5ms extra. */
805 	delay(5000);
806 
807 	/* Finally, reset the ETX */
808 	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX);
809 	/* Wait till it finishes */
810 	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_TX, 0)) {
811 		printf("%s: cannot reset transmitter\n",
812 			sc->sc_dev.dv_xname);
813 		return (1);
814 	}
815 	return (0);
816 }
817 
818 /*
819  * Disable the receiver.
820  */
821 int
822 cas_disable_rx(struct cas_softc *sc)
823 {
824 	bus_space_tag_t t = sc->sc_memt;
825 	bus_space_handle_t h = sc->sc_memh;
826 	u_int32_t cfg;
827 
828 	/* Flip the enable bit */
829 	cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
830 	cfg &= ~CAS_MAC_RX_ENABLE;
831 	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg);
832 
833 	/* Wait for it to finish */
834 	return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG, CAS_MAC_RX_ENABLE, 0));
835 }
836 
837 /*
838  * Disable the transmitter.
839  */
840 int
841 cas_disable_tx(struct cas_softc *sc)
842 {
843 	bus_space_tag_t t = sc->sc_memt;
844 	bus_space_handle_t h = sc->sc_memh;
845 	u_int32_t cfg;
846 
847 	/* Flip the enable bit */
848 	cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG);
849 	cfg &= ~CAS_MAC_TX_ENABLE;
850 	bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg);
851 
852 	/* Wait for it to finish */
853 	return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG, CAS_MAC_TX_ENABLE, 0));
854 }
855 
856 /*
857  * Set up the transmit, receive and receive completion descriptor rings.
858  */
859 int
860 cas_meminit(struct cas_softc *sc)
861 {
862 	int i;
866 
867 	/*
868 	 * Initialize the transmit descriptor ring.
869 	 */
870 	for (i = 0; i < CAS_NTXDESC; i++) {
871 		sc->sc_txdescs[i].cd_flags = 0;
872 		sc->sc_txdescs[i].cd_addr = 0;
873 	}
874 	CAS_CDTXSYNC(sc, 0, CAS_NTXDESC,
875 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
876 
877 	/*
878 	 * Initialize the receive descriptor and receive job
879 	 * descriptor rings.
880 	 */
881 	for (i = 0; i < CAS_NRXDESC; i++)
882 		CAS_INIT_RXDESC(sc, i, i);
883 	sc->sc_rxdptr = 0;
884 	sc->sc_rxptr = 0;
885 
886 	/*
887 	 * Initialize the receive completion ring.
888 	 */
889 	for (i = 0; i < CAS_NRXCOMP; i++) {
890 		sc->sc_rxcomps[i].cc_word[0] = 0;
891 		sc->sc_rxcomps[i].cc_word[1] = 0;
892 		sc->sc_rxcomps[i].cc_word[2] = 0;
893 		sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN);
894 		CAS_CDRXCSYNC(sc, i,
895 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
896 	}
897 
898 	return (0);
899 }
900 
901 int
902 cas_ringsize(int sz)
903 {
904 	switch (sz) {
905 	case 32:
906 		return CAS_RING_SZ_32;
907 	case 64:
908 		return CAS_RING_SZ_64;
909 	case 128:
910 		return CAS_RING_SZ_128;
911 	case 256:
912 		return CAS_RING_SZ_256;
913 	case 512:
914 		return CAS_RING_SZ_512;
915 	case 1024:
916 		return CAS_RING_SZ_1024;
917 	case 2048:
918 		return CAS_RING_SZ_2048;
919 	case 4096:
920 		return CAS_RING_SZ_4096;
921 	case 8192:
922 		return CAS_RING_SZ_8192;
923 	default:
924 		printf("cas: invalid Receive Descriptor ring size %d\n", sz);
925 		return CAS_RING_SZ_32;
926 	}
927 }
928 
929 int
930 cas_cringsize(int sz)
931 {
932 	int i;
933 
934 	for (i = 0; i < 9; i++)
935 		if (sz == (128 << i))
936 			return i;
937 
938 	printf("cas: invalid completion ring size %d\n", sz);
939 	return 0;
940 }
941 
942 /*
943  * Initialization of interface; set up the transmit and receive
944  * descriptor rings and program the chip.
945  */
946 int
947 cas_init(struct ifnet *ifp)
948 {
949 	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
950 	bus_space_tag_t t = sc->sc_memt;
951 	bus_space_handle_t h = sc->sc_memh;
952 	int s;
953 	u_int max_frame_size;
954 	u_int32_t v;
955 
956 	s = splnet();
957 
958 	DPRINTF(sc, ("%s: cas_init: calling stop\n", sc->sc_dev.dv_xname));
959 	/*
960 	 * Initialization sequence. The numbered steps below correspond
961 	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
962 	 * Channel Engine manual (part of the PCIO manual).
963 	 * See also the STP2002-STQ document from Sun Microsystems.
964 	 */
965 
966 	/* step 1 & 2. Reset the Ethernet Channel */
967 	cas_stop(ifp, 0);
968 	cas_reset(sc);
969 	DPRINTF(sc, ("%s: cas_init: restarting\n", sc->sc_dev.dv_xname));
970 
971 	/* Re-initialize the MIF */
972 	cas_mifinit(sc);
973 
974 	/* step 3. Setup data structures in host memory */
975 	cas_meminit(sc);
976 
977 	/* step 4. TX MAC registers & counters */
978 	cas_init_regs(sc);
979 	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
980 	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
981 	bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);
982 
983 	/* step 5. RX MAC registers & counters */
984 	cas_iff(sc);
985 
986 	/* step 6 & 7. Program Descriptor Ring Base Addresses */
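	/* The KASSERTs below check that each ring base is 8KB aligned. */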
987 	KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0);
988 	bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,
989 	    (((uint64_t)CAS_CDTXADDR(sc,0)) >> 32));
990 	bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0));
991 
992 	KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0);
993 	bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,
994 	    (((uint64_t)CAS_CDRXADDR(sc,0)) >> 32));
995 	bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0));
996 
997 	KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0);
998 	bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,
999 	    (((uint64_t)CAS_CDRXCADDR(sc,0)) >> 32));
1000 	bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0));
1001 
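	/*
	 * Cassini+ also needs its second RX descriptor ring programmed,
	 * even though we never use it; see the comment at the top of
	 * this file.
	 */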
1002 	if (CAS_PLUS(sc)) {
1003 		KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0);
1004 		bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,
1005 		    (((uint64_t)CAS_CDRXADDR2(sc,0)) >> 32));
1006 		bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,
1007 		    CAS_CDRXADDR2(sc, 0));
1008 	}
1009 
1010 	/* step 8. Global Configuration & Interrupt Mask */
1011 	bus_space_write_4(t, h, CAS_INTMASK,
1012 		      ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY|
1013 			CAS_INTR_TX_TAG_ERR|
1014 			CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF|
1015 			CAS_INTR_RX_TAG_ERR|
1016 			CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS|
1017 			CAS_INTR_MAC_CONTROL|CAS_INTR_MIF|
1018 			CAS_INTR_BERR));
1019 	bus_space_write_4(t, h, CAS_MAC_RX_MASK,
1020 	    CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT);
1021 	bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE);
1022 	bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */
1023 
1024 	/* step 9. ETX Configuration: use mostly default values */
1025 
1026 	/* Enable DMA */
1027 	v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10;
1028 	bus_space_write_4(t, h, CAS_TX_CONFIG,
1029 	    v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29));
1030 	bus_space_write_4(t, h, CAS_TX_KICK, 0);
1031 
1032 	/* step 10. ERX Configuration */
1033 
1034 	/* Encode Receive Descriptor ring size */
1035 	v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT;
1036 	if (CAS_PLUS(sc))
1037 		v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT;
1038 
1039 	/* Encode Receive Completion ring size */
1040 	v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT;
1041 
1042 	/* Enable DMA */
1043 	bus_space_write_4(t, h, CAS_RX_CONFIG,
1044 	    v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN);
1045 
1046 	/*
1047 	 * The following value is for an OFF Threshold of about 3/4 full
1048 	 * and an ON Threshold of 1/4 full.
1049 	 */
1050 	bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,
1051 	    (3 * sc->sc_rxfifosize / 256) |
1052 	    ((sc->sc_rxfifosize / 256) << 12));
1053 	bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6);
1054 
1055 	/* step 11. Configure Media */
1056 	mii_mediachg(&sc->sc_mii);
1057 
1058 	/* step 12. RX_MAC Configuration Register */
1059 	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
1060 	v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC;
1061 	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);
1062 
1063 	/* step 14. Issue Transmit Pending command */
1064 
1065 	/* step 15.  Give the receiver a swift kick */
1066 	bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4);
1067 	if (CAS_PLUS(sc))
1068 		bus_space_write_4(t, h, CAS_RX_KICK2, 4);
1069 
1070 	/* Start the one second timer. */
1071 	timeout_add_sec(&sc->sc_tick_ch, 1);
1072 
1073 	ifp->if_flags |= IFF_RUNNING;
1074 	ifq_clr_oactive(&ifp->if_snd);
1075 	ifp->if_timer = 0;
1076 	splx(s);
1077 
1078 	return (0);
1079 }
1080 
1081 void
1082 cas_init_regs(struct cas_softc *sc)
1083 {
1084 	bus_space_tag_t t = sc->sc_memt;
1085 	bus_space_handle_t h = sc->sc_memh;
1086 	u_int32_t v, r;
1087 
1088 	/* These regs are not cleared on reset */
1089 	sc->sc_inited = 0;
1090 	if (!sc->sc_inited) {
1091 		/* Load recommended values  */
1092 		bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00);
1093 		bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08);
1094 		bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04);
1095 
1096 		bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1097 		/* Max frame and max burst size */
1098 		v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */;
1099 		bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);
1100 
1101 		bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07);
1102 		bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04);
1103 		bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10);
1104 		bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088);
1105 		bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED,
1106 		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);
1107 
1108 		/* Secondary MAC addresses set to 0:0:0:0:0:0 */
1109 		for (r = CAS_MAC_ADDR3; r < CAS_MAC_ADDR42; r += 4)
1110 			bus_space_write_4(t, h, r, 0);
1111 
1112 		/* MAC control addr set to 0:1:c2:0:1:80 */
1113 		bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001);
1114 		bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200);
1115 		bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180);
1116 
1117 		/* MAC filter addr set to 0:0:0:0:0:0 */
1118 		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0);
1119 		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0);
1120 		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0);
1121 
1122 		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0);
1123 		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 0);
1124 
1125 		/* Hash table initialized to 0 */
1126 		for (r = CAS_MAC_HASH0; r <= CAS_MAC_HASH15; r += 4)
1127 			bus_space_write_4(t, h, r, 0);
1128 
1129 		sc->sc_inited = 1;
1130 	}
1131 
1132 	/* Counters need to be zeroed */
1133 	bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0);
1134 	bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0);
1135 	bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0);
1136 	bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0);
1137 	bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0);
1138 	bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0);
1139 	bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0);
1140 	bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0);
1141 	bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0);
1142 	bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0);
1143 	bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0);
1144 
1145 	/* Un-pause stuff */
1146 	bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0);
1147 
1148 	/*
1149 	 * Set the station address.
1150 	 */
1151 	bus_space_write_4(t, h, CAS_MAC_ADDR0,
1152 		(sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
1153 	bus_space_write_4(t, h, CAS_MAC_ADDR1,
1154 		(sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
1155 	bus_space_write_4(t, h, CAS_MAC_ADDR2,
1156 		(sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
1157 }
1158 
1159 /*
1160  * Receive interrupt.
1161  */
1162 int
1163 cas_rint(struct cas_softc *sc)
1164 {
1165 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1166 	bus_space_tag_t t = sc->sc_memt;
1167 	bus_space_handle_t h = sc->sc_memh;
1168 	struct cas_rxsoft *rxs;
1169 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1170 	struct mbuf *m;
1171 	u_int64_t word[4];
1172 	int len, off, idx;
1173 	int i, skip;
1174 	caddr_t cp;
1175 
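	/*
	 * Walk the RX completion ring.  Each completion entry may describe
	 * a header portion and a data portion, each with its own buffer
	 * index and offset; both are copied into fresh mbufs below.
	 */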
1176 	for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) {
1177 		CAS_CDRXCSYNC(sc, i,
1178 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1179 
1180 		word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]);
1181 		word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]);
1182 		word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]);
1183 		word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]);
1184 
1185 		/* Stop if the hardware still owns the descriptor. */
1186 		if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN)
1187 			break;
1188 
1189 		len = CAS_RC1_HDR_LEN(word[1]);
1190 		if (len > 0) {
1191 			off = CAS_RC1_HDR_OFF(word[1]);
1192 			idx = CAS_RC1_HDR_IDX(word[1]);
1193 			rxs = &sc->sc_rxsoft[idx];
1194 
1195 			DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
1196 			    idx, off, len));
1197 
1198 			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1199 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1200 
1201 			cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN;
1202 			m = m_devget(cp, len, ETHER_ALIGN);
1203 
1204 			if (word[0] & CAS_RC0_RELEASE_HDR)
1205 				cas_add_rxbuf(sc, idx);
1206 
1207 			if (m != NULL) {
1208 				ml_enqueue(&ml, m);
1209 			} else
1210 				ifp->if_ierrors++;
1211 		}
1212 
1213 		len = CAS_RC0_DATA_LEN(word[0]);
1214 		if (len > 0) {
1215 			off = CAS_RC0_DATA_OFF(word[0]);
1216 			idx = CAS_RC0_DATA_IDX(word[0]);
1217 			rxs = &sc->sc_rxsoft[idx];
1218 
1219 			DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
1220 			    idx, off, len));
1221 
1222 			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1223 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1224 
1225 			/* XXX We should not be copying the packet here. */
1226 			cp = rxs->rxs_kva + off + ETHER_ALIGN;
1227 			m = m_devget(cp, len, ETHER_ALIGN);
1228 
1229 			if (word[0] & CAS_RC0_RELEASE_DATA)
1230 				cas_add_rxbuf(sc, idx);
1231 
1232 			if (m != NULL) {
1233 				ml_enqueue(&ml, m);
1234 			} else
1235 				ifp->if_ierrors++;
1236 		}
1237 
1238 		if (word[0] & CAS_RC0_SPLIT)
1239 			printf("split packet\n");
1240 
1241 		skip = CAS_RC0_SKIP(word[0]);
1242 	}
1243 
1244 	while (sc->sc_rxptr != i) {
1245 		sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0;
1246 		sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0;
1247 		sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0;
1248 		sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] =
1249 		    CAS_DMA_WRITE(CAS_RC3_OWN);
1250 		CAS_CDRXCSYNC(sc, sc->sc_rxptr,
1251 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1252 
1253 		sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr);
1254 	}
1255 
1256 	bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr);
1257 
1258 	DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n",
1259 		sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION)));
1260 
1261 	if_input(ifp, &ml);
1262 
1263 	return (1);
1264 }
1265 
1266 /*
1267  * cas_add_rxbuf:
1268  *
1269  *	Re-queue the indicated receive buffer on the next free RX descriptor.
1270  */
1271 int
1272 cas_add_rxbuf(struct cas_softc *sc, int idx)
1273 {
1274 	bus_space_tag_t t = sc->sc_memt;
1275 	bus_space_handle_t h = sc->sc_memh;
1276 
1277 	CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx);
1278 
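	/*
	 * The RX kick register is only written once every four descriptors,
	 * so the value handed to the chip is always a multiple of four.
	 */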
1279 	if ((sc->sc_rxdptr % 4) == 0)
1280 		bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr);
1281 
1282 	if (++sc->sc_rxdptr == CAS_NRXDESC)
1283 		sc->sc_rxdptr = 0;
1284 
1285 	return (0);
1286 }
1287 
1288 int
1289 cas_eint(struct cas_softc *sc, u_int status)
1290 {
1291 	if ((status & CAS_INTR_MIF) != 0) {
1292 #ifdef CAS_DEBUG
1293 		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1294 #endif
1295 		return (1);
1296 	}
1297 
1298 	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, CAS_INTR_BITS);
1299 	return (1);
1300 }
1301 
1302 int
1303 cas_pint(struct cas_softc *sc)
1304 {
1305 	bus_space_tag_t t = sc->sc_memt;
1306 	bus_space_handle_t seb = sc->sc_memh;
1307 	u_int32_t status;
1308 
1309 	status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1310 	status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1311 #ifdef CAS_DEBUG
1312 	if (status)
1313 		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1314 #endif
1315 	return (1);
1316 }
1317 
1318 int
1319 cas_intr(void *v)
1320 {
1321 	struct cas_softc *sc = (struct cas_softc *)v;
1322 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1323 	bus_space_tag_t t = sc->sc_memt;
1324 	bus_space_handle_t seb = sc->sc_memh;
1325 	u_int32_t status;
1326 	int r = 0;
1327 
1328 	status = bus_space_read_4(t, seb, CAS_STATUS);
1329 	DPRINTF(sc, ("%s: cas_intr: cplt %x status %b\n",
1330 		sc->sc_dev.dv_xname, (status>>19), status, CAS_INTR_BITS));
1331 
1332 	if ((status & CAS_INTR_PCS) != 0)
1333 		r |= cas_pint(sc);
1334 
1335 	if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
1336 	    CAS_INTR_RX_COMP_FULL | CAS_INTR_BERR)) != 0)
1337 		r |= cas_eint(sc, status);
1338 
1339 	if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0)
1340 		r |= cas_tint(sc, status);
1341 
1342 	if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0)
1343 		r |= cas_rint(sc);
1344 
1345 	/* We should eventually do more than just print out error stats. */
1346 	if (status & CAS_INTR_TX_MAC) {
1347 		int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS);
1348 #ifdef CAS_DEBUG
1349 		if (txstat & ~CAS_MAC_TX_XMIT_DONE)
1350 			printf("%s: MAC tx fault, status %x\n",
1351 			    sc->sc_dev.dv_xname, txstat);
1352 #endif
1353 		if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG)) {
1354 			KERNEL_LOCK();
1355 			cas_init(ifp);
1356 			KERNEL_UNLOCK();
1357 		}
1358 	}
1359 	if (status & CAS_INTR_RX_MAC) {
1360 		int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS);
1361 #ifdef CAS_DEBUG
1362 		if (rxstat & ~CAS_MAC_RX_DONE)
1363 			printf("%s: MAC rx fault, status %x\n",
1364 			    sc->sc_dev.dv_xname, rxstat);
1365 #endif
1366 		/*
1367 		 * On some chip revisions CAS_MAC_RX_OVERFLOW happens often
1368 		 * due to a silicon bug, so handle it silently.
1369 		 */
1370 		if (rxstat & CAS_MAC_RX_OVERFLOW) {
1371 			KERNEL_LOCK();
1372 			ifp->if_ierrors++;
1373 			cas_init(ifp);
1374 			KERNEL_UNLOCK();
1375 		}
1376 #ifdef CAS_DEBUG
1377 		else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT))
1378 			printf("%s: MAC rx fault, status %x\n",
1379 			    sc->sc_dev.dv_xname, rxstat);
1380 #endif
1381 	}
1382 	return (r);
1383 }
1384 
1385 
1386 void
1387 cas_watchdog(struct ifnet *ifp)
1388 {
1389 	struct cas_softc *sc = ifp->if_softc;
1390 
1391 	DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x "
1392 		"CAS_MAC_RX_CONFIG %x\n",
1393 		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG),
1394 		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS),
1395 		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG)));
1396 
1397 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1398 	++ifp->if_oerrors;
1399 
1400 	/* Try to get more packets going. */
1401 	cas_init(ifp);
1402 }
1403 
1404 /*
1405  * Initialize the MII Management Interface
1406  */
1407 void
1408 cas_mifinit(struct cas_softc *sc)
1409 {
1410 	bus_space_tag_t t = sc->sc_memt;
1411 	bus_space_handle_t mif = sc->sc_memh;
1412 
1413 	/* Configure the MIF in frame mode */
1414 	sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG);
1415 	sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA;
1416 	bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config);
1417 }
1418 
1419 /*
1420  * MII interface
1421  *
1422  * The Cassini MII interface supports at least three different operating modes:
1423  *
1424  * Bitbang mode is implemented using data, clock and output enable registers.
1425  *
1426  * Frame mode is implemented by loading a complete frame into the frame
1427  * register and polling the valid bit for completion.
1428  *
1429  * Polling mode uses the frame register but completion is indicated by
1430  * an interrupt.
1431  *
1432  */
1433 int
1434 cas_mii_readreg(struct device *self, int phy, int reg)
1435 {
1436 	struct cas_softc *sc = (void *)self;
1437 	bus_space_tag_t t = sc->sc_memt;
1438 	bus_space_handle_t mif = sc->sc_memh;
1439 	int n;
1440 	u_int32_t v;
1441 
1442 #ifdef CAS_DEBUG
1443 	if (sc->sc_debug)
1444 		printf("cas_mii_readreg: phy %d reg %d\n", phy, reg);
1445 #endif
1446 
1447 	/* Construct the frame command */
1448 	v = (reg << CAS_MIF_REG_SHIFT)	| (phy << CAS_MIF_PHY_SHIFT) |
1449 		CAS_MIF_FRAME_READ;
1450 
1451 	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1452 	for (n = 0; n < 100; n++) {
1453 		DELAY(1);
1454 		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1455 		if (v & CAS_MIF_FRAME_TA0)
1456 			return (v & CAS_MIF_FRAME_DATA);
1457 	}
1458 
1459 	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
1460 	return (0);
1461 }
1462 
1463 void
1464 cas_mii_writereg(struct device *self, int phy, int reg, int val)
1465 {
1466 	struct cas_softc *sc = (void *)self;
1467 	bus_space_tag_t t = sc->sc_memt;
1468 	bus_space_handle_t mif = sc->sc_memh;
1469 	int n;
1470 	u_int32_t v;
1471 
1472 #ifdef CAS_DEBUG
1473 	if (sc->sc_debug)
1474 		printf("cas_mii_writereg: phy %d reg %d val %x\n",
1475 			phy, reg, val);
1476 #endif
1477 
1478 	/* Construct the frame command */
1479 	v = CAS_MIF_FRAME_WRITE			|
1480 	    (phy << CAS_MIF_PHY_SHIFT)		|
1481 	    (reg << CAS_MIF_REG_SHIFT)		|
1482 	    (val & CAS_MIF_FRAME_DATA);
1483 
1484 	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1485 	for (n = 0; n < 100; n++) {
1486 		DELAY(1);
1487 		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1488 		if (v & CAS_MIF_FRAME_TA0)
1489 			return;
1490 	}
1491 
1492 	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
1493 }
1494 
1495 void
1496 cas_mii_statchg(struct device *dev)
1497 {
1498 	struct cas_softc *sc = (void *)dev;
1499 #ifdef CAS_DEBUG
1500 	uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
1501 #endif
1502 	bus_space_tag_t t = sc->sc_memt;
1503 	bus_space_handle_t mac = sc->sc_memh;
1504 	u_int32_t v;
1505 
1506 #ifdef CAS_DEBUG
1507 	if (sc->sc_debug)
1508 		printf("cas_mii_statchg: status change: phy = %d\n",
1509 		    sc->sc_phys[instance]);
1510 #endif
1511 
1512 	/* Set tx full duplex options */
1513 	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0);
1514 	delay(10000); /* reg must be cleared and delay before changing. */
1515 	v = CAS_MAC_TX_ENA_IPG0|CAS_MAC_TX_NGU|CAS_MAC_TX_NGU_LIMIT|
1516 		CAS_MAC_TX_ENABLE;
1517 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
1518 		v |= CAS_MAC_TX_IGN_CARRIER|CAS_MAC_TX_IGN_COLLIS;
1519 	}
1520 	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v);
1521 
1522 	/* XIF Configuration */
1523 	v = CAS_MAC_XIF_TX_MII_ENA;
1524 	v |= CAS_MAC_XIF_LINK_LED;
1525 
1526 	/* MII needs echo disable if half duplex. */
1527 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
1528 		/* turn on full duplex LED */
1529 		v |= CAS_MAC_XIF_FDPLX_LED;
1530 	else
1531 		/* half duplex -- disable echo */
1532 		v |= CAS_MAC_XIF_ECHO_DISABL;
1533 
1534 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1535 	case IFM_1000_T:  /* Gigabit using GMII interface */
1536 	case IFM_1000_SX:
1537 		v |= CAS_MAC_XIF_GMII_MODE;
1538 		break;
1539 	default:
1540 		v &= ~CAS_MAC_XIF_GMII_MODE;
1541 	}
1542 	bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v);
1543 }
1544 
1545 int
1546 cas_pcs_readreg(struct device *self, int phy, int reg)
1547 {
1548 	struct cas_softc *sc = (void *)self;
1549 	bus_space_tag_t t = sc->sc_memt;
1550 	bus_space_handle_t pcs = sc->sc_memh;
1551 
1552 #ifdef CAS_DEBUG
1553 	if (sc->sc_debug)
1554 		printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg);
1555 #endif
1556 
1557 	if (phy != CAS_PHYAD_EXTERNAL)
1558 		return (0);
1559 
1560 	switch (reg) {
1561 	case MII_BMCR:
1562 		reg = CAS_MII_CONTROL;
1563 		break;
1564 	case MII_BMSR:
1565 		reg = CAS_MII_STATUS;
1566 		break;
1567 	case MII_ANAR:
1568 		reg = CAS_MII_ANAR;
1569 		break;
1570 	case MII_ANLPAR:
1571 		reg = CAS_MII_ANLPAR;
1572 		break;
1573 	case MII_EXTSR:
1574 		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
1575 	default:
1576 		return (0);
1577 	}
1578 
1579 	return bus_space_read_4(t, pcs, reg);
1580 }
1581 
1582 void
1583 cas_pcs_writereg(struct device *self, int phy, int reg, int val)
1584 {
1585 	struct cas_softc *sc = (void *)self;
1586 	bus_space_tag_t t = sc->sc_memt;
1587 	bus_space_handle_t pcs = sc->sc_memh;
1588 	int reset = 0;
1589 
1590 #ifdef CAS_DEBUG
1591 	if (sc->sc_debug)
1592 		printf("cas_pcs_writereg: phy %d reg %d val %x\n",
1593 			phy, reg, val);
1594 #endif
1595 
1596 	if (phy != CAS_PHYAD_EXTERNAL)
1597 		return;
1598 
1599 	if (reg == MII_ANAR)
1600 		bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0);
1601 
1602 	switch (reg) {
1603 	case MII_BMCR:
1604 		reset = (val & CAS_MII_CONTROL_RESET);
1605 		reg = CAS_MII_CONTROL;
1606 		break;
1607 	case MII_BMSR:
1608 		reg = CAS_MII_STATUS;
1609 		break;
1610 	case MII_ANAR:
1611 		reg = CAS_MII_ANAR;
1612 		break;
1613 	case MII_ANLPAR:
1614 		reg = CAS_MII_ANLPAR;
1615 		break;
1616 	default:
1617 		return;
1618 	}
1619 
1620 	bus_space_write_4(t, pcs, reg, val);
1621 
1622 	if (reset)
1623 		cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0);
1624 
1625 	if (reg == CAS_MII_ANAR || reset)
1626 		bus_space_write_4(t, pcs, CAS_MII_CONFIG,
1627 		    CAS_MII_CONFIG_ENABLE);
1628 }
1629 
1630 int
1631 cas_mediachange(struct ifnet *ifp)
1632 {
1633 	struct cas_softc *sc = ifp->if_softc;
1634 	struct mii_data *mii = &sc->sc_mii;
1635 
1636 	if (mii->mii_instance) {
1637 		struct mii_softc *miisc;
1638 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1639 			mii_phy_reset(miisc);
1640 	}
1641 
1642 	return (mii_mediachg(&sc->sc_mii));
1643 }
1644 
1645 void
1646 cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1647 {
1648 	struct cas_softc *sc = ifp->if_softc;
1649 
1650 	mii_pollstat(&sc->sc_mii);
1651 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1652 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1653 }
1654 
1655 /*
1656  * Process an ioctl request.
1657  */
1658 int
1659 cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1660 {
1661 	struct cas_softc *sc = ifp->if_softc;
1662 	struct ifreq *ifr = (struct ifreq *)data;
1663 	int s, error = 0;
1664 
1665 	s = splnet();
1666 
1667 	switch (cmd) {
1668 	case SIOCSIFADDR:
1669 		ifp->if_flags |= IFF_UP;
1670 		if ((ifp->if_flags & IFF_RUNNING) == 0)
1671 			cas_init(ifp);
1672 		break;
1673 
1674 	case SIOCSIFFLAGS:
1675 		if (ifp->if_flags & IFF_UP) {
1676 			if (ifp->if_flags & IFF_RUNNING)
1677 				error = ENETRESET;
1678 			else
1679 				cas_init(ifp);
1680 		} else {
1681 			if (ifp->if_flags & IFF_RUNNING)
1682 				cas_stop(ifp, 1);
1683 		}
1684 #ifdef CAS_DEBUG
1685 		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1686 #endif
1687 		break;
1688 
1689 	case SIOCGIFMEDIA:
1690 	case SIOCSIFMEDIA:
1691 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1692 		break;
1693 
1694 	default:
1695 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1696 	}
1697 
1698 	if (error == ENETRESET) {
1699 		if (ifp->if_flags & IFF_RUNNING)
1700 			cas_iff(sc);
1701 		error = 0;
1702 	}
1703 
1704 	splx(s);
1705 	return (error);
1706 }
1707 
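/*
 * cas_iff:
 *
 *	Program the RX MAC filter: promiscuous or all-multicast mode, or
 *	the multicast hash table, depending on the interface flags and the
 *	multicast list.
 */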
1708 void
1709 cas_iff(struct cas_softc *sc)
1710 {
1711 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1712 	struct arpcom *ac = &sc->sc_arpcom;
1713 	struct ether_multi *enm;
1714 	struct ether_multistep step;
1715 	bus_space_tag_t t = sc->sc_memt;
1716 	bus_space_handle_t h = sc->sc_memh;
1717 	u_int32_t crc, hash[16], rxcfg;
1718 	int i;
1719 
1720 	rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
1721 	rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS |
1722 	    CAS_MAC_RX_PROMISC_GRP);
1723 	ifp->if_flags &= ~IFF_ALLMULTI;
1724 
1725 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1726 		ifp->if_flags |= IFF_ALLMULTI;
1727 		if (ifp->if_flags & IFF_PROMISC)
1728 			rxcfg |= CAS_MAC_RX_PROMISCUOUS;
1729 		else
1730 			rxcfg |= CAS_MAC_RX_PROMISC_GRP;
1731 	} else {
1732 		/*
1733 		 * Set up multicast address filter by passing all multicast
1734 		 * addresses through a crc generator, and then using the
1735 		 * high order 8 bits as an index into the 256 bit logical
1736 		 * address filter.  The high order 4 bits selects the word,
1737 		 * while the other 4 bits select the bit within the word
1738 		 * (where bit 0 is the MSB).
1739 		 */
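		/*
		 * For example, a CRC with top byte 0x9c (crc >> 24 == 0x9c)
		 * selects word 0x9c >> 4 == 9 and bit 15 - (0x9c & 0xf) == 3,
		 * i.e. hash[9] |= 0x0008.
		 */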
1740 
1741 		rxcfg |= CAS_MAC_RX_HASH_FILTER;
1742 
1743 		/* Clear hash table */
1744 		for (i = 0; i < 16; i++)
1745 			hash[i] = 0;
1746 
1747 		ETHER_FIRST_MULTI(step, ac, enm);
1748 		while (enm != NULL) {
1749 			crc = ether_crc32_le(enm->enm_addrlo,
1750 			    ETHER_ADDR_LEN);
1751 
1752 			/* Just want the 8 most significant bits. */
1753 			crc >>= 24;
1754 
1755 			/* Set the corresponding bit in the filter. */
1756 			hash[crc >> 4] |= 1 << (15 - (crc & 15));
1757 
1758 			ETHER_NEXT_MULTI(step, enm);
1759 		}
1760 
1761 		/* Now load the hash table into the chip (if we are using it) */
1762 		for (i = 0; i < 16; i++) {
1763 			bus_space_write_4(t, h,
1764 			    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
1765 			    hash[i]);
1766 		}
1767 	}
1768 
1769 	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg);
1770 }
1771 
1772 int
1773 cas_encap(struct cas_softc *sc, struct mbuf *m, int *used)
1774 {
1775 	u_int64_t flags;
1776 	u_int32_t first, cur, frag, i;
1777 	bus_dmamap_t map;
1778 
1779 	cur = frag = (sc->sc_tx_prod + *used) % CAS_NTXDESC;
1780 	map = sc->sc_txd[cur].sd_map;
1781 
1782 	switch (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m, BUS_DMA_NOWAIT)) {
1783 	case 0:
1784 		break;
1785 	case EFBIG:
1786 		if (m_defrag(m, M_DONTWAIT) == 0 &&
1787 		    bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
1788 		    BUS_DMA_NOWAIT) == 0)
1789 			break;
1790 		/* FALLTHROUGH */
1791 	default:
1792 		return (ENOBUFS);
1793 	}
1794 
1795 	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
1796 	    BUS_DMASYNC_PREWRITE);
1797 
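	/*
	 * Build one TX descriptor per DMA segment; the first segment is
	 * flagged as start-of-packet and the last as end-of-packet.
	 */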
1798 	first = cur;
1799 	for (i = 0; i < map->dm_nsegs; i++) {
1800 		sc->sc_txdescs[frag].cd_addr =
1801 		    CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
1802 		flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
1803 		    (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
1804 		    ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
1805 		sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
1806 		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
1807 		    CAS_CDTXOFF(frag), sizeof(struct cas_desc),
1808 		    BUS_DMASYNC_PREWRITE);
1809 		cur = frag;
1810 		if (++frag == CAS_NTXDESC)
1811 			frag = 0;
1812 	}
1813 
1814 	sc->sc_txd[first].sd_map = sc->sc_txd[cur].sd_map;
1815 	sc->sc_txd[cur].sd_map = map;
1816 	sc->sc_txd[cur].sd_mbuf = m;
1817 
1818 	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);
1819 
1820 	*used += map->dm_nsegs;
1821 
1822 	/* sync descriptors */
1823 
1824 	return (0);
1825 }
1826 
1827 /*
1828  * Transmit interrupt.
1829  */
1830 int
1831 cas_tint(struct cas_softc *sc, u_int32_t status)
1832 {
1833 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1834 	struct cas_sxd *sd;
1835 	u_int32_t cons, comp;
1836 	int freed, used;
1837 
1838 	comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION);
1839 	cons = sc->sc_tx_cons;
1840 	freed = 0;
1841 	while (cons != comp) {
1842 		sd = &sc->sc_txd[cons];
1843 		if (sd->sd_mbuf != NULL) {
1844 			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
1845 			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1846 			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
1847 			m_freem(sd->sd_mbuf);
1848 			sd->sd_mbuf = NULL;
1849 		}
1850 		freed++;
1851 		if (++cons == CAS_NTXDESC)
1852 			cons = 0;
1853 	}
1854 	sc->sc_tx_cons = cons;
1855 
1856 	used = atomic_sub_int_nv(&sc->sc_tx_cnt, freed);
1857 	if (used < CAS_NTXDESC - 2)
1858 		ifq_clr_oactive(&ifp->if_snd);
1859 	if (used == 0)
1860 		ifp->if_timer = 0;
1861 
1862 	if (!ifq_empty(&ifp->if_snd)) {
1863 		KERNEL_LOCK();
1864 		cas_start(ifp);
1865 		KERNEL_UNLOCK();
1866 	}
1867 
1868 	return (1);
1869 }
1870 
1871 void
1872 cas_start(struct ifnet *ifp)
1873 {
1874 	struct cas_softc *sc = ifp->if_softc;
1875 	struct mbuf *m = NULL;
1876 	int used;
1877 
1878 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1879 		return;
1880 
1881 	used = 0;
1882 	while (1) {
1883 		if ((sc->sc_tx_cnt + used + CAS_NTXSEGS) >= (CAS_NTXDESC - 2)) {
1884 			ifq_set_oactive(&ifp->if_snd);
1885 			break;
1886 		}
1887 
1888 		m = ifq_dequeue(&ifp->if_snd);
1889 		if (m == NULL)
1890 			break;
1891 
1892 		if (cas_encap(sc, m, &used)) {
1893 			m_freem(m);
1894 			continue;
1895 		}
1896 
1897 #if NBPFILTER > 0
1898 		if (ifp->if_bpf)
1899 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1900 #endif
1901 	}
1902 
1903 	if (used != 0) {
1904 		ifp->if_timer = 5;
1905 		sc->sc_tx_prod = (sc->sc_tx_prod + used) % CAS_NTXDESC;
1906 		atomic_add_int(&sc->sc_tx_cnt, used);
1907 	}
1908 }
1909