xref: /openbsd/sys/dev/pci/if_cas.c (revision 0f9891f1)
1 /*	$OpenBSD: if_cas.c,v 1.56 2024/05/24 06:02:53 jsg Exp $	*/
2 
3 /*
4  *
5  * Copyright (C) 2007 Mark Kettenis.
6  * Copyright (C) 2001 Eduardo Horvath.
7  * All rights reserved.
8  *
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  */
32 
33 /*
34  * Driver for Sun Cassini ethernet controllers.
35  *
36  * There are basically two variants of this chip: Cassini and
37  * Cassini+.  We can distinguish between the two by revision: 0x10 and
38  * up are Cassini+.  The most important difference is that Cassini+
39  * has a second RX descriptor ring.  Cassini+ will not work without
40  * configuring that second ring.  However, since we don't use it we
41  * don't actually fill the descriptors, and only hand off the first
42  * four to the chip.
43  */
44 
45 #include "bpfilter.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/timeout.h>
50 #include <sys/mbuf.h>
51 #include <sys/syslog.h>
52 #include <sys/ioctl.h>
53 #include <sys/errno.h>
54 #include <sys/device.h>
55 #include <sys/atomic.h>
56 
57 #include <net/if.h>
58 #include <net/if_media.h>
59 
60 #include <netinet/in.h>
61 #include <netinet/if_ether.h>
62 
63 #if NBPFILTER > 0
64 #include <net/bpf.h>
65 #endif
66 
67 #include <machine/bus.h>
68 #include <machine/intr.h>
69 
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
72 
73 #include <dev/pci/if_casreg.h>
74 #include <dev/pci/if_casvar.h>
75 
76 #include <dev/pci/pcivar.h>
77 #include <dev/pci/pcireg.h>
78 #include <dev/pci/pcidevs.h>
79 
80 #ifdef __sparc64__
81 #include <dev/ofw/openfirm.h>
82 #endif
83 
84 #define TRIES	10000
85 
86 struct cfdriver cas_cd = {
87 	NULL, "cas", DV_IFNET
88 };
89 
90 int	cas_match(struct device *, void *, void *);
91 void	cas_attach(struct device *, struct device *, void *);
92 int	cas_pci_enaddr(struct cas_softc *, struct pci_attach_args *);
93 
94 const struct cfattach cas_ca = {
95 	sizeof(struct cas_softc), cas_match, cas_attach
96 };
97 
98 void		cas_config(struct cas_softc *);
99 void		cas_start(struct ifnet *);
100 void		cas_stop(struct ifnet *, int);
101 int		cas_ioctl(struct ifnet *, u_long, caddr_t);
102 void		cas_tick(void *);
103 void		cas_watchdog(struct ifnet *);
104 int		cas_init(struct ifnet *);
105 void		cas_init_regs(struct cas_softc *);
106 int		cas_ringsize(int);
107 int		cas_cringsize(int);
108 int		cas_meminit(struct cas_softc *);
109 void		cas_mifinit(struct cas_softc *);
110 int		cas_bitwait(struct cas_softc *, bus_space_handle_t, int,
111 		    u_int32_t, u_int32_t);
112 void		cas_reset(struct cas_softc *);
113 int		cas_reset_rx(struct cas_softc *);
114 int		cas_reset_tx(struct cas_softc *);
115 int		cas_disable_rx(struct cas_softc *);
116 int		cas_disable_tx(struct cas_softc *);
117 void		cas_rxdrain(struct cas_softc *);
118 int		cas_add_rxbuf(struct cas_softc *, int idx);
119 void		cas_iff(struct cas_softc *);
120 int		cas_encap(struct cas_softc *, struct mbuf *, int *);
121 
122 /* MII methods & callbacks */
123 int		cas_mii_readreg(struct device *, int, int);
124 void		cas_mii_writereg(struct device *, int, int, int);
125 void		cas_mii_statchg(struct device *);
126 int		cas_pcs_readreg(struct device *, int, int);
127 void		cas_pcs_writereg(struct device *, int, int, int);
128 
129 int		cas_mediachange(struct ifnet *);
130 void		cas_mediastatus(struct ifnet *, struct ifmediareq *);
131 
132 int		cas_eint(struct cas_softc *, u_int);
133 int		cas_rint(struct cas_softc *);
134 int		cas_tint(struct cas_softc *, u_int32_t);
135 int		cas_pint(struct cas_softc *);
136 int		cas_intr(void *);
137 
138 #ifdef CAS_DEBUG
139 #define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
140 				printf x
141 #else
142 #define	DPRINTF(sc, x)	/* nothing */
143 #endif
144 
145 const struct pci_matchid cas_pci_devices[] = {
146 	{ PCI_VENDOR_SUN, PCI_PRODUCT_SUN_CASSINI },
147 	{ PCI_VENDOR_NS, PCI_PRODUCT_NS_SATURN }
148 };
149 
150 int
cas_match(struct device * parent,void * cf,void * aux)151 cas_match(struct device *parent, void *cf, void *aux)
152 {
153 	return (pci_matchbyid((struct pci_attach_args *)aux, cas_pci_devices,
154 	    nitems(cas_pci_devices)));
155 }
156 
157 #define	PROMHDR_PTR_DATA	0x18
158 #define	PROMDATA_PTR_VPD	0x08
159 #define	PROMDATA_DATA2		0x0a
160 
161 static const u_int8_t cas_promhdr[] = { 0x55, 0xaa };
162 static const u_int8_t cas_promdat_sun[] = {
163 	'P', 'C', 'I', 'R',
164 	PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8,
165 	PCI_PRODUCT_SUN_CASSINI & 0xff, PCI_PRODUCT_SUN_CASSINI >> 8
166 };
167 static const u_int8_t cas_promdat_ns[] = {
168 	'P', 'C', 'I', 'R',
169 	PCI_VENDOR_NS & 0xff, PCI_VENDOR_NS >> 8,
170 	PCI_PRODUCT_NS_SATURN & 0xff, PCI_PRODUCT_NS_SATURN >> 8
171 };
172 
173 static const u_int8_t cas_promdat2[] = {
174 	0x18, 0x00,			/* structure length */
175 	0x00,				/* structure revision */
176 	0x00,				/* interface revision */
177 	PCI_SUBCLASS_NETWORK_ETHERNET,	/* subclass code */
178 	PCI_CLASS_NETWORK		/* class code */
179 };
180 
181 int
cas_pci_enaddr(struct cas_softc * sc,struct pci_attach_args * pa)182 cas_pci_enaddr(struct cas_softc *sc, struct pci_attach_args *pa)
183 {
184 	struct pci_vpd_largeres *res;
185 	struct pci_vpd *vpd;
186 	bus_space_handle_t romh;
187 	bus_space_tag_t romt;
188 	bus_size_t romsize = 0;
189 	u_int8_t buf[32], *desc;
190 	pcireg_t address;
191 	int dataoff, vpdoff, len;
192 	int rv = -1;
193 
194 	if (pci_mapreg_map(pa, PCI_ROM_REG, PCI_MAPREG_TYPE_MEM, 0,
195 	    &romt, &romh, 0, &romsize, 0))
196 		return (-1);
197 
198 	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
199 	address |= PCI_ROM_ENABLE;
200 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);
201 
202 	bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf));
203 	if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr)))
204 		goto fail;
205 
206 	dataoff = buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
207 	if (dataoff < 0x1c)
208 		goto fail;
209 
210 	bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
211 	if ((bcmp(buf, cas_promdat_sun, sizeof(cas_promdat_sun)) &&
212 	    bcmp(buf, cas_promdat_ns, sizeof(cas_promdat_ns))) ||
213 	    bcmp(buf + PROMDATA_DATA2, cas_promdat2, sizeof(cas_promdat2)))
214 		goto fail;
215 
216 	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
217 	if (vpdoff < 0x1c)
218 		goto fail;
219 
220 next:
221 	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
222 	if (!PCI_VPDRES_ISLARGE(buf[0]))
223 		goto fail;
224 
225 	res = (struct pci_vpd_largeres *)buf;
226 	vpdoff += sizeof(*res);
227 
228 	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
229 	switch(PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
230 	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
231 		/* Skip identifier string. */
232 		vpdoff += len;
233 		goto next;
234 
235 	case PCI_VPDRES_TYPE_VPD:
236 		while (len > 0) {
237 			bus_space_read_region_1(romt, romh, vpdoff,
238 			     buf, sizeof(buf));
239 
240 			vpd = (struct pci_vpd *)buf;
241 			vpdoff += sizeof(*vpd) + vpd->vpd_len;
242 			len -= sizeof(*vpd) + vpd->vpd_len;
243 
244 			/*
245 			 * We're looking for an "Enhanced" VPD...
246 			 */
247 			if (vpd->vpd_key0 != 'Z')
248 				continue;
249 
250 			desc = buf + sizeof(*vpd);
251 
252 			/*
253 			 * ...which is an instance property...
254 			 */
255 			if (desc[0] != 'I')
256 				continue;
257 			desc += 3;
258 
259 			/*
260 			 * ...that's a byte array with the proper
261 			 * length for a MAC address...
262 			 */
263 			if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN)
264 				continue;
265 			desc += 2;
266 
267 			/*
268 			 * ...named "local-mac-address".
269 			 */
270 			if (strcmp(desc, "local-mac-address") != 0)
271 				continue;
272 			desc += strlen("local-mac-address") + 1;
273 
274 			bcopy(desc, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
275 			sc->sc_arpcom.ac_enaddr[5] += pa->pa_device;
276 			rv = 0;
277 		}
278 		break;
279 
280 	default:
281 		goto fail;
282 	}
283 
284  fail:
285 	if (romsize != 0)
286 		bus_space_unmap(romt, romh, romsize);
287 
288 	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
289 	address &= ~PCI_ROM_ENABLE;
290 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);
291 
292 	return (rv);
293 }
294 
295 void
cas_attach(struct device * parent,struct device * self,void * aux)296 cas_attach(struct device *parent, struct device *self, void *aux)
297 {
298 	struct pci_attach_args *pa = aux;
299 	struct cas_softc *sc = (void *)self;
300 	pci_intr_handle_t ih;
301 #ifdef __sparc64__
302 	/* XXX the following declarations should be elsewhere */
303 	extern void myetheraddr(u_char *);
304 #endif
305 	const char *intrstr = NULL;
306 	bus_size_t size;
307 	int gotenaddr = 0;
308 
309 	sc->sc_rev = PCI_REVISION(pa->pa_class);
310 	sc->sc_dmatag = pa->pa_dmat;
311 
312 #define PCI_CAS_BASEADDR	0x10
313 	if (pci_mapreg_map(pa, PCI_CAS_BASEADDR, PCI_MAPREG_TYPE_MEM, 0,
314 	    &sc->sc_memt, &sc->sc_memh, NULL, &size, 0) != 0) {
315 		printf(": can't map registers\n");
316 		return;
317 	}
318 
319 	if (cas_pci_enaddr(sc, pa) == 0)
320 		gotenaddr = 1;
321 
322 #ifdef __sparc64__
323 	if (!gotenaddr) {
324 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
325 		    sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN) <= 0)
326 			myetheraddr(sc->sc_arpcom.ac_enaddr);
327 		gotenaddr = 1;
328 	}
329 #endif
330 #ifdef __powerpc__
331 	if (!gotenaddr) {
332 		pci_ether_hw_addr(pa->pa_pc, sc->sc_arpcom.ac_enaddr);
333 		gotenaddr = 1;
334 	}
335 #endif
336 
337 	sc->sc_burst = 16;	/* XXX */
338 
339 	if (pci_intr_map(pa, &ih) != 0) {
340 		printf(": couldn't map interrupt\n");
341 		bus_space_unmap(sc->sc_memt, sc->sc_memh, size);
342 		return;
343 	}
344 	intrstr = pci_intr_string(pa->pa_pc, ih);
345 	sc->sc_ih = pci_intr_establish(pa->pa_pc,
346 	    ih, IPL_NET | IPL_MPSAFE, cas_intr, sc, self->dv_xname);
347 	if (sc->sc_ih == NULL) {
348 		printf(": couldn't establish interrupt");
349 		if (intrstr != NULL)
350 			printf(" at %s", intrstr);
351 		printf("\n");
352 		bus_space_unmap(sc->sc_memt, sc->sc_memh, size);
353 		return;
354 	}
355 
356 	printf(": %s", intrstr);
357 
358 	/*
359 	 * call the main configure
360 	 */
361 	cas_config(sc);
362 }
363 
364 /*
365  * cas_config:
366  *
367  *	Attach a Cassini interface to the system.
368  */
369 void
cas_config(struct cas_softc * sc)370 cas_config(struct cas_softc *sc)
371 {
372 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
373 	struct mii_data *mii = &sc->sc_mii;
374 	struct mii_softc *child;
375 	int i, error;
376 
377 	/* Make sure the chip is stopped. */
378 	ifp->if_softc = sc;
379 	cas_reset(sc);
380 
381 	/*
382 	 * Allocate the control data structures, and create and load the
383 	 * DMA map for it.
384 	 */
385 	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
386 	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
387 	    1, &sc->sc_cdnseg, BUS_DMA_ZERO)) != 0) {
388 		printf("\n%s: unable to allocate control data, error = %d\n",
389 		    sc->sc_dev.dv_xname, error);
390 		goto fail_0;
391 	}
392 
393 	/* XXX should map this in with correct endianness */
394 	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
395 	    sizeof(struct cas_control_data), (caddr_t *)&sc->sc_control_data,
396 	    BUS_DMA_COHERENT)) != 0) {
397 		printf("\n%s: unable to map control data, error = %d\n",
398 		    sc->sc_dev.dv_xname, error);
399 		goto fail_1;
400 	}
401 
402 	if ((error = bus_dmamap_create(sc->sc_dmatag,
403 	    sizeof(struct cas_control_data), 1,
404 	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
405 		printf("\n%s: unable to create control data DMA map, "
406 		    "error = %d\n", sc->sc_dev.dv_xname, error);
407 		goto fail_2;
408 	}
409 
410 	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
411 	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
412 	    0)) != 0) {
413 		printf("\n%s: unable to load control data DMA map, error = %d\n",
414 		    sc->sc_dev.dv_xname, error);
415 		goto fail_3;
416 	}
417 
418 	/*
419 	 * Create the receive buffer DMA maps.
420 	 */
421 	for (i = 0; i < CAS_NRXDESC; i++) {
422 		bus_dma_segment_t seg;
423 		caddr_t kva;
424 		int rseg;
425 
426 		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
427 		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
428 			printf("\n%s: unable to alloc rx DMA mem %d, "
429 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
430 			goto fail_5;
431 		}
432 		sc->sc_rxsoft[i].rxs_dmaseg = seg;
433 
434 		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
435 		    CAS_PAGE_SIZE, &kva, BUS_DMA_NOWAIT)) != 0) {
436 			printf("\n%s: unable to alloc rx DMA mem %d, "
437 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
438 			goto fail_5;
439 		}
440 		sc->sc_rxsoft[i].rxs_kva = kva;
441 
442 		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
443 		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
444 			printf("\n%s: unable to create rx DMA map %d, "
445 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
446 			goto fail_5;
447 		}
448 
449 		if ((error = bus_dmamap_load(sc->sc_dmatag,
450 		   sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
451 		   BUS_DMA_NOWAIT)) != 0) {
452 			printf("\n%s: unable to load rx DMA map %d, "
453 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
454 			goto fail_5;
455 		}
456 	}
457 
458 	/*
459 	 * Create the transmit buffer DMA maps.
460 	 */
461 	for (i = 0; i < CAS_NTXDESC; i++) {
462 		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
463 		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
464 		    &sc->sc_txd[i].sd_map)) != 0) {
465 			printf("\n%s: unable to create tx DMA map %d, "
466 			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
467 			goto fail_6;
468 		}
469 		sc->sc_txd[i].sd_mbuf = NULL;
470 	}
471 
472 	/*
473 	 * From this point forward, the attachment cannot fail.  A failure
474 	 * before this point releases all resources that may have been
475 	 * allocated.
476 	 */
477 
478 	/* Announce ourselves. */
479 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
480 
481 	/* Get RX FIFO size */
482 	sc->sc_rxfifosize = 16 * 1024;
483 
484 	/* Initialize ifnet structure. */
485 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
486 	ifp->if_softc = sc;
487 	ifp->if_flags =
488 	    IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
489 	ifp->if_start = cas_start;
490 	ifp->if_ioctl = cas_ioctl;
491 	ifp->if_watchdog = cas_watchdog;
492 	ifq_init_maxlen(&ifp->if_snd, CAS_NTXDESC - 1);
493 
494 	ifp->if_capabilities = IFCAP_VLAN_MTU;
495 
496 	/* Initialize ifmedia structures and MII info */
497 	mii->mii_ifp = ifp;
498 	mii->mii_readreg = cas_mii_readreg;
499 	mii->mii_writereg = cas_mii_writereg;
500 	mii->mii_statchg = cas_mii_statchg;
501 
502 	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);
503 
504 	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);
505 
506 	cas_mifinit(sc);
507 
508 	if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
509 		sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
510 		bus_space_write_4(sc->sc_memt, sc->sc_memh,
511 	            CAS_MIF_CONFIG, sc->sc_mif_config);
512 	}
513 
514 	mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
515 	    MII_OFFSET_ANY, 0);
516 
517 	child = LIST_FIRST(&mii->mii_phys);
518 	if (child == NULL &&
519 	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0|CAS_MIF_CONFIG_MDI1)) {
520 		/*
521 		 * Try the external PCS SERDES if we didn't find any
522 		 * MII devices.
523 		 */
524 		bus_space_write_4(sc->sc_memt, sc->sc_memh,
525 		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);
526 
527 		bus_space_write_4(sc->sc_memt, sc->sc_memh,
528 		     CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);
529 
530 		mii->mii_readreg = cas_pcs_readreg;
531 		mii->mii_writereg = cas_pcs_writereg;
532 
533 		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
534 		    MII_OFFSET_ANY, MIIF_NOISOLATE);
535 	}
536 
537 	child = LIST_FIRST(&mii->mii_phys);
538 	if (child == NULL) {
539 		/* No PHY attached */
540 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
541 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
542 	} else {
543 		/*
544 		 * Walk along the list of attached MII devices and
545 		 * establish an `MII instance' to `phy number'
546 		 * mapping. We'll use this mapping in media change
547 		 * requests to determine which phy to use to program
548 		 * the MIF configuration register.
549 		 */
550 		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
551 			/*
552 			 * Note: we support just two PHYs: the built-in
553 			 * internal device and an external on the MII
554 			 * connector.
555 			 */
556 			if (child->mii_phy > 1 || child->mii_inst > 1) {
557 				printf("%s: cannot accommodate MII device %s"
558 				       " at phy %d, instance %lld\n",
559 				       sc->sc_dev.dv_xname,
560 				       child->mii_dev.dv_xname,
561 				       child->mii_phy, child->mii_inst);
562 				continue;
563 			}
564 
565 			sc->sc_phys[child->mii_inst] = child->mii_phy;
566 		}
567 
568 		/*
569 		 * XXX - we can really do the following ONLY if the
570 		 * phy indeed has the auto negotiation capability!!
571 		 */
572 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
573 	}
574 
575 	/* Attach the interface. */
576 	if_attach(ifp);
577 	ether_ifattach(ifp);
578 
579 	timeout_set(&sc->sc_tick_ch, cas_tick, sc);
580 	return;
581 
582 	/*
583 	 * Free any resources we've allocated during the failed attach
584 	 * attempt.  Do this in reverse order and fall through.
585 	 */
586  fail_6:
587 	for (i = 0; i < CAS_NTXDESC; i++) {
588 		if (sc->sc_txd[i].sd_map != NULL)
589 			bus_dmamap_destroy(sc->sc_dmatag,
590 			    sc->sc_txd[i].sd_map);
591 	}
592  fail_5:
593 	for (i = 0; i < CAS_NRXDESC; i++) {
594 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
595 			bus_dmamap_destroy(sc->sc_dmatag,
596 			    sc->sc_rxsoft[i].rxs_dmamap);
597 	}
598 	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
599  fail_3:
600 	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
601  fail_2:
602 	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
603 	    sizeof(struct cas_control_data));
604  fail_1:
605 	bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
606  fail_0:
607 	return;
608 }
609 
610 
611 void
cas_tick(void * arg)612 cas_tick(void *arg)
613 {
614 	struct cas_softc *sc = arg;
615 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
616 	bus_space_tag_t t = sc->sc_memt;
617 	bus_space_handle_t mac = sc->sc_memh;
618 	int s;
619 	u_int32_t v;
620 
621 	/* unload collisions counters */
622 	v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT) +
623 	    bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT);
624 	ifp->if_collisions += v +
625 	    bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT) +
626 	    bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT);
627 	ifp->if_oerrors += v;
628 
629 	/* read error counters */
630 	ifp->if_ierrors +=
631 	    bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT) +
632 	    bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR) +
633 	    bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT) +
634 	    bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL);
635 
636 	/* clear the hardware counters */
637 	bus_space_write_4(t, mac, CAS_MAC_NORM_COLL_CNT, 0);
638 	bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0);
639 	bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0);
640 	bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0);
641 	bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0);
642 	bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0);
643 	bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0);
644 	bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0);
645 
646 	s = splnet();
647 	mii_tick(&sc->sc_mii);
648 	splx(s);
649 
650 	timeout_add_sec(&sc->sc_tick_ch, 1);
651 }
652 
653 int
cas_bitwait(struct cas_softc * sc,bus_space_handle_t h,int r,u_int32_t clr,u_int32_t set)654 cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r,
655     u_int32_t clr, u_int32_t set)
656 {
657 	int i;
658 	u_int32_t reg;
659 
660 	for (i = TRIES; i--; DELAY(100)) {
661 		reg = bus_space_read_4(sc->sc_memt, h, r);
662 		if ((reg & clr) == 0 && (reg & set) == set)
663 			return (1);
664 	}
665 
666 	return (0);
667 }
668 
669 void
cas_reset(struct cas_softc * sc)670 cas_reset(struct cas_softc *sc)
671 {
672 	bus_space_tag_t t = sc->sc_memt;
673 	bus_space_handle_t h = sc->sc_memh;
674 	int s;
675 
676 	s = splnet();
677 	DPRINTF(sc, ("%s: cas_reset\n", sc->sc_dev.dv_xname));
678 	cas_reset_rx(sc);
679 	cas_reset_tx(sc);
680 
681 	/* Do a full reset */
682 	bus_space_write_4(t, h, CAS_RESET,
683 	    CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS);
684 	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
685 		printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
686 	splx(s);
687 }
688 
689 
690 /*
691  * cas_rxdrain:
692  *
693  *	Drain the receive queue.
694  */
695 void
cas_rxdrain(struct cas_softc * sc)696 cas_rxdrain(struct cas_softc *sc)
697 {
698 	/* Nothing to do yet. */
699 }
700 
701 /*
702  * Reset the whole thing.
703  */
704 void
cas_stop(struct ifnet * ifp,int disable)705 cas_stop(struct ifnet *ifp, int disable)
706 {
707 	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
708 	struct cas_sxd *sd;
709 	u_int32_t i;
710 
711 	DPRINTF(sc, ("%s: cas_stop\n", sc->sc_dev.dv_xname));
712 
713 	timeout_del(&sc->sc_tick_ch);
714 
715 	/*
716 	 * Mark the interface down and cancel the watchdog timer.
717 	 */
718 	ifp->if_flags &= ~IFF_RUNNING;
719 	ifq_clr_oactive(&ifp->if_snd);
720 	ifp->if_timer = 0;
721 
722 	mii_down(&sc->sc_mii);
723 
724 	cas_reset_rx(sc);
725 	cas_reset_tx(sc);
726 
727 	intr_barrier(sc->sc_ih);
728 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
729 
730 	/*
731 	 * Release any queued transmit buffers.
732 	 */
733 	for (i = 0; i < CAS_NTXDESC; i++) {
734 		sd = &sc->sc_txd[i];
735 		if (sd->sd_mbuf != NULL) {
736 			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
737 			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
738 			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
739 			m_freem(sd->sd_mbuf);
740 			sd->sd_mbuf = NULL;
741 		}
742 	}
743 	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;
744 
745 	if (disable)
746 		cas_rxdrain(sc);
747 }
748 
749 
750 /*
751  * Reset the receiver
752  */
753 int
cas_reset_rx(struct cas_softc * sc)754 cas_reset_rx(struct cas_softc *sc)
755 {
756 	bus_space_tag_t t = sc->sc_memt;
757 	bus_space_handle_t h = sc->sc_memh;
758 
759 	/*
760 	 * Resetting while DMA is in progress can cause a bus hang, so we
761 	 * disable DMA first.
762 	 */
763 	cas_disable_rx(sc);
764 	bus_space_write_4(t, h, CAS_RX_CONFIG, 0);
765 	/* Wait till it finishes */
766 	if (!cas_bitwait(sc, h, CAS_RX_CONFIG, 1, 0))
767 		printf("%s: cannot disable rx dma\n", sc->sc_dev.dv_xname);
768 	/* Wait 5ms extra. */
769 	delay(5000);
770 
771 	/* Finally, reset the ERX */
772 	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX);
773 	/* Wait till it finishes */
774 	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX, 0)) {
775 		printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
776 		return (1);
777 	}
778 	return (0);
779 }
780 
781 
782 /*
783  * Reset the transmitter
784  */
785 int
cas_reset_tx(struct cas_softc * sc)786 cas_reset_tx(struct cas_softc *sc)
787 {
788 	bus_space_tag_t t = sc->sc_memt;
789 	bus_space_handle_t h = sc->sc_memh;
790 
791 	/*
792 	 * Resetting while DMA is in progress can cause a bus hang, so we
793 	 * disable DMA first.
794 	 */
795 	cas_disable_tx(sc);
796 	bus_space_write_4(t, h, CAS_TX_CONFIG, 0);
797 	/* Wait till it finishes */
798 	if (!cas_bitwait(sc, h, CAS_TX_CONFIG, 1, 0))
799 		printf("%s: cannot disable tx dma\n", sc->sc_dev.dv_xname);
800 	/* Wait 5ms extra. */
801 	delay(5000);
802 
803 	/* Finally, reset the ETX */
804 	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX);
805 	/* Wait till it finishes */
806 	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_TX, 0)) {
807 		printf("%s: cannot reset transmitter\n",
808 			sc->sc_dev.dv_xname);
809 		return (1);
810 	}
811 	return (0);
812 }
813 
814 /*
815  * disable receiver.
816  */
817 int
cas_disable_rx(struct cas_softc * sc)818 cas_disable_rx(struct cas_softc *sc)
819 {
820 	bus_space_tag_t t = sc->sc_memt;
821 	bus_space_handle_t h = sc->sc_memh;
822 	u_int32_t cfg;
823 
824 	/* Flip the enable bit */
825 	cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
826 	cfg &= ~CAS_MAC_RX_ENABLE;
827 	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg);
828 
829 	/* Wait for it to finish */
830 	return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG, CAS_MAC_RX_ENABLE, 0));
831 }
832 
833 /*
834  * disable transmitter.
835  */
836 int
cas_disable_tx(struct cas_softc * sc)837 cas_disable_tx(struct cas_softc *sc)
838 {
839 	bus_space_tag_t t = sc->sc_memt;
840 	bus_space_handle_t h = sc->sc_memh;
841 	u_int32_t cfg;
842 
843 	/* Flip the enable bit */
844 	cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG);
845 	cfg &= ~CAS_MAC_TX_ENABLE;
846 	bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg);
847 
848 	/* Wait for it to finish */
849 	return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG, CAS_MAC_TX_ENABLE, 0));
850 }
851 
852 /*
853  * Initialize interface.
854  */
855 int
cas_meminit(struct cas_softc * sc)856 cas_meminit(struct cas_softc *sc)
857 {
858 	struct cas_rxsoft *rxs;
859 	int i, error;
860 
861 	rxs = (void *)&error;
862 
863 	/*
864 	 * Initialize the transmit descriptor ring.
865 	 */
866 	for (i = 0; i < CAS_NTXDESC; i++) {
867 		sc->sc_txdescs[i].cd_flags = 0;
868 		sc->sc_txdescs[i].cd_addr = 0;
869 	}
870 	CAS_CDTXSYNC(sc, 0, CAS_NTXDESC,
871 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
872 
873 	/*
874 	 * Initialize the receive descriptor and receive job
875 	 * descriptor rings.
876 	 */
877 	for (i = 0; i < CAS_NRXDESC; i++)
878 		CAS_INIT_RXDESC(sc, i, i);
879 	sc->sc_rxdptr = 0;
880 	sc->sc_rxptr = 0;
881 
882 	/*
883 	 * Initialize the receive completion ring.
884 	 */
885 	for (i = 0; i < CAS_NRXCOMP; i++) {
886 		sc->sc_rxcomps[i].cc_word[0] = 0;
887 		sc->sc_rxcomps[i].cc_word[1] = 0;
888 		sc->sc_rxcomps[i].cc_word[2] = 0;
889 		sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN);
890 		CAS_CDRXCSYNC(sc, i,
891 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
892 	}
893 
894 	return (0);
895 }
896 
897 int
cas_ringsize(int sz)898 cas_ringsize(int sz)
899 {
900 	switch (sz) {
901 	case 32:
902 		return CAS_RING_SZ_32;
903 	case 64:
904 		return CAS_RING_SZ_64;
905 	case 128:
906 		return CAS_RING_SZ_128;
907 	case 256:
908 		return CAS_RING_SZ_256;
909 	case 512:
910 		return CAS_RING_SZ_512;
911 	case 1024:
912 		return CAS_RING_SZ_1024;
913 	case 2048:
914 		return CAS_RING_SZ_2048;
915 	case 4096:
916 		return CAS_RING_SZ_4096;
917 	case 8192:
918 		return CAS_RING_SZ_8192;
919 	default:
920 		printf("cas: invalid Receive Descriptor ring size %d\n", sz);
921 		return CAS_RING_SZ_32;
922 	}
923 }
924 
925 int
cas_cringsize(int sz)926 cas_cringsize(int sz)
927 {
928 	int i;
929 
930 	for (i = 0; i < 9; i++)
931 		if (sz == (128 << i))
932 			return i;
933 
934 	printf("cas: invalid completion ring size %d\n", sz);
935 	return 128;
936 }
937 
938 /*
939  * Initialization of interface; set up initialization block
940  * and transmit/receive descriptor rings.
941  */
942 int
cas_init(struct ifnet * ifp)943 cas_init(struct ifnet *ifp)
944 {
945 	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
946 	bus_space_tag_t t = sc->sc_memt;
947 	bus_space_handle_t h = sc->sc_memh;
948 	int s;
949 	u_int max_frame_size;
950 	u_int32_t v;
951 
952 	s = splnet();
953 
954 	DPRINTF(sc, ("%s: cas_init: calling stop\n", sc->sc_dev.dv_xname));
955 	/*
956 	 * Initialization sequence. The numbered steps below correspond
957 	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
958 	 * Channel Engine manual (part of the PCIO manual).
959 	 * See also the STP2002-STQ document from Sun Microsystems.
960 	 */
961 
962 	/* step 1 & 2. Reset the Ethernet Channel */
963 	cas_stop(ifp, 0);
964 	cas_reset(sc);
965 	DPRINTF(sc, ("%s: cas_init: restarting\n", sc->sc_dev.dv_xname));
966 
967 	/* Re-initialize the MIF */
968 	cas_mifinit(sc);
969 
970 	/* step 3. Setup data structures in host memory */
971 	cas_meminit(sc);
972 
973 	/* step 4. TX MAC registers & counters */
974 	cas_init_regs(sc);
975 	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
976 	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
977 	bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);
978 
979 	/* step 5. RX MAC registers & counters */
980 	cas_iff(sc);
981 
982 	/* step 6 & 7. Program Descriptor Ring Base Addresses */
983 	KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0);
984 	bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,
985 	    (((uint64_t)CAS_CDTXADDR(sc,0)) >> 32));
986 	bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0));
987 
988 	KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0);
989 	bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,
990 	    (((uint64_t)CAS_CDRXADDR(sc,0)) >> 32));
991 	bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0));
992 
993 	KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0);
994 	bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,
995 	    (((uint64_t)CAS_CDRXCADDR(sc,0)) >> 32));
996 	bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0));
997 
998 	if (CAS_PLUS(sc)) {
999 		KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0);
1000 		bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,
1001 		    (((uint64_t)CAS_CDRXADDR2(sc,0)) >> 32));
1002 		bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,
1003 		    CAS_CDRXADDR2(sc, 0));
1004 	}
1005 
1006 	/* step 8. Global Configuration & Interrupt Mask */
1007 	bus_space_write_4(t, h, CAS_INTMASK,
1008 		      ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY|
1009 			CAS_INTR_TX_TAG_ERR|
1010 			CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF|
1011 			CAS_INTR_RX_TAG_ERR|
1012 			CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS|
1013 			CAS_INTR_MAC_CONTROL|CAS_INTR_MIF|
1014 			CAS_INTR_BERR));
1015 	bus_space_write_4(t, h, CAS_MAC_RX_MASK,
1016 	    CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT);
1017 	bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE);
1018 	bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */
1019 
1020 	/* step 9. ETX Configuration: use mostly default values */
1021 
1022 	/* Enable DMA */
1023 	v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10;
1024 	bus_space_write_4(t, h, CAS_TX_CONFIG,
1025 	    v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29));
1026 	bus_space_write_4(t, h, CAS_TX_KICK, 0);
1027 
1028 	/* step 10. ERX Configuration */
1029 
1030 	/* Encode Receive Descriptor ring size */
1031 	v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT;
1032 	if (CAS_PLUS(sc))
1033 		v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT;
1034 
1035 	/* Encode Receive Completion ring size */
1036 	v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT;
1037 
1038 	/* Enable DMA */
1039 	bus_space_write_4(t, h, CAS_RX_CONFIG,
1040 	    v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN);
1041 
1042 	/*
1043 	 * The following value is for an OFF Threshold of about 3/4 full
1044 	 * and an ON Threshold of 1/4 full.
1045 	 */
1046 	bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,
1047 	    (3 * sc->sc_rxfifosize / 256) |
1048 	    ((sc->sc_rxfifosize / 256) << 12));
1049 	bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6);
1050 
1051 	/* step 11. Configure Media */
1052 	mii_mediachg(&sc->sc_mii);
1053 
1054 	/* step 12. RX_MAC Configuration Register */
1055 	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
1056 	v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC;
1057 	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);
1058 
1059 	/* step 14. Issue Transmit Pending command */
1060 
1061 	/* step 15.  Give the receiver a swift kick */
1062 	bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4);
1063 	if (CAS_PLUS(sc))
1064 		bus_space_write_4(t, h, CAS_RX_KICK2, 4);
1065 
1066 	/* Start the one second timer. */
1067 	timeout_add_sec(&sc->sc_tick_ch, 1);
1068 
1069 	ifp->if_flags |= IFF_RUNNING;
1070 	ifq_clr_oactive(&ifp->if_snd);
1071 	ifp->if_timer = 0;
1072 	splx(s);
1073 
1074 	return (0);
1075 }
1076 
1077 void
cas_init_regs(struct cas_softc * sc)1078 cas_init_regs(struct cas_softc *sc)
1079 {
1080 	bus_space_tag_t t = sc->sc_memt;
1081 	bus_space_handle_t h = sc->sc_memh;
1082 	u_int32_t v, r;
1083 
1084 	/* These regs are not cleared on reset */
1085 	sc->sc_inited = 0;
1086 	if (!sc->sc_inited) {
1087 		/* Load recommended values  */
1088 		bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00);
1089 		bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08);
1090 		bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04);
1091 
1092 		bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
1093 		/* Max frame and max burst size */
1094 		v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */;
1095 		bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);
1096 
1097 		bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07);
1098 		bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04);
1099 		bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10);
1100 		bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088);
1101 		bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED,
1102 		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|sc->sc_arpcom.ac_enaddr[4])&0x3ff);
1103 
1104 		/* Secondary MAC addresses set to 0:0:0:0:0:0 */
1105 		for (r = CAS_MAC_ADDR3; r < CAS_MAC_ADDR42; r += 4)
1106 		  	bus_space_write_4(t, h, r, 0);
1107 
1108 		/* MAC control addr set to 0:1:c2:0:1:80 */
1109 		bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001);
1110 		bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200);
1111 		bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180);
1112 
1113 		/* MAC filter addr set to 0:0:0:0:0:0 */
1114 		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0);
1115 		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0);
1116 		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0);
1117 
1118 		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0);
1119 		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 0);
1120 
1121 		/* Hash table initialized to 0 */
1122 		for (r = CAS_MAC_HASH0; r <= CAS_MAC_HASH15; r += 4)
1123 			bus_space_write_4(t, h, r, 0);
1124 
1125 		sc->sc_inited = 1;
1126 	}
1127 
1128 	/* Counters need to be zeroed */
1129 	bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0);
1130 	bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0);
1131 	bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0);
1132 	bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0);
1133 	bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0);
1134 	bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0);
1135 	bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0);
1136 	bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0);
1137 	bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0);
1138 	bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0);
1139 	bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0);
1140 
1141 	/* Un-pause stuff */
1142 	bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0);
1143 
1144 	/*
1145 	 * Set the station address.
1146 	 */
1147 	bus_space_write_4(t, h, CAS_MAC_ADDR0,
1148 		(sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
1149 	bus_space_write_4(t, h, CAS_MAC_ADDR1,
1150 		(sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
1151 	bus_space_write_4(t, h, CAS_MAC_ADDR2,
1152 		(sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
1153 }
1154 
1155 /*
1156  * Receive interrupt.
1157  */
1158 int
cas_rint(struct cas_softc * sc)1159 cas_rint(struct cas_softc *sc)
1160 {
1161 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1162 	bus_space_tag_t t = sc->sc_memt;
1163 	bus_space_handle_t h = sc->sc_memh;
1164 	struct cas_rxsoft *rxs;
1165 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1166 	struct mbuf *m;
1167 	u_int64_t word[4];
1168 	int len, off, idx;
1169 	int i, skip;
1170 	caddr_t cp;
1171 
1172 	for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) {
1173 		CAS_CDRXCSYNC(sc, i,
1174 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1175 
1176 		word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]);
1177 		word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]);
1178 		word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]);
1179 		word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]);
1180 
1181 		/* Stop if the hardware still owns the descriptor. */
1182 		if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN)
1183 			break;
1184 
1185 		len = CAS_RC1_HDR_LEN(word[1]);
1186 		if (len > 0) {
1187 			off = CAS_RC1_HDR_OFF(word[1]);
1188 			idx = CAS_RC1_HDR_IDX(word[1]);
1189 			rxs = &sc->sc_rxsoft[idx];
1190 
1191 			DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
1192 			    idx, off, len));
1193 
1194 			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1195 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1196 
1197 			cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN;
1198 			m = m_devget(cp, len, ETHER_ALIGN);
1199 
1200 			if (word[0] & CAS_RC0_RELEASE_HDR)
1201 				cas_add_rxbuf(sc, idx);
1202 
1203 			if (m != NULL) {
1204 				ml_enqueue(&ml, m);
1205 			} else
1206 				ifp->if_ierrors++;
1207 		}
1208 
1209 		len = CAS_RC0_DATA_LEN(word[0]);
1210 		if (len > 0) {
1211 			off = CAS_RC0_DATA_OFF(word[0]);
1212 			idx = CAS_RC0_DATA_IDX(word[0]);
1213 			rxs = &sc->sc_rxsoft[idx];
1214 
1215 			DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
1216 			    idx, off, len));
1217 
1218 			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1219 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1220 
1221 			/* XXX We should not be copying the packet here. */
1222 			cp = rxs->rxs_kva + off + ETHER_ALIGN;
1223 			m = m_devget(cp, len, ETHER_ALIGN);
1224 
1225 			if (word[0] & CAS_RC0_RELEASE_DATA)
1226 				cas_add_rxbuf(sc, idx);
1227 
1228 			if (m != NULL) {
1229 				ml_enqueue(&ml, m);
1230 			} else
1231 				ifp->if_ierrors++;
1232 		}
1233 
1234 		if (word[0] & CAS_RC0_SPLIT)
1235 			printf("split packet\n");
1236 
1237 		skip = CAS_RC0_SKIP(word[0]);
1238 	}
1239 
1240 	while (sc->sc_rxptr != i) {
1241 		sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0;
1242 		sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0;
1243 		sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0;
1244 		sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] =
1245 		    CAS_DMA_WRITE(CAS_RC3_OWN);
1246 		CAS_CDRXCSYNC(sc, sc->sc_rxptr,
1247 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1248 
1249 		sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr);
1250 	}
1251 
1252 	bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr);
1253 
1254 	DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n",
1255 		sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION)));
1256 
1257 	if_input(ifp, &ml);
1258 
1259 	return (1);
1260 }
1261 
1262 /*
1263  * cas_add_rxbuf:
1264  *
1265  *	Add a receive buffer to the indicated descriptor.
1266  */
1267 int
cas_add_rxbuf(struct cas_softc * sc,int idx)1268 cas_add_rxbuf(struct cas_softc *sc, int idx)
1269 {
1270 	bus_space_tag_t t = sc->sc_memt;
1271 	bus_space_handle_t h = sc->sc_memh;
1272 
1273 	CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx);
1274 
1275 	if ((sc->sc_rxdptr % 4) == 0)
1276 		bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr);
1277 
1278 	if (++sc->sc_rxdptr == CAS_NRXDESC)
1279 		sc->sc_rxdptr = 0;
1280 
1281 	return (0);
1282 }
1283 
1284 int
cas_eint(struct cas_softc * sc,u_int status)1285 cas_eint(struct cas_softc *sc, u_int status)
1286 {
1287 	if ((status & CAS_INTR_MIF) != 0) {
1288 #ifdef CAS_DEBUG
1289 		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1290 #endif
1291 		return (1);
1292 	}
1293 
1294 	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, CAS_INTR_BITS);
1295 	return (1);
1296 }
1297 
1298 int
cas_pint(struct cas_softc * sc)1299 cas_pint(struct cas_softc *sc)
1300 {
1301 	bus_space_tag_t t = sc->sc_memt;
1302 	bus_space_handle_t seb = sc->sc_memh;
1303 	u_int32_t status;
1304 
1305 	status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1306 	status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
1307 #ifdef CAS_DEBUG
1308 	if (status)
1309 		printf("%s: link status changed\n", sc->sc_dev.dv_xname);
1310 #endif
1311 	return (1);
1312 }
1313 
1314 int
cas_intr(void * v)1315 cas_intr(void *v)
1316 {
1317 	struct cas_softc *sc = (struct cas_softc *)v;
1318 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1319 	bus_space_tag_t t = sc->sc_memt;
1320 	bus_space_handle_t seb = sc->sc_memh;
1321 	u_int32_t status;
1322 	int r = 0;
1323 
1324 	status = bus_space_read_4(t, seb, CAS_STATUS);
1325 	DPRINTF(sc, ("%s: cas_intr: cplt %xstatus %b\n",
1326 		sc->sc_dev.dv_xname, (status>>19), status, CAS_INTR_BITS));
1327 
1328 	if ((status & CAS_INTR_PCS) != 0)
1329 		r |= cas_pint(sc);
1330 
1331 	if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
1332 	    CAS_INTR_RX_COMP_FULL | CAS_INTR_BERR)) != 0)
1333 		r |= cas_eint(sc, status);
1334 
1335 	if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0)
1336 		r |= cas_tint(sc, status);
1337 
1338 	if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0)
1339 		r |= cas_rint(sc);
1340 
1341 	/* We should eventually do more than just print out error stats. */
1342 	if (status & CAS_INTR_TX_MAC) {
1343 		int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS);
1344 #ifdef CAS_DEBUG
1345 		if (txstat & ~CAS_MAC_TX_XMIT_DONE)
1346 			printf("%s: MAC tx fault, status %x\n",
1347 			    sc->sc_dev.dv_xname, txstat);
1348 #endif
1349 		if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG)) {
1350 			KERNEL_LOCK();
1351 			cas_init(ifp);
1352 			KERNEL_UNLOCK();
1353 		}
1354 	}
1355 	if (status & CAS_INTR_RX_MAC) {
1356 		int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS);
1357 #ifdef CAS_DEBUG
1358  		if (rxstat & ~CAS_MAC_RX_DONE)
1359  			printf("%s: MAC rx fault, status %x\n",
1360  			    sc->sc_dev.dv_xname, rxstat);
1361 #endif
1362 		/*
1363 		 * On some chip revisions CAS_MAC_RX_OVERFLOW happen often
1364 		 * due to a silicon bug so handle them silently.
1365 		 */
1366 		if (rxstat & CAS_MAC_RX_OVERFLOW) {
1367 			KERNEL_LOCK();
1368 			ifp->if_ierrors++;
1369 			cas_init(ifp);
1370 			KERNEL_UNLOCK();
1371 		}
1372 #ifdef CAS_DEBUG
1373 		else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT))
1374 			printf("%s: MAC rx fault, status %x\n",
1375 			    sc->sc_dev.dv_xname, rxstat);
1376 #endif
1377 	}
1378 	return (r);
1379 }
1380 
1381 
1382 void
cas_watchdog(struct ifnet * ifp)1383 cas_watchdog(struct ifnet *ifp)
1384 {
1385 	struct cas_softc *sc = ifp->if_softc;
1386 
1387 	DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x "
1388 		"CAS_MAC_RX_CONFIG %x\n",
1389 		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG),
1390 		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS),
1391 		bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG)));
1392 
1393 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1394 	++ifp->if_oerrors;
1395 
1396 	/* Try to get more packets going. */
1397 	cas_init(ifp);
1398 }
1399 
1400 /*
1401  * Initialize the MII Management Interface
1402  */
1403 void
cas_mifinit(struct cas_softc * sc)1404 cas_mifinit(struct cas_softc *sc)
1405 {
1406 	bus_space_tag_t t = sc->sc_memt;
1407 	bus_space_handle_t mif = sc->sc_memh;
1408 
1409 	/* Configure the MIF in frame mode */
1410 	sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG);
1411 	sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA;
1412 	bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config);
1413 }
1414 
1415 /*
1416  * MII interface
1417  *
1418  * The Cassini MII interface supports at least three different operating modes:
1419  *
1420  * Bitbang mode is implemented using data, clock and output enable registers.
1421  *
1422  * Frame mode is implemented by loading a complete frame into the frame
1423  * register and polling the valid bit for completion.
1424  *
1425  * Polling mode uses the frame register but completion is indicated by
1426  * an interrupt.
1427  *
1428  */
1429 int
cas_mii_readreg(struct device * self,int phy,int reg)1430 cas_mii_readreg(struct device *self, int phy, int reg)
1431 {
1432 	struct cas_softc *sc = (void *)self;
1433 	bus_space_tag_t t = sc->sc_memt;
1434 	bus_space_handle_t mif = sc->sc_memh;
1435 	int n;
1436 	u_int32_t v;
1437 
1438 #ifdef CAS_DEBUG
1439 	if (sc->sc_debug)
1440 		printf("cas_mii_readreg: phy %d reg %d\n", phy, reg);
1441 #endif
1442 
1443 	/* Construct the frame command */
1444 	v = (reg << CAS_MIF_REG_SHIFT)	| (phy << CAS_MIF_PHY_SHIFT) |
1445 		CAS_MIF_FRAME_READ;
1446 
1447 	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1448 	for (n = 0; n < 100; n++) {
1449 		DELAY(1);
1450 		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1451 		if (v & CAS_MIF_FRAME_TA0)
1452 			return (v & CAS_MIF_FRAME_DATA);
1453 	}
1454 
1455 	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
1456 	return (0);
1457 }
1458 
1459 void
cas_mii_writereg(struct device * self,int phy,int reg,int val)1460 cas_mii_writereg(struct device *self, int phy, int reg, int val)
1461 {
1462 	struct cas_softc *sc = (void *)self;
1463 	bus_space_tag_t t = sc->sc_memt;
1464 	bus_space_handle_t mif = sc->sc_memh;
1465 	int n;
1466 	u_int32_t v;
1467 
1468 #ifdef CAS_DEBUG
1469 	if (sc->sc_debug)
1470 		printf("cas_mii_writereg: phy %d reg %d val %x\n",
1471 			phy, reg, val);
1472 #endif
1473 
1474 	/* Construct the frame command */
1475 	v = CAS_MIF_FRAME_WRITE			|
1476 	    (phy << CAS_MIF_PHY_SHIFT)		|
1477 	    (reg << CAS_MIF_REG_SHIFT)		|
1478 	    (val & CAS_MIF_FRAME_DATA);
1479 
1480 	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
1481 	for (n = 0; n < 100; n++) {
1482 		DELAY(1);
1483 		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
1484 		if (v & CAS_MIF_FRAME_TA0)
1485 			return;
1486 	}
1487 
1488 	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
1489 }
1490 
1491 void
cas_mii_statchg(struct device * dev)1492 cas_mii_statchg(struct device *dev)
1493 {
1494 	struct cas_softc *sc = (void *)dev;
1495 #ifdef CAS_DEBUG
1496 	uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
1497 #endif
1498 	bus_space_tag_t t = sc->sc_memt;
1499 	bus_space_handle_t mac = sc->sc_memh;
1500 	u_int32_t v;
1501 
1502 #ifdef CAS_DEBUG
1503 	if (sc->sc_debug)
1504 		printf("cas_mii_statchg: status change: phy = %d\n",
1505 		    sc->sc_phys[instance]);
1506 #endif
1507 
1508 	/* Set tx full duplex options */
1509 	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0);
1510 	delay(10000); /* reg must be cleared and delay before changing. */
1511 	v = CAS_MAC_TX_ENA_IPG0|CAS_MAC_TX_NGU|CAS_MAC_TX_NGU_LIMIT|
1512 		CAS_MAC_TX_ENABLE;
1513 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
1514 		v |= CAS_MAC_TX_IGN_CARRIER|CAS_MAC_TX_IGN_COLLIS;
1515 	}
1516 	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v);
1517 
1518 	/* XIF Configuration */
1519 	v = CAS_MAC_XIF_TX_MII_ENA;
1520 	v |= CAS_MAC_XIF_LINK_LED;
1521 
1522 	/* MII needs echo disable if half duplex. */
1523 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
1524 		/* turn on full duplex LED */
1525 		v |= CAS_MAC_XIF_FDPLX_LED;
1526 	else
1527 		/* half duplex -- disable echo */
1528 		v |= CAS_MAC_XIF_ECHO_DISABL;
1529 
1530 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1531 	case IFM_1000_T:  /* Gigabit using GMII interface */
1532 	case IFM_1000_SX:
1533 		v |= CAS_MAC_XIF_GMII_MODE;
1534 		break;
1535 	default:
1536 		v &= ~CAS_MAC_XIF_GMII_MODE;
1537 	}
1538 	bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v);
1539 }
1540 
1541 int
cas_pcs_readreg(struct device * self,int phy,int reg)1542 cas_pcs_readreg(struct device *self, int phy, int reg)
1543 {
1544 	struct cas_softc *sc = (void *)self;
1545 	bus_space_tag_t t = sc->sc_memt;
1546 	bus_space_handle_t pcs = sc->sc_memh;
1547 
1548 #ifdef CAS_DEBUG
1549 	if (sc->sc_debug)
1550 		printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg);
1551 #endif
1552 
1553 	if (phy != CAS_PHYAD_EXTERNAL)
1554 		return (0);
1555 
1556 	switch (reg) {
1557 	case MII_BMCR:
1558 		reg = CAS_MII_CONTROL;
1559 		break;
1560 	case MII_BMSR:
1561 		reg = CAS_MII_STATUS;
1562 		break;
1563 	case MII_ANAR:
1564 		reg = CAS_MII_ANAR;
1565 		break;
1566 	case MII_ANLPAR:
1567 		reg = CAS_MII_ANLPAR;
1568 		break;
1569 	case MII_EXTSR:
1570 		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
1571 	default:
1572 		return (0);
1573 	}
1574 
1575 	return bus_space_read_4(t, pcs, reg);
1576 }
1577 
1578 void
cas_pcs_writereg(struct device * self,int phy,int reg,int val)1579 cas_pcs_writereg(struct device *self, int phy, int reg, int val)
1580 {
1581 	struct cas_softc *sc = (void *)self;
1582 	bus_space_tag_t t = sc->sc_memt;
1583 	bus_space_handle_t pcs = sc->sc_memh;
1584 	int reset = 0;
1585 
1586 #ifdef CAS_DEBUG
1587 	if (sc->sc_debug)
1588 		printf("cas_pcs_writereg: phy %d reg %d val %x\n",
1589 			phy, reg, val);
1590 #endif
1591 
1592 	if (phy != CAS_PHYAD_EXTERNAL)
1593 		return;
1594 
1595 	if (reg == MII_ANAR)
1596 		bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0);
1597 
1598 	switch (reg) {
1599 	case MII_BMCR:
1600 		reset = (val & CAS_MII_CONTROL_RESET);
1601 		reg = CAS_MII_CONTROL;
1602 		break;
1603 	case MII_BMSR:
1604 		reg = CAS_MII_STATUS;
1605 		break;
1606 	case MII_ANAR:
1607 		reg = CAS_MII_ANAR;
1608 		break;
1609 	case MII_ANLPAR:
1610 		reg = CAS_MII_ANLPAR;
1611 		break;
1612 	default:
1613 		return;
1614 	}
1615 
1616 	bus_space_write_4(t, pcs, reg, val);
1617 
1618 	if (reset)
1619 		cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0);
1620 
1621 	if (reg == CAS_MII_ANAR || reset)
1622 		bus_space_write_4(t, pcs, CAS_MII_CONFIG,
1623 		    CAS_MII_CONFIG_ENABLE);
1624 }
1625 
1626 int
cas_mediachange(struct ifnet * ifp)1627 cas_mediachange(struct ifnet *ifp)
1628 {
1629 	struct cas_softc *sc = ifp->if_softc;
1630 	struct mii_data *mii = &sc->sc_mii;
1631 
1632 	if (mii->mii_instance) {
1633 		struct mii_softc *miisc;
1634 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1635 			mii_phy_reset(miisc);
1636 	}
1637 
1638 	return (mii_mediachg(&sc->sc_mii));
1639 }
1640 
1641 void
cas_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)1642 cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1643 {
1644 	struct cas_softc *sc = ifp->if_softc;
1645 
1646 	mii_pollstat(&sc->sc_mii);
1647 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1648 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1649 }
1650 
1651 /*
1652  * Process an ioctl request.
1653  */
1654 int
cas_ioctl(struct ifnet * ifp,u_long cmd,caddr_t data)1655 cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1656 {
1657 	struct cas_softc *sc = ifp->if_softc;
1658 	struct ifreq *ifr = (struct ifreq *)data;
1659 	int s, error = 0;
1660 
1661 	s = splnet();
1662 
1663 	switch (cmd) {
1664 	case SIOCSIFADDR:
1665 		ifp->if_flags |= IFF_UP;
1666 		if ((ifp->if_flags & IFF_RUNNING) == 0)
1667 			cas_init(ifp);
1668 		break;
1669 
1670 	case SIOCSIFFLAGS:
1671 		if (ifp->if_flags & IFF_UP) {
1672 			if (ifp->if_flags & IFF_RUNNING)
1673 				error = ENETRESET;
1674 			else
1675 				cas_init(ifp);
1676 		} else {
1677 			if (ifp->if_flags & IFF_RUNNING)
1678 				cas_stop(ifp, 1);
1679 		}
1680 #ifdef CAS_DEBUG
1681 		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1682 #endif
1683 		break;
1684 
1685 	case SIOCGIFMEDIA:
1686 	case SIOCSIFMEDIA:
1687 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1688 		break;
1689 
1690 	default:
1691 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1692 	}
1693 
1694 	if (error == ENETRESET) {
1695 		if (ifp->if_flags & IFF_RUNNING)
1696 			cas_iff(sc);
1697 		error = 0;
1698 	}
1699 
1700 	splx(s);
1701 	return (error);
1702 }
1703 
1704 void
cas_iff(struct cas_softc * sc)1705 cas_iff(struct cas_softc *sc)
1706 {
1707 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1708 	struct arpcom *ac = &sc->sc_arpcom;
1709 	struct ether_multi *enm;
1710 	struct ether_multistep step;
1711 	bus_space_tag_t t = sc->sc_memt;
1712 	bus_space_handle_t h = sc->sc_memh;
1713 	u_int32_t crc, hash[16], rxcfg;
1714 	int i;
1715 
1716 	rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
1717 	rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS |
1718 	    CAS_MAC_RX_PROMISC_GRP);
1719 	ifp->if_flags &= ~IFF_ALLMULTI;
1720 
1721 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1722 		ifp->if_flags |= IFF_ALLMULTI;
1723 		if (ifp->if_flags & IFF_PROMISC)
1724 			rxcfg |= CAS_MAC_RX_PROMISCUOUS;
1725 		else
1726 			rxcfg |= CAS_MAC_RX_PROMISC_GRP;
1727         } else {
1728 		/*
1729 		 * Set up multicast address filter by passing all multicast
1730 		 * addresses through a crc generator, and then using the
1731 		 * high order 8 bits as an index into the 256 bit logical
1732 		 * address filter.  The high order 4 bits selects the word,
1733 		 * while the other 4 bits select the bit within the word
1734 		 * (where bit 0 is the MSB).
1735 		 */
1736 
1737 		rxcfg |= CAS_MAC_RX_HASH_FILTER;
1738 
1739 		/* Clear hash table */
1740 		for (i = 0; i < 16; i++)
1741 			hash[i] = 0;
1742 
1743 		ETHER_FIRST_MULTI(step, ac, enm);
1744 		while (enm != NULL) {
1745                         crc = ether_crc32_le(enm->enm_addrlo,
1746                             ETHER_ADDR_LEN);
1747 
1748                         /* Just want the 8 most significant bits. */
1749                         crc >>= 24;
1750 
1751                         /* Set the corresponding bit in the filter. */
1752                         hash[crc >> 4] |= 1 << (15 - (crc & 15));
1753 
1754 			ETHER_NEXT_MULTI(step, enm);
1755 		}
1756 
1757 		/* Now load the hash table into the chip (if we are using it) */
1758 		for (i = 0; i < 16; i++) {
1759 			bus_space_write_4(t, h,
1760 			    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
1761 			    hash[i]);
1762 		}
1763 	}
1764 
1765 	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg);
1766 }
1767 
1768 int
cas_encap(struct cas_softc * sc,struct mbuf * m,int * used)1769 cas_encap(struct cas_softc *sc, struct mbuf *m, int *used)
1770 {
1771 	u_int64_t flags;
1772 	u_int32_t first, cur, frag, i;
1773 	bus_dmamap_t map;
1774 
1775 	cur = frag = (sc->sc_tx_prod + *used) % CAS_NTXDESC;
1776 	map = sc->sc_txd[cur].sd_map;
1777 
1778 	switch (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m, BUS_DMA_NOWAIT)) {
1779 	case 0:
1780 		break;
1781 	case EFBIG:
1782 		if (m_defrag(m, M_DONTWAIT) == 0 &&
1783 		    bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
1784 		    BUS_DMA_NOWAIT) == 0)
1785 			break;
1786 		/* FALLTHROUGH */
1787 	default:
1788 		return (ENOBUFS);
1789 	}
1790 
1791 	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
1792 	    BUS_DMASYNC_PREWRITE);
1793 
1794 	first = cur;
1795 	for (i = 0; i < map->dm_nsegs; i++) {
1796 		sc->sc_txdescs[frag].cd_addr =
1797 		    CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
1798 		flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
1799 		    (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
1800 		    ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
1801 		sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
1802 		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
1803 		    CAS_CDTXOFF(frag), sizeof(struct cas_desc),
1804 		    BUS_DMASYNC_PREWRITE);
1805 		cur = frag;
1806 		if (++frag == CAS_NTXDESC)
1807 			frag = 0;
1808 	}
1809 
1810 	sc->sc_txd[first].sd_map = sc->sc_txd[cur].sd_map;
1811 	sc->sc_txd[cur].sd_map = map;
1812 	sc->sc_txd[cur].sd_mbuf = m;
1813 
1814 	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);
1815 
1816 	*used += map->dm_nsegs;
1817 
1818 	/* sync descriptors */
1819 
1820 	return (0);
1821 }
1822 
1823 /*
1824  * Transmit interrupt.
1825  */
1826 int
cas_tint(struct cas_softc * sc,u_int32_t status)1827 cas_tint(struct cas_softc *sc, u_int32_t status)
1828 {
1829 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1830 	struct cas_sxd *sd;
1831 	u_int32_t cons, comp;
1832 	int freed, used;
1833 
1834 	comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION);
1835 	cons = sc->sc_tx_cons;
1836 	freed = 0;
1837 	while (cons != comp) {
1838 		sd = &sc->sc_txd[cons];
1839 		if (sd->sd_mbuf != NULL) {
1840 			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
1841 			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1842 			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
1843 			m_freem(sd->sd_mbuf);
1844 			sd->sd_mbuf = NULL;
1845 		}
1846 		freed++;
1847 		if (++cons == CAS_NTXDESC)
1848 			cons = 0;
1849 	}
1850 	sc->sc_tx_cons = cons;
1851 
1852 	used = atomic_sub_int_nv(&sc->sc_tx_cnt, freed);
1853 	if (used < CAS_NTXDESC - 2)
1854 		ifq_clr_oactive(&ifp->if_snd);
1855 	if (used == 0)
1856 		ifp->if_timer = 0;
1857 
1858 	if (!ifq_empty(&ifp->if_snd)) {
1859 		KERNEL_LOCK();
1860 		cas_start(ifp);
1861 		KERNEL_UNLOCK();
1862 	}
1863 
1864 	return (1);
1865 }
1866 
1867 void
cas_start(struct ifnet * ifp)1868 cas_start(struct ifnet *ifp)
1869 {
1870 	struct cas_softc *sc = ifp->if_softc;
1871 	struct mbuf *m = NULL;
1872 	int used;
1873 
1874 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1875 		return;
1876 
1877 	used = 0;
1878 	while (1) {
1879 		if ((sc->sc_tx_cnt + used + CAS_NTXSEGS) >= (CAS_NTXDESC - 2)) {
1880 			ifq_set_oactive(&ifp->if_snd);
1881 			break;
1882 		}
1883 
1884 		m = ifq_dequeue(&ifp->if_snd);
1885 		if (m == NULL)
1886 			break;
1887 
1888 		if (cas_encap(sc, m, &used)) {
1889 			m_freem(m);
1890 			continue;
1891 		}
1892 
1893 #if NBPFILTER > 0
1894 		if (ifp->if_bpf)
1895 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1896 #endif
1897 	}
1898 
1899 	if (used != 0) {
1900 		ifp->if_timer = 5;
1901 		sc->sc_tx_prod = (sc->sc_tx_prod + used) % CAS_NTXDESC;
1902 		atomic_add_int(&sc->sc_tx_cnt, used);
1903 	}
1904 }
1905