xref: /openbsd/sys/arch/armv7/omap/if_cpsw.c (revision cf96265b)
1 /* $OpenBSD: if_cpsw.c,v 1.53 2023/11/10 15:51:19 bluhm Exp $ */
2 /*	$NetBSD: if_cpsw.c,v 1.3 2013/04/17 14:36:34 bouyer Exp $	*/
3 
4 /*
5  * Copyright (c) 2013 Jonathan A. Kollasch
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
21  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*-
31  * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
32  * All rights reserved.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce the above copyright
40  *    notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53  * SUCH DAMAGE.
54  */
55 
56 #include "bpfilter.h"
57 
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sockio.h>
61 #include <sys/mbuf.h>
62 #include <sys/pool.h>
63 #include <sys/queue.h>
64 #include <sys/device.h>
65 #include <sys/timeout.h>
66 
67 #include <machine/bus.h>
68 #include <machine/fdt.h>
69 
70 #include <net/if.h>
71 #include <net/if_media.h>
72 
73 #include <netinet/in.h>
74 #include <netinet/if_ether.h>
75 
76 #if NBPFILTER > 0
77 #include <net/bpf.h>
78 #endif
79 
80 #include <dev/mii/miivar.h>
81 
82 #include <arch/armv7/omap/if_cpswreg.h>
83 
84 #include <dev/ofw/openfirm.h>
85 #include <dev/ofw/ofw_clock.h>
86 #include <dev/ofw/ofw_pinctrl.h>
87 #include <dev/ofw/fdt.h>
88 
89 #include <uvm/uvm_extern.h>
90 
/* Maximum number of DMA segments accepted for one transmitted packet. */
91 #define CPSW_TXFRAGS	16

/* Offsets of the factory MAC address words in the OMAP control module. */
93 #define OMAP2SCM_MAC_ID0_LO	0x630
94 #define OMAP2SCM_MAC_ID0_HI	0x634

/* The 8KB CPPI descriptor SRAM is split evenly between tx and rx rings. */
96 #define CPSW_CPPI_RAM_SIZE (0x2000)
97 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
98 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
99     (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
100 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
101 #define CPSW_CPPI_RAM_RXDESCS_BASE \
102     (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)

/*
 * Ring sizes derived from the SRAM split.  cpsw_{tx,rx}desc_adjust()
 * masks indices with (N - 1), so these are assumed to be powers of two.
 */
104 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
105 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))

/* Shortest frame payload; shorter tx packets are padded up to this. */
107 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

/* Ring index arithmetic with modular wraparound. */
109 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
110 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)

112 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
113 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
114 
/*
 * Software shadow of the tx/rx descriptor rings: one DMA map and one
 * mbuf pointer per hardware descriptor.
 */
115 struct cpsw_ring_data {
116 	bus_dmamap_t		 tx_dm[CPSW_NTXDESCS];	/* per-slot tx DMA maps */
117 	struct mbuf		*tx_mb[CPSW_NTXDESCS];	/* mbufs in flight, NULL when free */
118 	bus_dmamap_t		 rx_dm[CPSW_NRXDESCS];	/* per-slot rx DMA maps */
119 	struct mbuf		*rx_mb[CPSW_NRXDESCS];	/* cluster mbufs on the rx ring */
120 };
121 
/* Per-port settings extracted from the device tree by cpsw_get_port_config(). */
122 struct cpsw_port_config {
123 	uint8_t			 enaddr[ETHER_ADDR_LEN];	/* MAC address */
124 	int			 phy_id;	/* MII PHY address */
125 	int			 rgmii;		/* nonzero: RGMII mode */
126 	int			 vlan;		/* VLAN id, if any */
127 };
128 
/* Per-device state for one CPSW switch instance. */
129 struct cpsw_softc {
130 	struct device		 sc_dev;
	/* Bus resources: register window and the two descriptor SRAM halves. */
131 	bus_space_tag_t		 sc_bst;
132 	bus_space_handle_t	 sc_bsh;
133 	bus_dma_tag_t		 sc_bdt;
134 	bus_space_handle_t	 sc_bsh_txdescs;
135 	bus_space_handle_t	 sc_bsh_rxdescs;
	/* Physical base of each descriptor region, used to link descriptors. */
136 	bus_addr_t		 sc_txdescs_pa;
137 	bus_addr_t		 sc_rxdescs_pa;

139 	struct arpcom		 sc_ac;
140 	struct mii_data		 sc_mii;

	/* Ring bookkeeping: next free tx slot, oldest in-flight tx, rx head. */
142 	struct cpsw_ring_data	*sc_rdp;
143 	volatile u_int		 sc_txnext;
144 	volatile u_int		 sc_txhead;
145 	volatile u_int		 sc_rxhead;

	/* Interrupt handles: rx threshold, rx, tx, misc. */
147 	void			*sc_rxthih;
148 	void			*sc_rxih;
149 	void			*sc_txih;
150 	void			*sc_miscih;

	/* Zero-filled buffer appended to runt frames to reach minimum length. */
152 	void			*sc_txpad;
153 	bus_dmamap_t		 sc_txpad_dm;
154 #define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr

	/* DMA engine state flags shared with the interrupt handlers. */
156 	volatile bool		 sc_txrun;
157 	volatile bool		 sc_rxrun;
158 	volatile bool		 sc_txeoq;
159 	volatile bool		 sc_rxeoq;
160 	struct timeout		 sc_tick;
161 	int			 sc_active_port;	/* which of the two PHYs we use */

163 	struct cpsw_port_config	 sc_port_config[2];
164 };
165 
166 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

/* Autoconf glue. */
168 int	cpsw_match(struct device *, void *, void *);
169 void	cpsw_attach(struct device *, struct device *, void *);

/* ifnet entry points. */
171 void	cpsw_start(struct ifnet *);
172 int	cpsw_ioctl(struct ifnet *, u_long, caddr_t);
173 void	cpsw_watchdog(struct ifnet *);
174 int	cpsw_init(struct ifnet *);
175 void	cpsw_stop(struct ifnet *);

/* MII/PHY access callbacks. */
177 int	cpsw_mii_readreg(struct device *, int, int);
178 void	cpsw_mii_writereg(struct device *, int, int, int);
179 void	cpsw_mii_statchg(struct device *);

181 void	cpsw_tick(void *);

183 int	cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
184 int	cpsw_mediachange(struct ifnet *);
185 void	cpsw_mediastatus(struct ifnet *, struct ifmediareq *);

/* Interrupt handlers, one per FDT interrupt line. */
187 int	cpsw_rxthintr(void *);
188 int	cpsw_rxintr(void *);
189 int	cpsw_txintr(void *);
190 int	cpsw_miscintr(void *);

192 void	cpsw_get_port_config(struct cpsw_port_config *, int);

194 const struct cfattach cpsw_ca = {
195 	sizeof(struct cpsw_softc),
196 	cpsw_match,
197 	cpsw_attach
198 };

200 struct cfdriver cpsw_cd = {
201 	NULL,
202 	"cpsw",
203 	DV_IFNET
204 };
205 
206 static inline u_int
cpsw_txdesc_adjust(u_int x,int y)207 cpsw_txdesc_adjust(u_int x, int y)
208 {
209 	return (((x) + y) & (CPSW_NTXDESCS - 1));
210 }
211 
212 static inline u_int
cpsw_rxdesc_adjust(u_int x,int y)213 cpsw_rxdesc_adjust(u_int x, int y)
214 {
215 	return (((x) + y) & (CPSW_NRXDESCS - 1));
216 }
217 
218 static inline void
cpsw_set_txdesc_next(struct cpsw_softc * const sc,const u_int i,uint32_t n)219 cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
220 {
221 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
222 	bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
223 }
224 
225 static inline void
cpsw_set_rxdesc_next(struct cpsw_softc * const sc,const u_int i,uint32_t n)226 cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
227 {
228 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
229 	bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
230 }
231 
232 static inline void
cpsw_get_txdesc(struct cpsw_softc * const sc,const u_int i,struct cpsw_cpdma_bd * const bdp)233 cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
234     struct cpsw_cpdma_bd * const bdp)
235 {
236 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
237 	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o,
238 	    (uint32_t *)bdp, 4);
239 }
240 
241 static inline void
cpsw_set_txdesc(struct cpsw_softc * const sc,const u_int i,struct cpsw_cpdma_bd * const bdp)242 cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
243     struct cpsw_cpdma_bd * const bdp)
244 {
245 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
246 	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o,
247 	    (uint32_t *)bdp, 4);
248 }
249 
250 static inline void
cpsw_get_rxdesc(struct cpsw_softc * const sc,const u_int i,struct cpsw_cpdma_bd * const bdp)251 cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
252     struct cpsw_cpdma_bd * const bdp)
253 {
254 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
255 	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o,
256 	    (uint32_t *)bdp, 4);
257 }
258 
259 static inline void
cpsw_set_rxdesc(struct cpsw_softc * const sc,const u_int i,struct cpsw_cpdma_bd * const bdp)260 cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
261     struct cpsw_cpdma_bd * const bdp)
262 {
263 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
264 	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o,
265 	    (uint32_t *)bdp, 4);
266 }
267 
268 static inline bus_addr_t
cpsw_txdesc_paddr(struct cpsw_softc * const sc,u_int x)269 cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
270 {
271 	KASSERT(x < CPSW_NTXDESCS);
272 	return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
273 }
274 
275 static inline bus_addr_t
cpsw_rxdesc_paddr(struct cpsw_softc * const sc,u_int x)276 cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
277 {
278 	KASSERT(x < CPSW_NRXDESCS);
279 	return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
280 }
281 
282 static void
cpsw_mdio_init(struct cpsw_softc * sc)283 cpsw_mdio_init(struct cpsw_softc *sc)
284 {
285 	uint32_t alive, link;
286 	u_int tries;
287 
288 	sc->sc_active_port = 0;
289 
290 	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
291 	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
292 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOCONTROL,
293 	    (1<<30) | (1<<18) | 0xFF);
294 
295 	for(tries = 0; tries < 1000; tries++) {
296 		alive = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOALIVE) & 3;
297 		if (alive)
298 			break;
299 		delay(1);
300 	}
301 
302 	if (alive == 0) {
303 		printf("%s: no PHY is alive\n", DEVNAME(sc));
304 		return;
305 	}
306 
307 	link = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOLINK) & 3;
308 
309 	if (alive == 3) {
310 		/* both ports are alive, prefer one with link */
311 		if (link == 2)
312 			sc->sc_active_port = 1;
313 	} else if (alive == 2)
314 		sc->sc_active_port = 1;
315 
316 	/* Select the port to monitor */
317 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOUSERPHYSEL0,
318 	    sc->sc_active_port);
319 }
320 
321 int
cpsw_match(struct device * parent,void * match,void * aux)322 cpsw_match(struct device *parent, void *match, void *aux)
323 {
324 	struct fdt_attach_args *faa = aux;
325 
326 	return OF_is_compatible(faa->fa_node, "ti,cpsw");
327 }
328 
/*
 * Autoconf attach: map the CPSW register window, carve the on-chip
 * CPPI descriptor SRAM into tx/rx rings, create DMA maps for every
 * ring slot, and hook the interface into the network stack and the
 * MII layer.
 */
329 void
cpsw_attach(struct device * parent,struct device * self,void * aux)330 cpsw_attach(struct device *parent, struct device *self, void *aux)
331 {
332 	struct cpsw_softc *sc = (struct cpsw_softc *)self;
333 	struct fdt_attach_args *faa = aux;
334 	struct arpcom * const ac = &sc->sc_ac;
335 	struct ifnet * const ifp = &ac->ac_if;
336 	void *descs;
337 	u_int32_t idver;
338 	int error;
339 	int node;
340 	u_int i;
341 	uint32_t memsize;

343 	if (faa->fa_nreg < 1)
344 		return;

346 	/*
347 	 * fa_reg[0].size is size of CPSW_SS and CPSW_PORT
348 	 * fa_reg[1].size is size of CPSW_WR
349 	 * we map a size that is a superset of both
350 	 */
351 	memsize = 0x4000;

353 	pinctrl_byname(faa->fa_node, "default");

	/* Enable the MDIO subnode's clock and pins before touching MDIO. */
355 	for (node = OF_child(faa->fa_node); node; node = OF_peer(node)) {
356 		if (OF_is_compatible(node, "ti,davinci_mdio")) {
357 			clock_enable(node, "fck");
358 			pinctrl_byname(node, "default");
359 		}
360 	}

362 	timeout_set(&sc->sc_tick, cpsw_tick, sc);

	/* Port 0's device-tree MAC address becomes the interface lladdr. */
364 	cpsw_get_port_config(sc->sc_port_config, faa->fa_node);
365 	memcpy(sc->sc_ac.ac_enaddr, sc->sc_port_config[0].enaddr,
366 	    ETHER_ADDR_LEN);

	/* Four FDT interrupts: rx threshold, rx, tx and misc. */
368 	sc->sc_rxthih = arm_intr_establish_fdt_idx(faa->fa_node, 0, IPL_NET,
369 	    cpsw_rxthintr, sc, DEVNAME(sc));
370 	sc->sc_rxih = arm_intr_establish_fdt_idx(faa->fa_node, 1, IPL_NET,
371 	    cpsw_rxintr, sc, DEVNAME(sc));
372 	sc->sc_txih = arm_intr_establish_fdt_idx(faa->fa_node, 2, IPL_NET,
373 	    cpsw_txintr, sc, DEVNAME(sc));
374 	sc->sc_miscih = arm_intr_establish_fdt_idx(faa->fa_node, 3, IPL_NET,
375 	    cpsw_miscintr, sc, DEVNAME(sc));

377 	sc->sc_bst = faa->fa_iot;
378 	sc->sc_bdt = faa->fa_dmat;

380 	error = bus_space_map(sc->sc_bst, faa->fa_reg[0].addr,
381 	    memsize, BUS_SPACE_MAP_LINEAR, &sc->sc_bsh);
382 	if (error) {
383 		printf("can't map registers: %d\n", error);
384 		return;
385 	}

	/*
	 * Carve the descriptor SRAM into tx and rx halves and record the
	 * physical address of each so that descriptors can be linked by
	 * physical pointer for the DMA engine.
	 */
387 	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
388 	    CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
389 	    &sc->sc_bsh_txdescs);
390 	if (error) {
391 		printf("can't subregion tx ring SRAM: %d\n", error);
392 		return;
393 	}
394 	descs = bus_space_vaddr(sc->sc_bst, sc->sc_bsh_txdescs);
395 	pmap_extract(pmap_kernel(), (vaddr_t)descs, &sc->sc_txdescs_pa);

397 	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
398 	    CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
399 	    &sc->sc_bsh_rxdescs);
400 	if (error) {
401 		printf("can't subregion rx ring SRAM: %d\n", error);
402 		return;
403 	}
404 	descs = bus_space_vaddr(sc->sc_bst, sc->sc_bsh_rxdescs);
405 	pmap_extract(pmap_kernel(), (vaddr_t)descs, &sc->sc_rxdescs_pa);

407 	sc->sc_rdp = malloc(sizeof(*sc->sc_rdp), M_TEMP, M_WAITOK);
408 	KASSERT(sc->sc_rdp != NULL);

	/* One DMA map per ring slot; tx maps accept up to CPSW_TXFRAGS segments. */
410 	for (i = 0; i < CPSW_NTXDESCS; i++) {
411 		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
412 		    CPSW_TXFRAGS, MCLBYTES, 0, 0,
413 		    &sc->sc_rdp->tx_dm[i])) != 0) {
414 			printf("unable to create tx DMA map: %d\n", error);
415 		}
416 		sc->sc_rdp->tx_mb[i] = NULL;
417 	}

419 	for (i = 0; i < CPSW_NRXDESCS; i++) {
420 		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
421 		    MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
422 			printf("unable to create rx DMA map: %d\n", error);
423 		}
424 		sc->sc_rdp->rx_mb[i] = NULL;
425 	}

	/* Pre-loaded zero buffer used to pad runt tx frames to minimum length. */
427 	sc->sc_txpad = dma_alloc(ETHER_MIN_LEN, PR_WAITOK | PR_ZERO);
428 	KASSERT(sc->sc_txpad != NULL);
429 	bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
430 	    BUS_DMA_WAITOK, &sc->sc_txpad_dm);
431 	bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
432 	    ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK|BUS_DMA_WRITE);
433 	bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
434 	    BUS_DMASYNC_PREWRITE);

436 	idver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_IDVER);
437 	printf(": version %d.%d (%d), address %s\n",
438 	    CPSW_SS_IDVER_MAJ(idver), CPSW_SS_IDVER_MIN(idver),
439 	    CPSW_SS_IDVER_RTL(idver), ether_sprintf(ac->ac_enaddr));

441 	ifp->if_softc = sc;
442 	ifp->if_capabilities = IFCAP_VLAN_MTU;
443 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
444 	ifp->if_start = cpsw_start;
445 	ifp->if_ioctl = cpsw_ioctl;
446 	ifp->if_watchdog = cpsw_watchdog;
447 	ifq_init_maxlen(&ifp->if_snd, CPSW_NTXDESCS - 1);
448 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	/* Make sure the hardware is quiescent before wiring up MII. */
450 	cpsw_stop(ifp);

452 	sc->sc_mii.mii_ifp = ifp;
453 	sc->sc_mii.mii_readreg = cpsw_mii_readreg;
454 	sc->sc_mii.mii_writereg = cpsw_mii_writereg;
455 	sc->sc_mii.mii_statchg = cpsw_mii_statchg;

457 	cpsw_mdio_init(sc);

459 	ifmedia_init(&sc->sc_mii.mii_media, 0, cpsw_mediachange,
460 	    cpsw_mediastatus);
461 	mii_attach(self, &sc->sc_mii, 0xffffffff,
462 	    sc->sc_port_config[0].phy_id, MII_OFFSET_ANY, 0);
463 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
464 		printf("no PHY found!\n");
		/* Fall back to manual media so ifconfig still works. */
465 		ifmedia_add(&sc->sc_mii.mii_media,
466 		    IFM_ETHER|IFM_MANUAL, 0, NULL);
467 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
468 	} else {
469 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
470 	}

472 	if_attach(ifp);
473 	ether_ifattach(ifp);

475 	return;
476 }
477 
478 int
cpsw_mediachange(struct ifnet * ifp)479 cpsw_mediachange(struct ifnet *ifp)
480 {
481 	struct cpsw_softc *sc = ifp->if_softc;
482 
483 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
484 		mii_mediachg(&sc->sc_mii);
485 
486 	return (0);
487 }
488 
489 void
cpsw_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)490 cpsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
491 {
492 	struct cpsw_softc *sc = ifp->if_softc;
493 
494 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
495 		mii_pollstat(&sc->sc_mii);
496 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
497 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
498 	}
499 }
500 
/*
 * ifnet start routine: drain the send queue into the tx descriptor
 * ring.  Each mbuf chain occupies one descriptor per DMA segment;
 * frames shorter than CPSW_PAD_LEN get one extra descriptor pointing
 * at the shared zero pad buffer.  The new chain is linked onto the
 * ring and the DMA engine is kicked if it had reached end-of-queue.
 */
501 void
cpsw_start(struct ifnet * ifp)502 cpsw_start(struct ifnet *ifp)
503 {
504 	struct cpsw_softc * const sc = ifp->if_softc;
505 	struct cpsw_ring_data * const rdp = sc->sc_rdp;
506 	struct cpsw_cpdma_bd bd;
507 	struct mbuf *m;
508 	bus_dmamap_t dm;
509 	u_int eopi = ~0;
510 	u_int seg;
511 	u_int txfree;
512 	int txstart = -1;
513 	int error;
514 	bool pad;
515 	u_int mlen;

517 	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
518 	    ifq_is_oactive(&ifp->if_snd) ||
519 	    ifq_empty(&ifp->if_snd))
520 		return;

	/* Free descriptors between txnext and txhead on the circular ring. */
522 	if (sc->sc_txnext >= sc->sc_txhead)
523 		txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
524 	else
525 		txfree = sc->sc_txhead - sc->sc_txnext - 1;

527 	for (;;) {
		/* Stop once a maximally fragmented packet can no longer fit. */
528 		if (txfree <= CPSW_TXFRAGS) {
529 			ifq_set_oactive(&ifp->if_snd);
530 			break;
531 		}

533 		m = ifq_dequeue(&ifp->if_snd);
534 		if (m == NULL)
535 			break;

537 		dm = rdp->tx_dm[sc->sc_txnext];
538 		error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
539 		switch (error) {
540 		case 0:
541 			break;

543 		case EFBIG: /* mbuf chain is too fragmented */
			/* try once more after compacting the chain */
544 			if (m_defrag(m, M_DONTWAIT) == 0 &&
545 			    bus_dmamap_load_mbuf(sc->sc_bdt, dm, m,
546 			    BUS_DMA_NOWAIT) == 0)
547 				break;

549 			/* FALLTHROUGH */
550 		default:
551 			m_freem(m);
552 			ifp->if_oerrors++;
553 			continue;
554 		}

556 		mlen = dm->dm_mapsize;
557 		pad = mlen < CPSW_PAD_LEN;

559 		KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
560 		rdp->tx_mb[sc->sc_txnext] = m;

562 #if NBPFILTER > 0
563 		if (ifp->if_bpf)
564 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
565 #endif

567 		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
568 		    BUS_DMASYNC_PREWRITE);

		/* remember the first and (running) last descriptor we fill */
570 		if (txstart == -1)
571 			txstart = sc->sc_txnext;
572 		eopi = sc->sc_txnext;
		/* one descriptor per DMA segment; SOP carries OWNER + pktlen */
573 		for (seg = 0; seg < dm->dm_nsegs; seg++) {
574 			bd.next = cpsw_txdesc_paddr(sc,
575 			    TXDESC_NEXT(sc->sc_txnext));
576 			bd.bufptr = dm->dm_segs[seg].ds_addr;
577 			bd.bufoff = 0;
578 			bd.buflen = dm->dm_segs[seg].ds_len;
579 			bd.pktlen = 0;
580 			bd.flags = 0;

582 			if (seg == 0) {
583 				bd.flags = CPDMA_BD_OWNER | CPDMA_BD_SOP;
584 				bd.pktlen = MAX(mlen, CPSW_PAD_LEN);
585 			}

			/* EOP goes on the pad descriptor instead when padding */
587 			if (seg == dm->dm_nsegs - 1 && !pad)
588 				bd.flags |= CPDMA_BD_EOP;

590 			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
591 			txfree--;
592 			eopi = sc->sc_txnext;
593 			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
594 		}
		/* runt frame: append a slice of the shared zero buffer */
595 		if (pad) {
596 			bd.next = cpsw_txdesc_paddr(sc,
597 			    TXDESC_NEXT(sc->sc_txnext));
598 			bd.bufptr = sc->sc_txpad_pa;
599 			bd.bufoff = 0;
600 			bd.buflen = CPSW_PAD_LEN - mlen;
601 			bd.pktlen = 0;
602 			bd.flags = CPDMA_BD_EOP;

604 			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
605 			txfree--;
606 			eopi = sc->sc_txnext;
607 			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
608 		}
609 	}

611 	if (txstart >= 0) {
612 		ifp->if_timer = 5;
613 		/* terminate the new chain */
614 		KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
615 		cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);

617 		/* link the new chain on */
618 		cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
619 		    cpsw_txdesc_paddr(sc, txstart));
620 		if (sc->sc_txeoq) {
621 			/* kick the dma engine */
622 			sc->sc_txeoq = false;
623 			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0),
624 			    cpsw_txdesc_paddr(sc, txstart));
625 		}
626 	}
627 }
628 
629 int
cpsw_ioctl(struct ifnet * ifp,u_long cmd,caddr_t data)630 cpsw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
631 {
632 	struct cpsw_softc *sc = ifp->if_softc;
633 	struct ifreq *ifr = (struct ifreq *)data;
634 	int s = splnet();
635 	int error = 0;
636 
637 	switch (cmd) {
638 	case SIOCSIFADDR:
639 		ifp->if_flags |= IFF_UP;
640 		/* FALLTHROUGH */
641 	case SIOCSIFFLAGS:
642 		if (ifp->if_flags & IFF_UP) {
643 			if (ifp->if_flags & IFF_RUNNING)
644 				error = ENETRESET;
645 			else
646 				cpsw_init(ifp);
647 		} else {
648 			if (ifp->if_flags & IFF_RUNNING)
649 				cpsw_stop(ifp);
650 		}
651 		break;
652 	case SIOCSIFMEDIA:
653 		ifr->ifr_media &= ~IFM_ETH_FMASK;
654 		/* FALLTHROUGH */
655 	case SIOCGIFMEDIA:
656 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
657 		break;
658 	default:
659 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
660 		break;
661 	}
662 	if (error == ENETRESET) {
663 		if (ifp->if_flags & IFF_RUNNING)
664 			cpsw_init(ifp);
665 		error = 0;
666 	}
667 
668 	splx(s);
669 
670 	return error;
671 }
672 
673 void
cpsw_watchdog(struct ifnet * ifp)674 cpsw_watchdog(struct ifnet *ifp)
675 {
676 	printf("%s: device timeout\n", ifp->if_xname);
677 
678 	ifp->if_oerrors++;
679 	cpsw_init(ifp);
680 	cpsw_start(ifp);
681 }
682 
683 static int
cpsw_mii_wait(struct cpsw_softc * const sc,int reg)684 cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
685 {
686 	u_int tries;
687 
688 	for(tries = 0; tries < 1000; tries++) {
689 		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, reg) & (1U << 31)) == 0)
690 			return 0;
691 		delay(1);
692 	}
693 	return ETIMEDOUT;
694 }
695 
696 int
cpsw_mii_readreg(struct device * dev,int phy,int reg)697 cpsw_mii_readreg(struct device *dev, int phy, int reg)
698 {
699 	struct cpsw_softc * const sc = (struct cpsw_softc *)dev;
700 	uint32_t v;
701 
702 	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
703 		return 0;
704 
705 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0, (1U << 31) |
706 	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
707 
708 	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
709 		return 0;
710 
711 	v = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0);
712 	if (v & (1 << 29))
713 		return v & 0xffff;
714 	else
715 		return 0;
716 }
717 
718 void
cpsw_mii_writereg(struct device * dev,int phy,int reg,int val)719 cpsw_mii_writereg(struct device *dev, int phy, int reg, int val)
720 {
721 	struct cpsw_softc * const sc = (struct cpsw_softc *)dev;
722 	uint32_t v;
723 
724 	KASSERT((val & 0xffff0000UL) == 0);
725 
726 	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
727 		goto out;
728 
729 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0, (1U << 31) | (1 << 30) |
730 	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);
731 
732 	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
733 		goto out;
734 
735 	v = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0);
736 	if ((v & (1 << 29)) == 0)
737 out:
738 		printf("%s error\n", __func__);
739 
740 }
741 
/*
 * MII status-change callback.  Link parameter changes require no
 * extra MAC programming here, so this is intentionally empty.
 */
void
cpsw_mii_statchg(struct device *self)
{
}
747 
/*
 * Put a buffer on rx ring slot i.  Tries to allocate a fresh cluster
 * mbuf; if allocation fails, the slot's previous buffer is kept (its
 * data is sacrificed) so the ring never runs dry.  In both cases the
 * descriptor is rebuilt, handed to the hardware (OWNER), and linked
 * in after its predecessor.  Returns 0 or ENOBUFS.
 */
748 int
cpsw_new_rxbuf(struct cpsw_softc * const sc,const u_int i)749 cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
750 {
751 	struct cpsw_ring_data * const rdp = sc->sc_rdp;
752 	const u_int h = RXDESC_PREV(i);
753 	struct cpsw_cpdma_bd bd;
754 	struct mbuf *m;
755 	int error = ENOBUFS;

757 	MGETHDR(m, M_DONTWAIT, MT_DATA);
758 	if (m == NULL) {
759 		goto reuse;
760 	}

762 	MCLGET(m, M_DONTWAIT);
763 	if ((m->m_flags & M_EXT) == 0) {
764 		m_freem(m);
765 		goto reuse;
766 	}

768 	/* We have a new buffer, prepare it for the ring. */

770 	if (rdp->rx_mb[i] != NULL)
771 		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);

773 	m->m_len = m->m_pkthdr.len = MCLBYTES;

775 	rdp->rx_mb[i] = m;

777 	error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
778 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
779 	if (error) {
780 		printf("can't load rx DMA map %d: %d\n", i, error);
781 	}

783 	bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
784 	    0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

786 	error = 0;

788 reuse:
789 	/* (re-)setup the descriptor */
790 	bd.next = 0;
791 	bd.bufptr = rdp->rx_dm[i]->dm_segs[0].ds_addr;
792 	bd.bufoff = 0;
	/* hardware buffer length field is 11 bits wide */
793 	bd.buflen = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
794 	bd.pktlen = 0;
795 	bd.flags = CPDMA_BD_OWNER;

797 	cpsw_set_rxdesc(sc, i, &bd);
798 	/* and link onto ring */
799 	cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));

801 	return error;
802 }
803 
/*
 * Bring the hardware up from scratch: reset the wrapper, switch
 * subsystem, sliver ports and CPDMA; program MAC addresses and the
 * ALE; rebuild both descriptor rings; enable DMA and interrupts; and
 * mark the interface running.  Always returns 0.
 */
804 int
cpsw_init(struct ifnet * ifp)805 cpsw_init(struct ifnet *ifp)
806 {
807 	struct cpsw_softc * const sc = ifp->if_softc;
808 	struct arpcom *ac = &sc->sc_ac;
809 	struct mii_data * const mii = &sc->sc_mii;
810 	int i;

812 	cpsw_stop(ifp);

814 	sc->sc_txnext = 0;
815 	sc->sc_txhead = 0;

817 	/* Reset wrapper */
818 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET, 1);
819 	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET) & 1);

821 	/* Reset SS */
822 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET, 1);
823 	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET) & 1);

825 	/* Clear table (30) and enable ALE(31) and set passthrough (4) */
826 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_ALE_CONTROL, (3 << 30) | 0x10);

828 	/* Reset and init Sliver port 1 and 2 */
829 	for (i = 0; i < 2; i++) {
830 		/* Reset */
831 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i), 1);
832 		while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i)) & 1);
833 		/* Set Slave Mapping */
834 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
835 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
836 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_RX_MAXLEN(i), 0x5f2);
837 		/* Set MAC Address */
838 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P_SA_HI(i+1),
839 		    ac->ac_enaddr[0] | (ac->ac_enaddr[1] << 8) |
840 		    (ac->ac_enaddr[2] << 16) | (ac->ac_enaddr[3] << 24));
841 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P_SA_LO(i+1),
842 		    ac->ac_enaddr[4] | (ac->ac_enaddr[5] << 8));

844 		/* Set MACCONTROL for ports 0,1: FULLDUPLEX(0), GMII_EN(5),
845 		   IFCTL_A(15), IFCTL_B(16) FIXME */
846 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_MACCONTROL(i),
847 		    1 | (1<<5) | (1<<15) | (1<<16));

849 		/* Set ALE port to forwarding(3) on the active port */
850 		if (i == sc->sc_active_port)
851 			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_ALE_PORTCTL(i+1), 3);
852 	}

854 	/* Set Host Port Mapping */
855 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
856 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

858 	/* Set ALE port to forwarding(3) */
859 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_ALE_PORTCTL(0), 3);

861 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_PTYPE, 0);
862 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_STAT_PORT_EN, 7);

	/* Reset CPDMA and clear every channel's head/completion pointers. */
864 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET, 1);
865 	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET) & 1);

867 	for (i = 0; i < 8; i++) {
868 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(i), 0);
869 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(i), 0);
870 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(i), 0);
871 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(i), 0);
872 	}

	/* Wipe the tx descriptor SRAM and reset ring indices. */
874 	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
875 	    CPSW_CPPI_RAM_TXDESCS_SIZE/4);

877 	sc->sc_txhead = 0;
878 	sc->sc_txnext = 0;

880 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

882 	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
883 	    CPSW_CPPI_RAM_RXDESCS_SIZE/4);

885 	/* Initialize RX Buffer Descriptors */
886 	cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
887 	for (i = 0; i < CPSW_NRXDESCS; i++) {
888 		cpsw_new_rxbuf(sc, i);
889 	}
890 	sc->sc_rxhead = 0;

892 	/* align layer 3 header to 32-bit */
893 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);

895 	/* Clear all interrupt Masks */
896 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
897 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);

899 	/* Enable TX & RX DMA */
900 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CONTROL, 1);
901 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CONTROL, 1);

903 	/* Enable interrupt pacing for C0 RX/TX (IMAX set to max intr/ms allowed) */
904 #define CPSW_VBUSP_CLK_MHZ	2400	/* hardcoded for BBB */
905 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_RX_IMAX(0), 2);
906 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_TX_IMAX(0), 2);
907 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_INT_CONTROL, 3 << 16 | CPSW_VBUSP_CLK_MHZ/4);

909 	/* Enable TX and RX interrupt receive for core 0 */
910 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_TX_EN(0), 1);
911 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_RX_EN(0), 1);
912 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_MISC_EN(0), 0x1F);

914 	/* Enable host Error Interrupt */
915 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTMASK_SET, 2);

917 	/* Enable interrupts for TX and RX Channel 0 */
918 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_INTMASK_SET, 1);
919 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_INTMASK_SET, 1);

921 	/* Ack stalled irqs */
922 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
923 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
924 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
925 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

927 	cpsw_mdio_init(sc);

929 	mii_mediachg(mii);

931 	/* Write channel 0 RX HDP */
932 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
933 	sc->sc_rxrun = true;
934 	sc->sc_rxeoq = false;

936 	sc->sc_txrun = true;
937 	sc->sc_txeoq = true;

939 	ifp->if_flags |= IFF_RUNNING;
940 	ifq_clr_oactive(&ifp->if_snd);

942 	timeout_add_sec(&sc->sc_tick, 1);

944 	return 0;
945 }
946 
/*
 * cpsw_stop: bring the interface down.
 *
 * Quiesces the MII layer, masks channel 0 interrupts, tears down the
 * CPDMA TX/RX channels, soft-resets the wrapper, subsystem, both
 * sliver (per-port MAC) blocks and the CPDMA engine, then releases
 * all queued transmit and receive DMA buffers.  No-op unless the
 * interface is currently IFF_RUNNING.
 */
void
cpsw_stop(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	u_int i;

#if 0
	/* XXX find where disable comes from */
	printf("%s: ifp %p disable %d\n", __func__, ifp, disable);
#endif
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/* Stop the per-second MII tick and take the PHY link down. */
	timeout_del(&sc->sc_tick);

	mii_down(&sc->sc_mii);

	/* Mask channel 0 interrupts at both the CPDMA and the wrapper. */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_TX_EN(0), 0x0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_RX_EN(0), 0x0);
	/*
	 * NOTE(review): 0x1F sets all misc-enable bits, same value the
	 * init path writes; a disable (0x0) looks intended here --
	 * confirm against the TRM before changing.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Request teardown of TX and RX channel 0 ... */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_TEARDOWN, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_TEARDOWN, 0);
	i = 0;
	/*
	 * ... then poll (up to 10000 * 10us = 100ms) until both ring
	 * walkers have observed their teardown-complete descriptors;
	 * cpsw_txintr()/cpsw_rxintr() clear sc_txrun/sc_rxrun themselves
	 * when they see CPDMA_BD_TDOWNCMPLT.
	 */
	while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
		delay(10);
		if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
			sc->sc_txrun = false;
		if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
			sc->sc_rxrun = false;
		i++;
	}
	/* printf("%s toredown complete in %u\n", __func__, i); */

	/* Reset wrapper */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET, 1);
	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET) & 1);

	/* Reset SS */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET, 1);
	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET) & 1);

	/* Reset both sliver (per-port MAC) instances. */
	for (i = 0; i < 2; i++) {
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i), 1);
		while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i)) & 1);
	}

	/* Reset CPDMA */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET, 1);
	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET) & 1);

	/* Release any queued transmit buffers. */
	for (i = 0; i < CPSW_NTXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
		m_freem(rdp->tx_mb[i]);
		rdp->tx_mb[i] = NULL;
	}

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);

	/* XXX Not sure what this is doing calling disable here
	    where is disable set?
	*/
#if 0
	if (!disable)
		return;
#endif

	/* Release the receive buffers as well. */
	for (i = 0; i < CPSW_NRXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
		m_freem(rdp->rx_mb[i]);
		rdp->rx_mb[i] = NULL;
	}
}
1026 
1027 int
cpsw_rxthintr(void * arg)1028 cpsw_rxthintr(void *arg)
1029 {
1030 	struct cpsw_softc * const sc = arg;
1031 
1032 	/* this won't deassert the interrupt though */
1033 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
1034 
1035 	return 1;
1036 }
1037 
/*
 * RX channel 0 interrupt handler.
 *
 * Walks the receive descriptor ring from sc_rxhead, enqueueing each
 * completed frame for the stack and re-arming its slot with a fresh
 * buffer.  Also doubles as the poll routine cpsw_stop() uses to detect
 * RX teardown: seeing CPDMA_BD_TDOWNCMPLT clears sc_rxrun and returns
 * early (skipping the EOI write).  Always returns 1 (handled).
 */
int
cpsw_rxintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ac.ac_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	bus_dmamap_t dm;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	u_int i;
	u_int len, off;

	sc->sc_rxeoq = false;

	for (;;) {
		KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);

		i = sc->sc_rxhead;
		dm = rdp->rx_dm[i];
		m = rdp->rx_mb[i];

		KASSERT(dm != NULL);
		KASSERT(m != NULL);

		cpsw_get_rxdesc(sc, i, &bd);

		/* Descriptor still owned by hardware: nothing more done. */
		if (bd.flags & CPDMA_BD_OWNER)
			break;

		/* Teardown descriptor: RX channel has been shut down. */
		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			sc->sc_rxrun = false;
			goto done;
		}

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		if (cpsw_new_rxbuf(sc, i) != 0) {
			/* drop current packet, reuse buffer for new */
			ifp->if_ierrors++;
			goto next;
		}

		/*
		 * Frames spanning multiple descriptors are not supported:
		 * require SOP and EOP on the same descriptor.
		 */
		if ((bd.flags & (CPDMA_BD_SOP|CPDMA_BD_EOP)) !=
		    (CPDMA_BD_SOP|CPDMA_BD_EOP)) {
			if (bd.flags & CPDMA_BD_SOP) {
				printf("cpsw: rx packet too large\n");
				ifp->if_ierrors++;
			}
			m_freem(m);
			goto next;
		}

		off = bd.bufoff;
		len = bd.pktlen;

		/* Hardware passed the FCS through: trim it off. */
		if (bd.flags & CPDMA_BD_PASSCRC)
			len -= ETHER_CRC_LEN;

		m->m_pkthdr.len = m->m_len = len;
		m->m_data += off;

		ml_enqueue(&ml, m);

next:
		sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
		/* EOQ: hardware ran off the end of the queue; must restart. */
		if (bd.flags & CPDMA_BD_EOQ) {
			sc->sc_rxeoq = true;
			sc->sc_rxrun = false;
		}
		/* Acknowledge this descriptor via the completion pointer. */
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(0),
		    cpsw_rxdesc_paddr(sc, i));
	}

	/* Queue went empty: restart DMA at the current head descriptor. */
	if (sc->sc_rxeoq) {
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(0),
				  cpsw_rxdesc_paddr(sc, sc->sc_rxhead));
		sc->sc_rxrun = true;
		sc->sc_rxeoq = false;
	}

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR,
	    CPSW_INTROFF_RX);

done:
	if_input(ifp, &ml);

	return 1;
}
1128 
1129 void
cpsw_tick(void * arg)1130 cpsw_tick(void *arg)
1131 {
1132 	struct cpsw_softc *sc = arg;
1133 	int s;
1134 
1135 	s = splnet();
1136 	mii_tick(&sc->sc_mii);
1137 	splx(s);
1138 
1139 	timeout_add_sec(&sc->sc_tick, 1);
1140 }
1141 
/*
 * TX channel 0 interrupt handler.
 *
 * Reclaims completed transmit descriptors between sc_txhead and the
 * hardware completion pointer, freeing their mbufs and DMA maps, then
 * restarts the TX queue if it hit end-of-queue while more descriptors
 * were pending.  Also doubles as the poll routine cpsw_stop() uses to
 * detect TX teardown (0xfffffffc completion pointer, or a descriptor
 * with CPDMA_BD_TDOWNCMPLT set, clears sc_txrun).  Returns nonzero if
 * any descriptor was reclaimed.
 */
int
cpsw_txintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ac.ac_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	bool handled = false;
	uint32_t tx0_cp;
	u_int cpi;

	KASSERT(sc->sc_txrun);

	tx0_cp = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0));

	/* 0xfffffffc in TX_CP is the teardown acknowledgement value. */
	if (tx0_cp == 0xfffffffc) {
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0), 0);
		sc->sc_txrun = false;
		return 0;
	}

	for (;;) {
		/* Re-read the completion pointer and convert to an index. */
		tx0_cp = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0));
		cpi = (tx0_cp - sc->sc_txdescs_pa) /
		    sizeof(struct cpsw_cpdma_bd);
		KASSERT(sc->sc_txhead < CPSW_NTXDESCS);

		cpsw_get_txdesc(sc, sc->sc_txhead, &bd);

		if (bd.buflen == 0) {
			/* db_enter(); */
		}

		/* Only SOP descriptors carry the mbuf/DMA map to reclaim. */
		if ((bd.flags & CPDMA_BD_SOP) == 0)
			goto next;

		/* Still owned by hardware: stop reclaiming here. */
		if (bd.flags & CPDMA_BD_OWNER) {
			printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
			    sc->sc_txnext);
			break;
		}

		/* Teardown descriptor: TX channel has been shut down. */
		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			sc->sc_txrun = false;
			return 1;
		}

		bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
		    0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);

		m_freem(rdp->tx_mb[sc->sc_txhead]);
		rdp->tx_mb[sc->sc_txhead] = NULL;

		handled = true;

		/* A slot was freed; allow the stack to queue again. */
		ifq_clr_oactive(&ifp->if_snd);

next:
		/* EOP+EOQ on the same descriptor: queue ran dry here. */
		if ((bd.flags & (CPDMA_BD_EOP|CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP|CPDMA_BD_EOQ))
			sc->sc_txeoq = true;

		/* Caught up with the completion pointer: ack and stop. */
		if (sc->sc_txhead == cpi) {
			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0),
			    cpsw_txdesc_paddr(sc, cpi));
			sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
			break;
		}
		sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
		if (sc->sc_txeoq == true)
			break;
	}

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);

	/*
	 * Descriptors remain pending but the queue hit EOQ: restart DMA
	 * at the current head (only if the head pointer is really idle).
	 */
	if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
		if (bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0)) == 0) {
			sc->sc_txeoq = false;
			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, sc->sc_txhead));
		}
	}

	/* All pending transmissions completed: cancel the watchdog. */
	if (handled && sc->sc_txnext == sc->sc_txhead)
		ifp->if_timer = 0;

	if (handled)
		cpsw_start(ifp);

	return handled;
}
1236 
/*
 * Miscellaneous interrupt handler (debug-oriented).
 *
 * On a host error pending bit it dumps the CPDMA interrupt status and
 * the TX/RX head/completion pointers to the console, then masks the
 * offending DMA interrupts.  Always acknowledges the misc interrupt
 * line and returns 1 (handled).
 */
int
cpsw_miscintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	uint32_t miscstat;
	uint32_t dmastat;
	uint32_t stat;

	miscstat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_MISC_STAT(0));
	printf("%s %x FIRE\n", __func__, miscstat);

	if (miscstat & CPSW_MISC_HOST_PEND) {
		/* Host Error */
		dmastat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);

		printf("rxhead %02x\n", sc->sc_rxhead);

		/* Dump DMA state for post-mortem analysis. */
		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMASTATUS);
		printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0));
		printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0));
		printf("CPSW_CPDMA_TX0_CP %x\n", stat);
		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(0));
		printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(0));
		printf("CPSW_CPDMA_RX0_CP %x\n", stat);

		/* db_enter(); */

		/* Mask the interrupts that fired so they stop recurring. */
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
		dmastat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
	}

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

	return 1;
}
1277 
1278 void
cpsw_get_port_config(struct cpsw_port_config * conf,int pnode)1279 cpsw_get_port_config(struct cpsw_port_config *conf, int pnode)
1280 {
1281 	char mode[32];
1282 	uint32_t phy_id[2];
1283 	int node, phy_handle, phy_node;
1284 	int port = 0;
1285 
1286 	for (node = OF_child(pnode); node; node = OF_peer(node)) {
1287 		if (OF_getprop(node, "local-mac-address", conf[port].enaddr,
1288 		    sizeof(conf[port].enaddr)) != sizeof(conf[port].enaddr))
1289 			continue;
1290 
1291 		conf[port].vlan = OF_getpropint(node, "dual_emac_res_vlan", 0);
1292 
1293 		if (OF_getpropintarray(node, "phy_id", phy_id,
1294 		    sizeof(phy_id)) == sizeof(phy_id))
1295 			conf[port].phy_id = phy_id[1];
1296 		else if ((phy_handle =
1297 		    OF_getpropint(node, "phy-handle", 0)) != 0) {
1298 			phy_node = OF_getnodebyphandle(phy_handle);
1299 			if (phy_node)
1300 				conf[port].phy_id = OF_getpropint(phy_node,
1301 				    "reg", MII_PHY_ANY);
1302 		}
1303 
1304 		if (OF_getprop(node, "phy-mode", mode, sizeof(mode)) > 0 &&
1305 		    !strcmp(mode, "rgmii"))
1306 			conf[port].rgmii = 1;
1307 		else
1308 			conf[port].rgmii = 0;
1309 
1310 		if (port == 0)
1311 			port = 1;
1312 	}
1313 }
1314