1 /* $OpenBSD: if_cpsw.c,v 1.23 2014/08/18 17:56:45 miod Exp $ */
2 /*	$NetBSD: if_cpsw.c,v 1.3 2013/04/17 14:36:34 bouyer Exp $	*/
3 
4 /*
5  * Copyright (c) 2013 Jonathan A. Kollasch
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
21  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*-
31  * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
32  * All rights reserved.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce the above copyright
40  *    notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53  * SUCH DAMAGE.
54  */
55 
56 #include "bpfilter.h"
57 
58 #include <sys/param.h>
59 #include <sys/systm.h>
60 #include <sys/sockio.h>
61 #include <sys/mbuf.h>
62 #include <sys/pool.h>
63 #include <sys/queue.h>
64 #include <sys/kernel.h>
65 #include <sys/device.h>
66 #include <sys/timeout.h>
67 #include <sys/socket.h>
68 
69 #include <machine/bus.h>
70 
71 #include <net/if.h>
72 #include <net/if_media.h>
73 
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
76 
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #endif
80 
81 #include <dev/mii/mii.h>
82 #include <dev/mii/miivar.h>
83 
84 #include <arch/armv7/armv7/armv7var.h>
85 #include <arch/armv7/omap/sitara_cm.h>
86 #include <arch/armv7/omap/if_cpswreg.h>
87 
88 #define CPSW_TXFRAGS	16
89 
90 #define OMAP2SCM_MAC_ID0_LO	0x630
91 #define OMAP2SCM_MAC_ID0_HI	0x634
92 
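/*
 * The CPSW's 8KB of CPPI descriptor RAM is split in half: the low half
 * holds the TX buffer descriptor ring, the high half the RX ring.  Each
 * descriptor is four 32-bit words, so each ring holds 256 descriptors,
 * and both counts are powers of two (the ring-index macros below rely
 * on that for masking).
 */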
93 #define CPSW_CPPI_RAM_SIZE (0x2000)
94 #define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
95 #define CPSW_CPPI_RAM_RXDESCS_SIZE \
96     (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
97 #define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
98 #define CPSW_CPPI_RAM_RXDESCS_BASE \
99     (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)
100 
101 #define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
102 #define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
103 
104 #define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
105 
106 #define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
107 #define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)
108 
109 #define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
110 #define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
111 
112 struct cpsw_ring_data {
113 	bus_dmamap_t		 tx_dm[CPSW_NTXDESCS];
114 	struct mbuf		*tx_mb[CPSW_NTXDESCS];
115 	bus_dmamap_t		 rx_dm[CPSW_NRXDESCS];
116 	struct mbuf		*rx_mb[CPSW_NRXDESCS];
117 };
118 
119 struct cpsw_softc {
120 	struct device		 sc_dev;
121 	bus_space_tag_t		 sc_bst;
122 	bus_space_handle_t	 sc_bsh;
123 	bus_dma_tag_t		 sc_bdt;
124 	bus_space_handle_t	 sc_bsh_txdescs;
125 	bus_space_handle_t	 sc_bsh_rxdescs;
126 	bus_addr_t		 sc_txdescs_pa;
127 	bus_addr_t		 sc_rxdescs_pa;
128 
129 	struct arpcom		 sc_ac;
130 	struct mii_data		 sc_mii;
131 
132 	struct cpsw_ring_data	*sc_rdp;
133 	volatile u_int		 sc_txnext;
134 	volatile u_int		 sc_txhead;
135 	volatile u_int		 sc_rxhead;
136 
137 	void			*sc_rxthih;
138 	void			*sc_rxih;
139 	void			*sc_txih;
140 	void			*sc_miscih;
141 
142 	void			*sc_txpad;
143 	bus_dmamap_t		 sc_txpad_dm;
144 #define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr
145 
146 	volatile bool		 sc_txrun;
147 	volatile bool		 sc_rxrun;
148 	volatile bool		 sc_txeoq;
149 	volatile bool		 sc_rxeoq;
150 	struct timeout		 sc_tick;
151 };
152 
153 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
154 
155 void	cpsw_attach(struct device *, struct device *, void *);
156 
157 void	cpsw_start(struct ifnet *);
158 int	cpsw_ioctl(struct ifnet *, u_long, caddr_t);
159 void	cpsw_watchdog(struct ifnet *);
160 int	cpsw_init(struct ifnet *);
161 void	cpsw_stop(struct ifnet *);
162 
163 int	cpsw_mii_readreg(struct device *, int, int);
164 void	cpsw_mii_writereg(struct device *, int, int, int);
165 void	cpsw_mii_statchg(struct device *);
166 
167 void	cpsw_tick(void *);
168 
169 int	cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
170 int	cpsw_mediachange(struct ifnet *);
171 void	cpsw_mediastatus(struct ifnet *, struct ifmediareq *);
172 
173 int	cpsw_rxthintr(void *);
174 int	cpsw_rxintr(void *);
175 int	cpsw_txintr(void *);
176 int	cpsw_miscintr(void *);
177 
178 void	cpsw_get_mac_addr(struct cpsw_softc *);
179 
180 struct cfattach cpsw_ca = {
181 	sizeof(struct cpsw_softc),
182 	NULL,
183 	cpsw_attach
184 };
185 
186 struct cfdriver cpsw_cd = {
187 	NULL,
188 	"cpsw",
189 	DV_IFNET
190 };
191 
192 static inline u_int
193 cpsw_txdesc_adjust(u_int x, int y)
194 {
195 	return (((x) + y) & (CPSW_NTXDESCS - 1));
196 }
197 
198 static inline u_int
199 cpsw_rxdesc_adjust(u_int x, int y)
200 {
201 	return (((x) + y) & (CPSW_NRXDESCS - 1));
202 }
203 
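/*
 * Descriptor accessors.  A buffer descriptor is four 32-bit words and
 * word 0 is the hardware "next descriptor" pointer, which is why the
 * cpsw_set_*desc_next() helpers write at offset 0 within the descriptor.
 */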
204 static inline void
205 cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
206 {
207 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
208 	bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
209 }
210 
211 static inline void
212 cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
213 {
214 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
215 	bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
216 }
217 
218 static inline void
219 cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
220     struct cpsw_cpdma_bd * const bdp)
221 {
222 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
223 	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o,
224 	    (uint32_t *)bdp, 4);
225 }
226 
227 static inline void
228 cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
229     struct cpsw_cpdma_bd * const bdp)
230 {
231 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
232 	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o,
233 	    (uint32_t *)bdp, 4);
234 }
235 
236 static inline void
237 cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
238     struct cpsw_cpdma_bd * const bdp)
239 {
240 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
241 	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o,
242 	    (uint32_t *)bdp, 4);
243 }
244 
245 static inline void
246 cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
247     struct cpsw_cpdma_bd * const bdp)
248 {
249 	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
250 	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o,
251 	    (uint32_t *)bdp, 4);
252 }
253 
254 static inline bus_addr_t
255 cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
256 {
257 	KASSERT(x < CPSW_NTXDESCS);
258 	return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
259 }
260 
261 static inline bus_addr_t
262 cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
263 {
264 	KASSERT(x < CPSW_NRXDESCS);
265 	return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
266 }
267 
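/*
 * Read the MAC address that the SoC exposes in the control module's
 * MAC_ID0 registers: the "hi" word holds the first four bytes and the
 * low 16 bits of the "lo" word hold the last two.
 */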
268 void
269 cpsw_get_mac_addr(struct cpsw_softc *sc)
270 {
271 	struct arpcom *ac = &sc->sc_ac;
272 	u_int32_t	mac_lo = 0, mac_hi = 0;
273 
274 	sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, &mac_lo);
275 	sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, &mac_hi);
276 
277 	if ((mac_lo == 0) && (mac_hi == 0))
278 		printf("%s: invalid ethernet address\n", DEVNAME(sc));
279 	else {
280 		ac->ac_enaddr[0] = (mac_hi >>  0) & 0xff;
281 		ac->ac_enaddr[1] = (mac_hi >>  8) & 0xff;
282 		ac->ac_enaddr[2] = (mac_hi >> 16) & 0xff;
283 		ac->ac_enaddr[3] = (mac_hi >> 24) & 0xff;
284 		ac->ac_enaddr[4] = (mac_lo >>  0) & 0xff;
285 		ac->ac_enaddr[5] = (mac_lo >>  8) & 0xff;
286 	}
287 }
288 
289 void
290 cpsw_attach(struct device *parent, struct device *self, void *aux)
291 {
292 	struct cpsw_softc *sc = (struct cpsw_softc *)self;
293 	struct armv7_attach_args *aa = aux;
294 	struct arpcom * const ac = &sc->sc_ac;
295 	struct ifnet * const ifp = &ac->ac_if;
296 	u_int32_t idver;
297 	int error;
298 	u_int i;
299 
300 	timeout_set(&sc->sc_tick, cpsw_tick, sc);
301 
302 	cpsw_get_mac_addr(sc);
303 
304 	sc->sc_rxthih = arm_intr_establish(aa->aa_dev->irq[0] +
305 	    CPSW_INTROFF_RXTH, IPL_NET, cpsw_rxthintr, sc, DEVNAME(sc));
306 	sc->sc_rxih = arm_intr_establish(aa->aa_dev->irq[0] +
307 	    CPSW_INTROFF_RX, IPL_NET, cpsw_rxintr, sc, DEVNAME(sc));
308 	sc->sc_txih = arm_intr_establish(aa->aa_dev->irq[0] +
309 	    CPSW_INTROFF_TX, IPL_NET, cpsw_txintr, sc, DEVNAME(sc));
310 	sc->sc_miscih = arm_intr_establish(aa->aa_dev->irq[0] +
311 	    CPSW_INTROFF_MISC, IPL_NET, cpsw_miscintr, sc, DEVNAME(sc));
312 
313 	sc->sc_bst = aa->aa_iot;
314 	sc->sc_bdt = aa->aa_dmat;
315 
316 	error = bus_space_map(sc->sc_bst, aa->aa_dev->mem[0].addr,
317 	    aa->aa_dev->mem[0].size, 0, &sc->sc_bsh);
318 	if (error) {
319 		printf("can't map registers: %d\n", error);
320 		return;
321 	}
322 
323 	sc->sc_txdescs_pa = aa->aa_dev->mem[0].addr +
324 	    CPSW_CPPI_RAM_TXDESCS_BASE;
325 	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
326 	    CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
327 	    &sc->sc_bsh_txdescs);
328 	if (error) {
329 		printf("can't subregion tx ring SRAM: %d\n", error);
330 		return;
331 	}
332 
333 	sc->sc_rxdescs_pa = aa->aa_dev->mem[0].addr +
334 	    CPSW_CPPI_RAM_RXDESCS_BASE;
335 	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
336 	    CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
337 	    &sc->sc_bsh_rxdescs);
338 	if (error) {
339 		printf("can't subregion rx ring SRAM: %d\n", error);
340 		return;
341 	}
342 
343 	sc->sc_rdp = malloc(sizeof(*sc->sc_rdp), M_TEMP, M_WAITOK);
344 	KASSERT(sc->sc_rdp != NULL);
345 
346 	for (i = 0; i < CPSW_NTXDESCS; i++) {
347 		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
348 		    CPSW_TXFRAGS, MCLBYTES, 0, 0,
349 		    &sc->sc_rdp->tx_dm[i])) != 0) {
350 			printf("unable to create tx DMA map: %d\n", error);
351 		}
352 		sc->sc_rdp->tx_mb[i] = NULL;
353 	}
354 
355 	for (i = 0; i < CPSW_NRXDESCS; i++) {
356 		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
357 		    MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
358 			printf("unable to create rx DMA map: %d\n", error);
359 		}
360 		sc->sc_rdp->rx_mb[i] = NULL;
361 	}
362 
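	/*
	 * A single zero-filled pad buffer is DMA-mapped once here; TX
	 * frames shorter than CPSW_PAD_LEN borrow it via an extra
	 * descriptor in cpsw_start().
	 */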
363 	sc->sc_txpad = dma_alloc(ETHER_MIN_LEN, PR_WAITOK | PR_ZERO);
364 	KASSERT(sc->sc_txpad != NULL);
365 	bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
366 	    BUS_DMA_WAITOK, &sc->sc_txpad_dm);
367 	bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
368 	    ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK|BUS_DMA_WRITE);
369 	bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
370 	    BUS_DMASYNC_PREWRITE);
371 
372 	idver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_IDVER);
373 	printf(": version %d.%d (%d), address %s\n",
374 	    CPSW_SS_IDVER_MAJ(idver), CPSW_SS_IDVER_MIN(idver),
375 	    CPSW_SS_IDVER_RTL(idver), ether_sprintf(ac->ac_enaddr));
376 
377 	ifp->if_softc = sc;
378 	ifp->if_capabilities = 0;
379 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
380 	ifp->if_start = cpsw_start;
381 	ifp->if_ioctl = cpsw_ioctl;
382 	ifp->if_watchdog = cpsw_watchdog;
383 	IFQ_SET_MAXLEN(&ifp->if_snd, CPSW_NTXDESCS - 1);
384 	IFQ_SET_READY(&ifp->if_snd);
385 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
386 
387 	cpsw_stop(ifp);
388 
389 	sc->sc_mii.mii_ifp = ifp;
390 	sc->sc_mii.mii_readreg = cpsw_mii_readreg;
391 	sc->sc_mii.mii_writereg = cpsw_mii_writereg;
392 	sc->sc_mii.mii_statchg = cpsw_mii_statchg;
393 
394 	ifmedia_init(&sc->sc_mii.mii_media, 0, cpsw_mediachange,
395 	    cpsw_mediastatus);
396 	mii_attach(self, &sc->sc_mii, 0xffffffff,
397 	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
398 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
399 		printf("no PHY found!\n");
400 		ifmedia_add(&sc->sc_mii.mii_media,
401 		    IFM_ETHER|IFM_MANUAL, 0, NULL);
402 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
403 	} else {
404 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
405 	}
406 
407 	if_attach(ifp);
408 	ether_ifattach(ifp);
409 
410 	return;
411 }
412 
413 int
414 cpsw_mediachange(struct ifnet *ifp)
415 {
416 	struct cpsw_softc *sc = ifp->if_softc;
417 
418 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
419 		mii_mediachg(&sc->sc_mii);
420 
421 	return (0);
422 }
423 
424 void
425 cpsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
426 {
427 	struct cpsw_softc *sc = ifp->if_softc;
428 
429 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
430 		mii_pollstat(&sc->sc_mii);
431 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
432 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
433 	}
434 }
435 
436 void
437 cpsw_start(struct ifnet *ifp)
438 {
439 	struct cpsw_softc * const sc = ifp->if_softc;
440 	struct cpsw_ring_data * const rdp = sc->sc_rdp;
441 	struct cpsw_cpdma_bd bd;
442 	struct mbuf *m;
443 	bus_dmamap_t dm;
444 	u_int eopi = ~0;
445 	u_int seg;
446 	u_int txfree;
447 	int txstart = -1;
448 	int error;
449 	bool pad;
450 	u_int mlen;
451 
452 	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
453 	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
454 	    IFQ_IS_EMPTY(&ifp->if_snd))
455 		return;
456 
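	/*
	 * Count free TX descriptors.  One slot is always left unused so
	 * that sc_txhead == sc_txnext can only mean an empty ring.
	 */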
457 	if (sc->sc_txnext >= sc->sc_txhead)
458 		txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
459 	else
460 		txfree = sc->sc_txhead - sc->sc_txnext - 1;
461 
462 	for (;;) {
463 		if (txfree <= CPSW_TXFRAGS) {
464 			SET(ifp->if_flags, IFF_OACTIVE);
465 			break;
466 		}
467 
468 		IFQ_POLL(&ifp->if_snd, m);
469 		if (m == NULL)
470 			break;
471 
472 		IFQ_DEQUEUE(&ifp->if_snd, m);
473 
474 		dm = rdp->tx_dm[sc->sc_txnext];
475 		error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
476 		switch (error) {
477 		case 0:
478 			break;
479 
480 		case EFBIG: /* mbuf chain is too fragmented */
481 			if (m_defrag(m, M_DONTWAIT) == 0 &&
482 			    bus_dmamap_load_mbuf(sc->sc_bdt, dm, m,
483 			    BUS_DMA_NOWAIT) == 0)
484 				break;
485 
486 			/* FALLTHROUGH */
487 		default:
488 			m_freem(m);
489 			ifp->if_oerrors++;
490 			continue;
491 		}
492 
493 		mlen = dm->dm_mapsize;
494 		pad = mlen < CPSW_PAD_LEN;
495 
496 		KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
497 		rdp->tx_mb[sc->sc_txnext] = m;
498 
499 #if NBPFILTER > 0
500 		if (ifp->if_bpf)
501 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
502 #endif
503 
504 		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
505 		    BUS_DMASYNC_PREWRITE);
506 
507 		if (txstart == -1)
508 			txstart = sc->sc_txnext;
509 		eopi = sc->sc_txnext;
510 		for (seg = 0; seg < dm->dm_nsegs; seg++) {
511 			bd.next = cpsw_txdesc_paddr(sc,
512 			    TXDESC_NEXT(sc->sc_txnext));
513 			bd.bufptr = dm->dm_segs[seg].ds_addr;
514 			bd.bufoff = 0;
515 			bd.buflen = dm->dm_segs[seg].ds_len;
516 			bd.pktlen = 0;
517 			bd.flags = 0;
518 
519 			if (seg == 0) {
520 				bd.flags = CPDMA_BD_OWNER | CPDMA_BD_SOP;
521 				bd.pktlen = MAX(mlen, CPSW_PAD_LEN);
522 			}
523 
524 			if (seg == dm->dm_nsegs - 1 && !pad)
525 				bd.flags |= CPDMA_BD_EOP;
526 
527 			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
528 			txfree--;
529 			eopi = sc->sc_txnext;
530 			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
531 		}
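		/*
		 * Short frames get one extra descriptor pointing at the
		 * shared zero pad buffer so that at least CPSW_PAD_LEN
		 * bytes are transmitted.
		 */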
532 		if (pad) {
533 			bd.next = cpsw_txdesc_paddr(sc,
534 			    TXDESC_NEXT(sc->sc_txnext));
535 			bd.bufptr = sc->sc_txpad_pa;
536 			bd.bufoff = 0;
537 			bd.buflen = CPSW_PAD_LEN - mlen;
538 			bd.pktlen = 0;
539 			bd.flags = CPDMA_BD_EOP;
540 
541 			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
542 			txfree--;
543 			eopi = sc->sc_txnext;
544 			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
545 		}
546 	}
547 
548 	if (txstart >= 0) {
549 		ifp->if_timer = 5;
550 		/* terminate the new chain */
551 		KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
552 		cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);
553 
554 		/* link the new chain on */
555 		cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
556 		    cpsw_txdesc_paddr(sc, txstart));
557 		if (sc->sc_txeoq) {
558 			/* kick the dma engine */
559 			sc->sc_txeoq = false;
560 			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0),
561 			    cpsw_txdesc_paddr(sc, txstart));
562 		}
563 	}
564 }
565 
566 int
567 cpsw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
568 {
569 	struct cpsw_softc *sc = ifp->if_softc;
570 	struct ifaddr *ifa = (struct ifaddr *)data;
571 	struct ifreq *ifr = (struct ifreq *)data;
572 	int s = splnet();
573 	int error = 0;
574 
575 	switch (cmd) {
576 	case SIOCSIFADDR:
577 		ifp->if_flags |= IFF_UP;
578 #ifdef INET
579 		if (ifa->ifa_addr->sa_family == AF_INET)
580 			arp_ifinit(&sc->sc_ac, ifa);
581 #endif
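		/* FALLTHROUGH */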
582 
583 	case SIOCSIFFLAGS:
584 		if (ifp->if_flags & IFF_UP) {
585 			if (ifp->if_flags & IFF_RUNNING)
586 				error = ENETRESET;
587 			else
588 				cpsw_init(ifp);
589 		} else {
590 			if (ifp->if_flags & IFF_RUNNING)
591 				cpsw_stop(ifp);
592 		}
593 		break;
594 	case SIOCSIFMEDIA:
595 		ifr->ifr_media &= ~IFM_ETH_FMASK;
596 		/* FALLTHROUGH */
597 	case SIOCGIFMEDIA:
598 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
599 		break;
600 	default:
601 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
602 		break;
603 	}
604 	if (error == ENETRESET) {
605 		if (ifp->if_flags & IFF_RUNNING)
606 			cpsw_init(ifp);
607 		error = 0;
608 	}
609 
610 	splx(s);
611 
612 	return error;
613 }
614 
615 void
616 cpsw_watchdog(struct ifnet *ifp)
617 {
618 	printf("device timeout\n");
619 
620 	ifp->if_oerrors++;
621 	cpsw_init(ifp);
622 	cpsw_start(ifp);
623 }
624 
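/*
 * Wait for the MDIO state machine to go idle: bit 31 of the
 * MDIOUSERACCESS register is the GO bit and stays set while a
 * transaction is in progress.
 */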
625 static int
626 cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
627 {
628 	u_int tries;
629 
630 	for(tries = 0; tries < 1000; tries++) {
631 		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, reg) & (1U << 31)) == 0)
632 			return 0;
633 		delay(1);
634 	}
635 	return ETIMEDOUT;
636 }
637 
638 int
639 cpsw_mii_readreg(struct device *dev, int phy, int reg)
640 {
641 	struct cpsw_softc * const sc = (struct cpsw_softc *)dev;
642 	uint32_t v;
643 
644 	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
645 		return 0;
646 
647 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0, (1U << 31) |
648 	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));
649 
650 	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
651 		return 0;
652 
653 	v = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0);
654 	if (v & (1 << 29))
655 		return v & 0xffff;
656 	else
657 		return 0;
658 }
659 
660 void
661 cpsw_mii_writereg(struct device *dev, int phy, int reg, int val)
662 {
663 	struct cpsw_softc * const sc = (struct cpsw_softc *)dev;
664 	uint32_t v;
665 
666 	KASSERT((val & 0xffff0000UL) == 0);
667 
668 	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
669 		goto out;
670 
671 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0, (1U << 31) | (1 << 30) |
672 	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);
673 
674 	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
675 		goto out;
676 
677 	v = bus_space_read_4(sc->sc_bst, sc->sc_bsh, MDIOUSERACCESS0);
678 	if ((v & (1 << 29)) == 0)
679 out:
680 		printf("%s error\n", __func__);
681 
682 }
683 
684 void
685 cpsw_mii_statchg(struct device *self)
686 {
687 	return;
688 }
689 
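/*
 * Attach a fresh mbuf cluster to RX slot i and re-arm its descriptor.
 * If allocation fails, the old buffer is kept and the descriptor is
 * simply handed back to the hardware ("reuse").
 */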
690 int
691 cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
692 {
693 	struct cpsw_ring_data * const rdp = sc->sc_rdp;
694 	const u_int h = RXDESC_PREV(i);
695 	struct cpsw_cpdma_bd bd;
696 	struct mbuf *m;
697 	int error = ENOBUFS;
698 
699 	MGETHDR(m, M_DONTWAIT, MT_DATA);
700 	if (m == NULL) {
701 		goto reuse;
702 	}
703 
704 	MCLGET(m, M_DONTWAIT);
705 	if ((m->m_flags & M_EXT) == 0) {
706 		m_freem(m);
707 		goto reuse;
708 	}
709 
710 	/* We have a new buffer, prepare it for the ring. */
711 
712 	if (rdp->rx_mb[i] != NULL)
713 		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
714 
715 	m->m_len = m->m_pkthdr.len = MCLBYTES;
716 
717 	rdp->rx_mb[i] = m;
718 
719 	error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
720 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
721 	if (error) {
722 		printf("can't load rx DMA map %d: %d\n", i, error);
723 	}
724 
725 	bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
726 	    0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
727 
728 	error = 0;
729 
730 reuse:
731 	/* (re-)setup the descriptor */
732 	bd.next = 0;
733 	bd.bufptr = rdp->rx_dm[i]->dm_segs[0].ds_addr;
734 	bd.bufoff = 0;
735 	bd.buflen = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
736 	bd.pktlen = 0;
737 	bd.flags = CPDMA_BD_OWNER;
738 
739 	cpsw_set_rxdesc(sc, i, &bd);
740 	/* and link onto ring */
741 	cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));
742 
743 	return error;
744 }
745 
746 int
747 cpsw_init(struct ifnet *ifp)
748 {
749 	struct cpsw_softc * const sc = ifp->if_softc;
750 	struct arpcom *ac = &sc->sc_ac;
751 	struct mii_data * const mii = &sc->sc_mii;
752 	int i;
753 
754 	cpsw_stop(ifp);
755 
756 	sc->sc_txnext = 0;
757 	sc->sc_txhead = 0;
758 
759 	/* Reset wrapper */
760 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET, 1);
761 	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET) & 1);
762 
763 	/* Reset SS */
764 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET, 1);
765 	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET) & 1);
766 
767 	/* Clear table (30) and enable ALE(31) and set passthrough (4) */
768 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_ALE_CONTROL, (3 << 30) | 0x10);
769 
770 	/* Reset and init Sliver port 1 and 2 */
771 	for (i = 0; i < 2; i++) {
772 		/* Reset */
773 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i), 1);
774 		while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i)) & 1);
775 		/* Set Slave Mapping */
776 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
777 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
778 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_RX_MAXLEN(i), 0x5f2);
779 		/* Set MAC Address */
780 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P_SA_HI(i+1),
781 		    ac->ac_enaddr[0] | (ac->ac_enaddr[1] << 8) |
782 		    (ac->ac_enaddr[2] << 16) | (ac->ac_enaddr[3] << 24));
783 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P_SA_LO(i+1),
784 		    ac->ac_enaddr[4] | (ac->ac_enaddr[5] << 8));
785 
786 		/* Set MACCONTROL for ports 0,1: FULLDUPLEX(1), GMII_EN(5),
787 		   IFCTL_A(15), IFCTL_B(16) FIXME */
788 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_MACCONTROL(i),
789 		    1 | (1<<5) | (1<<15) | (1<<16));
790 
791 		/* Set ALE port to forwarding(3) */
792 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_ALE_PORTCTL(i+1), 3);
793 	}
794 
795 	/* Set Host Port Mapping */
796 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
797 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);
798 
799 	/* Set ALE port to forwarding(3) */
800 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_ALE_PORTCTL(0), 3);
801 
802 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_PTYPE, 0);
803 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_STAT_PORT_EN, 7);
804 
805 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET, 1);
806 	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET) & 1);
807 
808 	for (i = 0; i < 8; i++) {
809 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(i), 0);
810 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(i), 0);
811 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(i), 0);
812 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(i), 0);
813 	}
814 
815 	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
816 	    CPSW_CPPI_RAM_TXDESCS_SIZE/4);
817 
818 	sc->sc_txhead = 0;
819 	sc->sc_txnext = 0;
820 
821 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_FREEBUFFER(0), 0);
822 
823 	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
824 	    CPSW_CPPI_RAM_RXDESCS_SIZE/4);
825 
826 	/* Initialize RX Buffer Descriptors */
827 	cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
828 	for (i = 0; i < CPSW_NRXDESCS; i++) {
829 		cpsw_new_rxbuf(sc, i);
830 	}
831 	sc->sc_rxhead = 0;
832 
833 	/* align layer 3 header to 32-bit */
834 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);
835 
836 	/* Clear all interrupt Masks */
837 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
838 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);
839 
840 	/* Enable TX & RX DMA */
841 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CONTROL, 1);
842 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CONTROL, 1);
843 
844 	/* Enable TX and RX interrupt receive for core 0 */
845 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_TX_EN(0), 1);
846 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_RX_EN(0), 1);
847 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_MISC_EN(0), 0x1F);
848 
849 	/* Enable host Error Interrupt */
850 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTMASK_SET, 2);
851 
852 	/* Enable interrupts for TX and RX Channel 0 */
853 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_INTMASK_SET, 1);
854 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_INTMASK_SET, 1);
855 
856 	/* Ack stalled irqs */
857 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
858 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
859 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
860 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
861 
862 	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
863 	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
864 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, MDIOCONTROL, (1<<30) | (1<<18) | 0xFF);
865 
866 	mii_mediachg(mii);
867 
868 	/* Write channel 0 RX HDP */
869 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
870 	sc->sc_rxrun = true;
871 	sc->sc_rxeoq = false;
872 
873 	sc->sc_txrun = true;
874 	sc->sc_txeoq = true;
875 
876 	ifp->if_flags |= IFF_RUNNING;
877 	ifp->if_flags &= ~IFF_OACTIVE;
878 
879 	timeout_add_sec(&sc->sc_tick, 1);
880 
881 	return 0;
882 }
883 
884 void
885 cpsw_stop(struct ifnet *ifp)
886 {
887 	struct cpsw_softc * const sc = ifp->if_softc;
888 	struct cpsw_ring_data * const rdp = sc->sc_rdp;
889 	u_int i;
890 
891 #if 0
892 	/* XXX 'disable' was an argument to the NetBSD cpsw_stop() and does not exist here */
893 	printf("%s: ifp %p disable %d\n", __func__, ifp, disable);
894 #endif
895 	if ((ifp->if_flags & IFF_RUNNING) == 0)
896 		return;
897 
898 	timeout_del(&sc->sc_tick);
899 
900 	mii_down(&sc->sc_mii);
901 
902 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
903 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
904 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_TX_EN(0), 0x0);
905 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_RX_EN(0), 0x0);
906 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_MISC_EN(0), 0x1F);
907 
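	/*
	 * Request teardown of TX and RX channel 0 and let the normal
	 * interrupt handlers consume the teardown-complete markers,
	 * waiting at most 10000 * 10us.
	 */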
908 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_TEARDOWN, 0);
909 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_TEARDOWN, 0);
910 	i = 0;
911 	while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
912 		delay(10);
913 		if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
914 			sc->sc_txrun = false;
915 		if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
916 			sc->sc_rxrun = false;
917 		i++;
918 	}
919 	/* printf("%s teardown complete in %u\n", __func__, i); */
920 
921 	/* Reset wrapper */
922 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET, 1);
923 	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_SOFT_RESET) & 1);
924 
925 	/* Reset SS */
926 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET, 1);
927 	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SS_SOFT_RESET) & 1);
928 
929 	for (i = 0; i < 2; i++) {
930 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i), 1);
931 		while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_SL_SOFT_RESET(i)) & 1);
932 	}
933 
934 	/* Reset CPDMA */
935 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET, 1);
936 	while(bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_SOFT_RESET) & 1);
937 
938 	/* Release any queued transmit buffers. */
939 	for (i = 0; i < CPSW_NTXDESCS; i++) {
940 		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
941 		m_freem(rdp->tx_mb[i]);
942 		rdp->tx_mb[i] = NULL;
943 	}
944 
945 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
946 	ifp->if_timer = 0;
947 
948 	/* XXX 'disable' came from the NetBSD driver's (*if_stop)(ifp, disable);
949 	    it has no OpenBSD equivalent, so the rx buffers below are always
950 	    freed. */
951 #if 0
952 	if (!disable)
953 		return;
954 #endif
955 
956 	for (i = 0; i < CPSW_NRXDESCS; i++) {
957 		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
958 		m_freem(rdp->rx_mb[i]);
959 		rdp->rx_mb[i] = NULL;
960 	}
961 }
962 
963 int
964 cpsw_rxthintr(void *arg)
965 {
966 	struct cpsw_softc * const sc = arg;
967 
968 	/* this won't deassert the interrupt though */
969 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
970 
971 	return 1;
972 }
973 
974 int
975 cpsw_rxintr(void *arg)
976 {
977 	struct cpsw_softc * const sc = arg;
978 	struct ifnet * const ifp = &sc->sc_ac.ac_if;
979 	struct cpsw_ring_data * const rdp = sc->sc_rdp;
980 	struct cpsw_cpdma_bd bd;
981 	bus_dmamap_t dm;
982 	struct mbuf *m;
983 	u_int i;
984 	u_int len, off;
985 
986 	for (;;) {
987 		KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);
988 
989 		i = sc->sc_rxhead;
990 		dm = rdp->rx_dm[i];
991 		m = rdp->rx_mb[i];
992 
993 		KASSERT(dm != NULL);
994 		KASSERT(m != NULL);
995 
996 		cpsw_get_rxdesc(sc, i, &bd);
997 
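		/* Stop at the first descriptor still owned by the hardware. */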
998 		if (bd.flags & CPDMA_BD_OWNER)
999 			break;
1000 
1001 		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
1002 			sc->sc_rxrun = false;
1003 			return 1;
1004 		}
1005 
1006 		if ((bd.flags & (CPDMA_BD_SOP|CPDMA_BD_EOP)) !=
1007 		    (CPDMA_BD_SOP|CPDMA_BD_EOP)) {
1008 			/* Debugger(); */
1009 		}
1010 
1011 		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
1012 		    BUS_DMASYNC_POSTREAD);
1013 		bus_dmamap_unload(sc->sc_bdt, dm);
1014 
1015 		if (cpsw_new_rxbuf(sc, i) != 0) {
1016 			/* drop current packet, reuse buffer for new */
1017 			ifp->if_ierrors++;
1018 			goto next;
1019 		}
1020 
1021 		off = bd.bufoff;
1022 		len = bd.pktlen;
1023 
1024 		if (bd.flags & CPDMA_BD_PASSCRC)
1025 			len -= ETHER_CRC_LEN;
1026 
1027 		m->m_pkthdr.rcvif = ifp;
1028 		m->m_pkthdr.len = m->m_len = len;
1029 		m->m_data += off;
1030 
1031 		ifp->if_ipackets++;
1032 
1033 #if NBPFILTER > 0
1034 		if (ifp->if_bpf)
1035 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1036 #endif
1037 		ether_input_mbuf(ifp, m);
1038 
1039 next:
1040 		sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
1041 		if (bd.flags & CPDMA_BD_EOQ) {
1042 			sc->sc_rxeoq = true;
1043 			break;
1044 		} else {
1045 			sc->sc_rxeoq = false;
1046 		}
1047 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(0),
1048 		    cpsw_rxdesc_paddr(sc, i));
1049 	}
1050 
1051 	if (sc->sc_rxeoq) {
1052 		printf("rxeoq\n");
1053 		/* Debugger(); */
1054 	}
1055 
1056 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
1057 
1058 	return 1;
1059 }
1060 
1061 void
1062 cpsw_tick(void *arg)
1063 {
1064 	struct cpsw_softc *sc = arg;
1065 	int s;
1066 
1067 	s = splnet();
1068 	mii_tick(&sc->sc_mii);
1069 	splx(s);
1070 
1071 	timeout_add_sec(&sc->sc_tick, 1);
1072 }
1073 
1074 int
1075 cpsw_txintr(void *arg)
1076 {
1077 	struct cpsw_softc * const sc = arg;
1078 	struct ifnet * const ifp = &sc->sc_ac.ac_if;
1079 	struct cpsw_ring_data * const rdp = sc->sc_rdp;
1080 	struct cpsw_cpdma_bd bd;
1081 	bool handled = false;
1082 	uint32_t tx0_cp;
1083 	u_int cpi;
1084 
1085 	KASSERT(sc->sc_txrun);
1086 
1087 	tx0_cp = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0));
1088 
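	/*
	 * A completion pointer of 0xfffffffc marks a completed queue
	 * teardown; acknowledge it and note that TX is stopped.
	 */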
1089 	if (tx0_cp == 0xfffffffc) {
1090 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
1091 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0), 0);
1092 		sc->sc_txrun = false;
1093 		return 0;
1094 	}
1095 
1096 	for (;;) {
1097 		tx0_cp = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0));
1098 		cpi = (tx0_cp - sc->sc_txdescs_pa) /
1099 		    sizeof(struct cpsw_cpdma_bd);
1100 		KASSERT(sc->sc_txhead < CPSW_NTXDESCS);
1101 
1102 		cpsw_get_txdesc(sc, sc->sc_txhead, &bd);
1103 
1104 		if (bd.buflen == 0) {
1105 			/* Debugger(); */
1106 		}
1107 
1108 		if ((bd.flags & CPDMA_BD_SOP) == 0)
1109 			goto next;
1110 
1111 		if (bd.flags & CPDMA_BD_OWNER) {
1112 			printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
1113 			    sc->sc_txnext);
1114 			break;
1115 		}
1116 
1117 		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
1118 			sc->sc_txrun = false;
1119 			return 1;
1120 		}
1121 
1122 		bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
1123 		    0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
1124 		    BUS_DMASYNC_POSTWRITE);
1125 		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);
1126 
1127 		m_freem(rdp->tx_mb[sc->sc_txhead]);
1128 		rdp->tx_mb[sc->sc_txhead] = NULL;
1129 
1130 		ifp->if_opackets++;
1131 
1132 		handled = true;
1133 
1134 		ifp->if_flags &= ~IFF_OACTIVE;
1135 
1136 next:
1137 		if ((bd.flags & (CPDMA_BD_EOP|CPDMA_BD_EOQ)) ==
1138 		    (CPDMA_BD_EOP|CPDMA_BD_EOQ))
1139 			sc->sc_txeoq = true;
1140 
1141 		if (sc->sc_txhead == cpi) {
1142 			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0),
1143 			    cpsw_txdesc_paddr(sc, cpi));
1144 			sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1145 			break;
1146 		}
1147 		sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
1148 		if (sc->sc_txeoq == true)
1149 			break;
1150 	}
1151 
1152 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
1153 
1154 	if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
1155 		if (bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0)) == 0) {
1156 			sc->sc_txeoq = false;
1157 			bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0),
1158 			    cpsw_txdesc_paddr(sc, sc->sc_txhead));
1159 		}
1160 	}
1161 
1162 	if (handled && sc->sc_txnext == sc->sc_txhead)
1163 		ifp->if_timer = 0;
1164 
1165 	if (handled)
1166 		cpsw_start(ifp);
1167 
1168 	return handled;
1169 }
1170 
1171 int
1172 cpsw_miscintr(void *arg)
1173 {
1174 	struct cpsw_softc * const sc = arg;
1175 	uint32_t miscstat;
1176 	uint32_t dmastat;
1177 	uint32_t stat;
1178 
1179 	miscstat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_WR_C_MISC_STAT(0));
1180 	printf("%s %x FIRE\n", __func__, miscstat);
1181 
1182 	if (miscstat & CPSW_MISC_HOST_PEND) {
1183 		/* Host Error */
1184 		dmastat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1185 		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1186 
1187 		printf("rxhead %02x\n", sc->sc_rxhead);
1188 
1189 		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMASTATUS);
1190 		printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
1191 		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_HDP(0));
1192 		printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
1193 		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_TX_CP(0));
1194 		printf("CPSW_CPDMA_TX0_CP %x\n", stat);
1195 		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_HDP(0));
1196 		printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
1197 		stat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(0));
1198 		printf("CPSW_CPDMA_RX0_CP %x\n", stat);
1199 
1200 		/* Debugger(); */
1201 
1202 		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
1203 		dmastat = bus_space_read_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_DMA_INTSTAT_MASKED);
1204 		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
1205 	}
1206 
1207 	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);
1208 
1209 	return 1;
1210 }
1211