/* $OpenBSD: if_cpsw.c,v 1.21 2013/11/26 20:33:11 deraadt Exp $ */
/*	$NetBSD: if_cpsw.c,v 1.3 2013/04/17 14:36:34 bouyer Exp $	*/

/*
 * Copyright (c) 2013 Jonathan A. Kollasch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2012 Damjan Marion <dmarion@Freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <arch/armv7/armv7/armv7var.h>
#include <arch/armv7/omap/sitara_cm.h>
#include <arch/armv7/omap/if_cpswreg.h>

#define CPSW_TXFRAGS	16

#define OMAP2SCM_MAC_ID0_LO	0x630
#define OMAP2SCM_MAC_ID0_HI	0x634

#define CPSW_CPPI_RAM_SIZE (0x2000)
#define CPSW_CPPI_RAM_TXDESCS_SIZE (CPSW_CPPI_RAM_SIZE/2)
#define CPSW_CPPI_RAM_RXDESCS_SIZE \
    (CPSW_CPPI_RAM_SIZE - CPSW_CPPI_RAM_TXDESCS_SIZE)
#define CPSW_CPPI_RAM_TXDESCS_BASE (CPSW_CPPI_RAM_OFFSET + 0x0000)
#define CPSW_CPPI_RAM_RXDESCS_BASE \
    (CPSW_CPPI_RAM_OFFSET + CPSW_CPPI_RAM_TXDESCS_SIZE)

#define CPSW_NTXDESCS (CPSW_CPPI_RAM_TXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))
#define CPSW_NRXDESCS (CPSW_CPPI_RAM_RXDESCS_SIZE/sizeof(struct cpsw_cpdma_bd))

#define CPSW_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

#define TXDESC_NEXT(x) cpsw_txdesc_adjust((x), 1)
#define TXDESC_PREV(x) cpsw_txdesc_adjust((x), -1)

#define RXDESC_NEXT(x) cpsw_rxdesc_adjust((x), 1)
#define RXDESC_PREV(x) cpsw_rxdesc_adjust((x), -1)
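
/*
 * Descriptor ring geometry: the 8 KB of on-chip CPPI RAM is split evenly
 * between the TX and RX rings.  Each cpsw_cpdma_bd occupies four 32-bit
 * words (see the *_region_4 accessors below), so each ring holds 256
 * descriptors.  The NEXT/PREV macros above mask with (count - 1) and
 * therefore rely on the descriptor counts being powers of two.
 */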

struct cpsw_ring_data {
	bus_dmamap_t		 tx_dm[CPSW_NTXDESCS];
	struct mbuf		*tx_mb[CPSW_NTXDESCS];
	bus_dmamap_t		 rx_dm[CPSW_NRXDESCS];
	struct mbuf		*rx_mb[CPSW_NRXDESCS];
};

struct cpsw_softc {
	struct device		 sc_dev;
	bus_space_tag_t		 sc_bst;
	bus_space_handle_t	 sc_bsh;
	bus_dma_tag_t		 sc_bdt;
	bus_space_handle_t	 sc_bsh_txdescs;
	bus_space_handle_t	 sc_bsh_rxdescs;
	bus_addr_t		 sc_txdescs_pa;
	bus_addr_t		 sc_rxdescs_pa;

	struct arpcom		 sc_ac;
	struct mii_data		 sc_mii;

	struct cpsw_ring_data	*sc_rdp;
	volatile u_int		 sc_txnext;
	volatile u_int		 sc_txhead;
	volatile u_int		 sc_rxhead;

	void			*sc_rxthih;
	void			*sc_rxih;
	void			*sc_txih;
	void			*sc_miscih;

	void			*sc_txpad;
	bus_dmamap_t		 sc_txpad_dm;
#define sc_txpad_pa sc_txpad_dm->dm_segs[0].ds_addr

	volatile bool		 sc_txrun;
	volatile bool		 sc_rxrun;
	volatile bool		 sc_txeoq;
	volatile bool		 sc_rxeoq;
	struct timeout		 sc_tick;
};

#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

void	cpsw_attach(struct device *, struct device *, void *);

void	cpsw_start(struct ifnet *);
int	cpsw_ioctl(struct ifnet *, u_long, caddr_t);
void	cpsw_watchdog(struct ifnet *);
int	cpsw_init(struct ifnet *);
void	cpsw_stop(struct ifnet *);

int	cpsw_mii_readreg(struct device *, int, int);
void	cpsw_mii_writereg(struct device *, int, int, int);
void	cpsw_mii_statchg(struct device *);

void	cpsw_tick(void *);

int	cpsw_new_rxbuf(struct cpsw_softc * const, const u_int);
int	cpsw_mediachange(struct ifnet *);
void	cpsw_mediastatus(struct ifnet *, struct ifmediareq *);

int	cpsw_rxthintr(void *);
int	cpsw_rxintr(void *);
int	cpsw_txintr(void *);
int	cpsw_miscintr(void *);

void	cpsw_get_mac_addr(struct cpsw_softc *);

struct cfattach cpsw_ca = {
	sizeof(struct cpsw_softc),
	NULL,
	cpsw_attach
};

struct cfdriver cpsw_cd = {
	NULL,
	"cpsw",
	DV_IFNET
};

static inline u_int
cpsw_txdesc_adjust(u_int x, int y)
{
	return (((x) + y) & (CPSW_NTXDESCS - 1));
}

static inline u_int
cpsw_rxdesc_adjust(u_int x, int y)
{
	return (((x) + y) & (CPSW_NRXDESCS - 1));
}

static inline uint32_t
cpsw_read_4(struct cpsw_softc * const sc, bus_size_t const offset)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, offset);
}

static inline void
cpsw_write_4(struct cpsw_softc * const sc, bus_size_t const offset,
    uint32_t const value)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, offset, value);
}

static inline void
cpsw_set_txdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_txdescs, o, n);
}

static inline void
cpsw_set_rxdesc_next(struct cpsw_softc * const sc, const u_int i, uint32_t n)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i + 0;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh_rxdescs, o, n);
}

static inline void
cpsw_get_txdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o,
	    (uint32_t *)bdp, 4);
}

static inline void
cpsw_set_txdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_txdescs, o,
	    (uint32_t *)bdp, 4);
}

static inline void
cpsw_get_rxdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	bus_space_read_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o,
	    (uint32_t *)bdp, 4);
}

static inline void
cpsw_set_rxdesc(struct cpsw_softc * const sc, const u_int i,
    struct cpsw_cpdma_bd * const bdp)
{
	const bus_size_t o = sizeof(struct cpsw_cpdma_bd) * i;
	bus_space_write_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, o,
	    (uint32_t *)bdp, 4);
}

static inline bus_addr_t
cpsw_txdesc_paddr(struct cpsw_softc * const sc, u_int x)
{
	KASSERT(x < CPSW_NTXDESCS);
	return sc->sc_txdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
}

static inline bus_addr_t
cpsw_rxdesc_paddr(struct cpsw_softc * const sc, u_int x)
{
	KASSERT(x < CPSW_NRXDESCS);
	return sc->sc_rxdescs_pa + sizeof(struct cpsw_cpdma_bd) * x;
}
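
/*
 * The descriptors live in the switch's own CPPI SRAM rather than in host
 * memory, so the paddr helpers above turn a ring index into the bus address
 * of that descriptor; these values are what the CPDMA head descriptor
 * pointers and the "next" words inside the descriptors themselves expect.
 */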

void
cpsw_get_mac_addr(struct cpsw_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	u_int32_t	mac_lo = 0, mac_hi = 0;

	sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_LO, &mac_lo);
	sitara_cm_reg_read_4(OMAP2SCM_MAC_ID0_HI, &mac_hi);

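	/*
	 * The control module stores the factory MAC address with the first
	 * four bytes in MAC_ID0_HI and the last two in MAC_ID0_LO, least
	 * significant byte first, which is what the unpacking below assumes.
	 */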
	if ((mac_lo == 0) && (mac_hi == 0))
		printf("%s: invalid ethernet address\n", DEVNAME(sc));
	else {
		ac->ac_enaddr[0] = (mac_hi >>  0) & 0xff;
		ac->ac_enaddr[1] = (mac_hi >>  8) & 0xff;
		ac->ac_enaddr[2] = (mac_hi >> 16) & 0xff;
		ac->ac_enaddr[3] = (mac_hi >> 24) & 0xff;
		ac->ac_enaddr[4] = (mac_lo >>  0) & 0xff;
		ac->ac_enaddr[5] = (mac_lo >>  8) & 0xff;
	}
}

void
cpsw_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpsw_softc *sc = (struct cpsw_softc *)self;
	struct armv7_attach_args *aa = aux;
	struct arpcom * const ac = &sc->sc_ac;
	struct ifnet * const ifp = &ac->ac_if;
	u_int32_t idver;
	int error;
	u_int i;

	timeout_set(&sc->sc_tick, cpsw_tick, sc);

	cpsw_get_mac_addr(sc);

	sc->sc_rxthih = arm_intr_establish(aa->aa_dev->irq[0] +
	    CPSW_INTROFF_RXTH, IPL_NET, cpsw_rxthintr, sc, DEVNAME(sc));
	sc->sc_rxih = arm_intr_establish(aa->aa_dev->irq[0] +
	    CPSW_INTROFF_RX, IPL_NET, cpsw_rxintr, sc, DEVNAME(sc));
	sc->sc_txih = arm_intr_establish(aa->aa_dev->irq[0] +
	    CPSW_INTROFF_TX, IPL_NET, cpsw_txintr, sc, DEVNAME(sc));
	sc->sc_miscih = arm_intr_establish(aa->aa_dev->irq[0] +
	    CPSW_INTROFF_MISC, IPL_NET, cpsw_miscintr, sc, DEVNAME(sc));

	sc->sc_bst = aa->aa_iot;
	sc->sc_bdt = aa->aa_dmat;

	error = bus_space_map(sc->sc_bst, aa->aa_dev->mem[0].addr,
	    aa->aa_dev->mem[0].size, 0, &sc->sc_bsh);
	if (error) {
		printf("can't map registers: %d\n", error);
		return;
	}

	sc->sc_txdescs_pa = aa->aa_dev->mem[0].addr +
	    CPSW_CPPI_RAM_TXDESCS_BASE;
	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
	    CPSW_CPPI_RAM_TXDESCS_BASE, CPSW_CPPI_RAM_TXDESCS_SIZE,
	    &sc->sc_bsh_txdescs);
	if (error) {
		printf("can't subregion tx ring SRAM: %d\n", error);
		return;
	}

	sc->sc_rxdescs_pa = aa->aa_dev->mem[0].addr +
	    CPSW_CPPI_RAM_RXDESCS_BASE;
	error = bus_space_subregion(sc->sc_bst, sc->sc_bsh,
	    CPSW_CPPI_RAM_RXDESCS_BASE, CPSW_CPPI_RAM_RXDESCS_SIZE,
	    &sc->sc_bsh_rxdescs);
	if (error) {
		printf("can't subregion rx ring SRAM: %d\n", error);
		return;
	}

	sc->sc_rdp = malloc(sizeof(*sc->sc_rdp), M_TEMP, M_WAITOK);
	KASSERT(sc->sc_rdp != NULL);

	for (i = 0; i < CPSW_NTXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES,
		    CPSW_TXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_rdp->tx_dm[i])) != 0) {
			printf("unable to create tx DMA map: %d\n", error);
		}
		sc->sc_rdp->tx_mb[i] = NULL;
	}

	for (i = 0; i < CPSW_NRXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_bdt, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rdp->rx_dm[i])) != 0) {
			printf("unable to create rx DMA map: %d\n", error);
		}
		sc->sc_rdp->rx_mb[i] = NULL;
	}

	sc->sc_txpad = dma_alloc(ETHER_MIN_LEN, PR_WAITOK | PR_ZERO);
	KASSERT(sc->sc_txpad != NULL);
	bus_dmamap_create(sc->sc_bdt, ETHER_MIN_LEN, 1, ETHER_MIN_LEN, 0,
	    BUS_DMA_WAITOK, &sc->sc_txpad_dm);
	bus_dmamap_load(sc->sc_bdt, sc->sc_txpad_dm, sc->sc_txpad,
	    ETHER_MIN_LEN, NULL, BUS_DMA_WAITOK|BUS_DMA_WRITE);
	bus_dmamap_sync(sc->sc_bdt, sc->sc_txpad_dm, 0, ETHER_MIN_LEN,
	    BUS_DMASYNC_PREWRITE);
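
	/*
	 * sc_txpad is a permanently mapped, zero-filled buffer.  cpsw_start()
	 * appends it as a final descriptor to any frame shorter than
	 * CPSW_PAD_LEN, so every packet handed to the switch is at least
	 * 60 bytes long before the MAC appends the CRC.
	 */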

	idver = cpsw_read_4(sc, CPSW_SS_IDVER);
	printf(": version %d.%d (%d), address %s\n",
	    CPSW_SS_IDVER_MAJ(idver), CPSW_SS_IDVER_MIN(idver),
	    CPSW_SS_IDVER_RTL(idver), ether_sprintf(ac->ac_enaddr));

	ifp->if_softc = sc;
	ifp->if_capabilities = 0;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cpsw_start;
	ifp->if_ioctl = cpsw_ioctl;
	ifp->if_watchdog = cpsw_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, CPSW_NTXDESCS - 1);
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	cpsw_stop(ifp);

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = cpsw_mii_readreg;
	sc->sc_mii.mii_writereg = cpsw_mii_writereg;
	sc->sc_mii.mii_statchg = cpsw_mii_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, cpsw_mediachange,
	    cpsw_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	if_attach(ifp);
	ether_ifattach(ifp);

	return;
}

int
cpsw_mediachange(struct ifnet *ifp)
{
	struct cpsw_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
cpsw_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cpsw_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	struct mbuf *m;
	bus_dmamap_t dm;
	u_int eopi = ~0;
	u_int seg;
	u_int txfree;
	int txstart = -1;
	int error;
	bool pad;
	u_int mlen;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
	    IFQ_IS_EMPTY(&ifp->if_snd))
		return;

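	/*
	 * Compute the number of free TX descriptors.  The ring always keeps
	 * one slot unused so that a full ring can be told apart from an
	 * empty one, hence the "- 1" in both branches below.
	 */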
	if (sc->sc_txnext >= sc->sc_txhead)
		txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
	else
		txfree = sc->sc_txhead - sc->sc_txnext - 1;

	for (;;) {
		if (txfree <= CPSW_TXFRAGS) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);

		dm = rdp->tx_dm[sc->sc_txnext];
		error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			break;

		case EFBIG: /* mbuf chain is too fragmented */
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_bdt, dm, m,
			    BUS_DMA_NOWAIT) == 0)
				break;

			/* FALLTHROUGH */
		default:
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		mlen = dm->dm_mapsize;
		pad = mlen < CPSW_PAD_LEN;

		KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
		rdp->tx_mb[sc->sc_txnext] = m;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

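		/*
		 * Build one buffer descriptor per DMA segment.  The first
		 * descriptor of a frame carries SOP, the OWNER bit and the
		 * total packet length; the last one carries EOP, unless the
		 * frame is short, in which case a trailing descriptor
		 * pointing at the shared zero pad supplies the missing
		 * bytes and carries EOP instead.
		 */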
		if (txstart == -1)
			txstart = sc->sc_txnext;
		eopi = sc->sc_txnext;
		for (seg = 0; seg < dm->dm_nsegs; seg++) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = dm->dm_segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = dm->dm_segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = 0;

			if (seg == 0) {
				bd.flags = CPDMA_BD_OWNER | CPDMA_BD_SOP;
				bd.pktlen = MAX(mlen, CPSW_PAD_LEN);
			}

			if (seg == dm->dm_nsegs - 1 && !pad)
				bd.flags |= CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
		if (pad) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = sc->sc_txpad_pa;
			bd.bufoff = 0;
			bd.buflen = CPSW_PAD_LEN - mlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
	}

	if (txstart >= 0) {
		ifp->if_timer = 5;
		/* terminate the new chain */
		KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
		cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);

		/* link the new chain on */
		cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
		    cpsw_txdesc_paddr(sc, txstart));
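		/*
		 * If the CPDMA engine previously reported end of queue it
		 * has stopped fetching descriptors, so restart it by writing
		 * the head of the new chain to the channel 0 head descriptor
		 * pointer; otherwise it picks the chain up through the
		 * "next" link written above.
		 */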
		if (sc->sc_txeoq) {
			/* kick the dma engine */
			sc->sc_txeoq = false;
			cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, txstart));
		}
	}
}

int
cpsw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cpsw_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s = splnet();
	int error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				cpsw_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				cpsw_stop(ifp);
		}
		break;
	case SIOCSIFMEDIA:
		ifr->ifr_media &= ~IFM_ETH_FMASK;
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
		break;
	}
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			cpsw_init(ifp);
		error = 0;
	}

	splx(s);

	return error;
}

void
cpsw_watchdog(struct ifnet *ifp)
{
	printf("device timeout\n");

	ifp->if_oerrors++;
	cpsw_init(ifp);
	cpsw_start(ifp);
}

static int
cpsw_mii_wait(struct cpsw_softc * const sc, int reg)
{
	u_int tries;

	for (tries = 0; tries < 1000; tries++) {
		if ((cpsw_read_4(sc, reg) & (1U << 31)) == 0)
			return 0;
		delay(1);
	}
	return ETIMEDOUT;
}

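/*
 * MDIO access goes through the MDIOUSERACCESS0 register: bit 31 (GO) starts
 * a transaction and stays set while it is in progress (cpsw_mii_wait polls
 * for it to clear), bit 30 selects a write, bit 29 reports the PHY's ACK on
 * completion, the register number sits at bits 25:21, the PHY address at
 * bits 20:16 and the data in the low 16 bits, matching the shifts below.
 */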
int
cpsw_mii_readreg(struct device *dev, int phy, int reg)
{
	struct cpsw_softc * const sc = (struct cpsw_softc *)dev;
	uint32_t v;

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		return 0;

	cpsw_write_4(sc, MDIOUSERACCESS0, (1U << 31) |
	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16));

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		return 0;

	v = cpsw_read_4(sc, MDIOUSERACCESS0);
	if (v & (1 << 29))
		return v & 0xffff;
	else
		return 0;
}

void
cpsw_mii_writereg(struct device *dev, int phy, int reg, int val)
{
	struct cpsw_softc * const sc = (struct cpsw_softc *)dev;
	uint32_t v;

	KASSERT((val & 0xffff0000UL) == 0);

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		goto out;

	cpsw_write_4(sc, MDIOUSERACCESS0, (1U << 31) | (1 << 30) |
	    ((reg & 0x1F) << 21) | ((phy & 0x1F) << 16) | val);

	if (cpsw_mii_wait(sc, MDIOUSERACCESS0) != 0)
		goto out;

	v = cpsw_read_4(sc, MDIOUSERACCESS0);
	if (v & (1 << 29))
		return;
out:
	printf("%s error\n", __func__);
}

void
cpsw_mii_statchg(struct device *self)
{
	return;
}

int
cpsw_new_rxbuf(struct cpsw_softc * const sc, const u_int i)
{
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	const u_int h = RXDESC_PREV(i);
	struct cpsw_cpdma_bd bd;
	struct mbuf *m;
	int error = ENOBUFS;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		goto reuse;
	}

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		goto reuse;
	}

	/* We have a new buffer, prepare it for the ring. */

	if (rdp->rx_mb[i] != NULL)
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);

	m->m_len = m->m_pkthdr.len = MCLBYTES;

	rdp->rx_mb[i] = m;

	error = bus_dmamap_load_mbuf(sc->sc_bdt, rdp->rx_dm[i], rdp->rx_mb[i],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("can't load rx DMA map %d: %d\n", i, error);
	}

	bus_dmamap_sync(sc->sc_bdt, rdp->rx_dm[i],
	    0, rdp->rx_dm[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	error = 0;

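	/*
	 * If no replacement mbuf cluster could be had we arrive here with
	 * the previous buffer still loaded; re-arming the same descriptor
	 * below drops that packet but keeps the ring free of holes.
	 */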
reuse:
	/* (re-)setup the descriptor */
	bd.next = 0;
	bd.bufptr = rdp->rx_dm[i]->dm_segs[0].ds_addr;
	bd.bufoff = 0;
	bd.buflen = MIN(0x7ff, rdp->rx_dm[i]->dm_segs[0].ds_len);
	bd.pktlen = 0;
	bd.flags = CPDMA_BD_OWNER;

	cpsw_set_rxdesc(sc, i, &bd);
	/* and link onto ring */
	cpsw_set_rxdesc_next(sc, h, cpsw_rxdesc_paddr(sc, i));

	return error;
}

int
cpsw_init(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct arpcom *ac = &sc->sc_ac;
	struct mii_data * const mii = &sc->sc_mii;
	int i;

	cpsw_stop(ifp);

	sc->sc_txnext = 0;
	sc->sc_txhead = 0;

	/* Reset wrapper */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1);

	/* Reset SS */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1);

	/* Enable the ALE (bit 31), clear the table (bit 30), set passthrough (bit 4) */
	cpsw_write_4(sc, CPSW_ALE_CONTROL, (3 << 30) | 0x10);

	/* Reset and init Sliver port 1 and 2 */
	for (i = 0; i < 2; i++) {
		/* Reset */
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1);
		/* Set Slave Mapping */
		cpsw_write_4(sc, CPSW_SL_RX_PRI_MAP(i), 0x76543210);
		cpsw_write_4(sc, CPSW_PORT_P_TX_PRI_MAP(i+1), 0x33221100);
		cpsw_write_4(sc, CPSW_SL_RX_MAXLEN(i), 0x5f2);
		/* Set MAC Address */
		cpsw_write_4(sc, CPSW_PORT_P_SA_HI(i+1),
		    ac->ac_enaddr[0] | (ac->ac_enaddr[1] << 8) |
		    (ac->ac_enaddr[2] << 16) | (ac->ac_enaddr[3] << 24));
		cpsw_write_4(sc, CPSW_PORT_P_SA_LO(i+1),
		    ac->ac_enaddr[4] | (ac->ac_enaddr[5] << 8));

		/* Set MACCONTROL for ports 0,1: FULLDUPLEX (bit 0),
		   GMII_EN (bit 5), IFCTL_A (bit 15), IFCTL_B (bit 16) FIXME */
		cpsw_write_4(sc, CPSW_SL_MACCONTROL(i),
		    1 | (1<<5) | (1<<15) | (1<<16));

		/* Set ALE port to forwarding(3) */
		cpsw_write_4(sc, CPSW_ALE_PORTCTL(i+1), 3);
	}

	/* Set Host Port Mapping */
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210);
	cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0);

	/* Set ALE port to forwarding(3) */
	cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3);

	cpsw_write_4(sc, CPSW_SS_PTYPE, 0);
	cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7);

	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1);

	for (i = 0; i < 8; i++) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0);
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0);
	}

	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_txdescs, 0, 0,
	    CPSW_CPPI_RAM_TXDESCS_SIZE/4);

	sc->sc_txhead = 0;
	sc->sc_txnext = 0;

	cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0);

	bus_space_set_region_4(sc->sc_bst, sc->sc_bsh_rxdescs, 0, 0,
	    CPSW_CPPI_RAM_RXDESCS_SIZE/4);

	/* Initialize RX Buffer Descriptors */
	cpsw_set_rxdesc_next(sc, RXDESC_PREV(0), 0);
	for (i = 0; i < CPSW_NRXDESCS; i++) {
		cpsw_new_rxbuf(sc, i);
	}
	sc->sc_rxhead = 0;

	/* align layer 3 header to 32-bit */
	cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, ETHER_ALIGN);

	/* Clear all interrupt Masks */
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF);
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF);

	/* Enable TX & RX DMA */
	cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1);

	/* Enable TX and RX interrupt receive for core 0 */
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 1);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 1);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	/* Enable host Error Interrupt */
	cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 2);

	/* Enable interrupts for TX and RX Channel 0 */
	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_SET, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1);

	/* Ack stalled irqs */
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

	/* Initialize MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */
	/* TODO Calculate MDCLK=CLK/(CLKDIV+1) */
	cpsw_write_4(sc, MDIOCONTROL, (1<<30) | (1<<18) | 0xFF);

	mii_mediachg(mii);

	/* Write channel 0 RX HDP */
	cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(0), cpsw_rxdesc_paddr(sc, 0));
	sc->sc_rxrun = true;
	sc->sc_rxeoq = false;

	sc->sc_txrun = true;
	sc->sc_txeoq = true;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	timeout_add_sec(&sc->sc_tick, 1);

	return 0;
}

void
cpsw_stop(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	u_int i;

#if 0
	/* XXX find where disable comes from */
	printf("%s: ifp %p disable %d\n", __func__, ifp, disable);
#endif
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	timeout_del(&sc->sc_tick);

	mii_down(&sc->sc_mii);

	cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 1);
	cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 1);
	cpsw_write_4(sc, CPSW_WR_C_TX_EN(0), 0x0);
	cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0x0);
	cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F);

	cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0);
	cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0);
	i = 0;
	while ((sc->sc_txrun || sc->sc_rxrun) && i < 10000) {
		delay(10);
		if ((sc->sc_txrun == true) && cpsw_txintr(sc) == 0)
			sc->sc_txrun = false;
		if ((sc->sc_rxrun == true) && cpsw_rxintr(sc) == 0)
			sc->sc_rxrun = false;
		i++;
	}
	/* printf("%s teardown complete in %u\n", __func__, i); */

	/* Reset wrapper */
	cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1);

	/* Reset SS */
	cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1);

	for (i = 0; i < 2; i++) {
		cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1);
		while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1);
	}

	/* Reset CPDMA */
	cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1);
	while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1);

	/* Release any queued transmit buffers. */
	for (i = 0; i < CPSW_NTXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[i]);
		m_freem(rdp->tx_mb[i]);
		rdp->tx_mb[i] = NULL;
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
	ifp->if_timer = 0;

	/* XXX Not sure what this is doing calling disable here
	    where is disable set?
	*/
#if 0
	if (!disable)
		return;
#endif

	for (i = 0; i < CPSW_NRXDESCS; i++) {
		bus_dmamap_unload(sc->sc_bdt, rdp->rx_dm[i]);
		m_freem(rdp->rx_mb[i]);
		rdp->rx_mb[i] = NULL;
	}
}

int
cpsw_rxthintr(void *arg)
{
	struct cpsw_softc * const sc = arg;

	/* this won't deassert the interrupt though */
	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RXTH);

	return 1;
}

int
cpsw_rxintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ac.ac_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	bus_dmamap_t dm;
	struct mbuf *m;
	u_int i;
	u_int len, off;

	for (;;) {
		KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);

		i = sc->sc_rxhead;
		dm = rdp->rx_dm[i];
		m = rdp->rx_mb[i];

		KASSERT(dm != NULL);
		KASSERT(m != NULL);

		cpsw_get_rxdesc(sc, i, &bd);

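		/*
		 * The DMA engine clears CPDMA_BD_OWNER once it has filled a
		 * descriptor, so stop at the first descriptor the hardware
		 * still owns; TDOWNCMPLT marks the end of a channel teardown
		 * rather than a received frame.
		 */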
		if (bd.flags & CPDMA_BD_OWNER)
			break;

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			sc->sc_rxrun = false;
			return 1;
		}

		if ((bd.flags & (CPDMA_BD_SOP|CPDMA_BD_EOP)) !=
		    (CPDMA_BD_SOP|CPDMA_BD_EOP)) {
			/* Debugger(); */
		}

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_bdt, dm);

		if (cpsw_new_rxbuf(sc, i) != 0) {
			/* drop current packet, reuse buffer for new */
			ifp->if_ierrors++;
			goto next;
		}

		off = bd.bufoff;
		len = bd.pktlen;

		if (bd.flags & CPDMA_BD_PASSCRC)
			len -= ETHER_CRC_LEN;

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		m->m_data += off;

		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);

next:
		sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
		if (bd.flags & CPDMA_BD_EOQ) {
			sc->sc_rxeoq = true;
			break;
		} else {
			sc->sc_rxeoq = false;
		}
		cpsw_write_4(sc, CPSW_CPDMA_RX_CP(0),
		    cpsw_rxdesc_paddr(sc, i));
	}

	if (sc->sc_rxeoq) {
		printf("rxeoq\n");
		/* Debugger(); */
	}

	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_RX);

	return 1;
}

void
cpsw_tick(void *arg)
{
	struct cpsw_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

int
cpsw_txintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ac.ac_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	bool handled = false;
	uint32_t tx0_cp;
	u_int cpi;

	KASSERT(sc->sc_txrun);

	tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));

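	/*
	 * A completion pointer of 0xfffffffc is the CPDMA teardown-complete
	 * marker: acknowledge it, clear the head pointer and mark the TX
	 * queue as no longer running.
	 */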
	if (tx0_cp == 0xfffffffc) {
		cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0), 0xfffffffc);
		cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0), 0);
		sc->sc_txrun = false;
		return 0;
	}

	for (;;) {
		tx0_cp = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
		cpi = (tx0_cp - sc->sc_txdescs_pa) /
		    sizeof(struct cpsw_cpdma_bd);
		KASSERT(sc->sc_txhead < CPSW_NTXDESCS);

		cpsw_get_txdesc(sc, sc->sc_txhead, &bd);

		if (bd.buflen == 0) {
			/* Debugger(); */
		}

		if ((bd.flags & CPDMA_BD_SOP) == 0)
			goto next;

		if (bd.flags & CPDMA_BD_OWNER) {
			printf("pwned %x %x %x\n", cpi, sc->sc_txhead,
			    sc->sc_txnext);
			break;
		}

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			sc->sc_txrun = false;
			return 1;
		}

		bus_dmamap_sync(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead],
		    0, rdp->tx_dm[sc->sc_txhead]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_bdt, rdp->tx_dm[sc->sc_txhead]);

		m_freem(rdp->tx_mb[sc->sc_txhead]);
		rdp->tx_mb[sc->sc_txhead] = NULL;

		ifp->if_opackets++;

		handled = true;

		ifp->if_flags &= ~IFF_OACTIVE;

next:
		if ((bd.flags & (CPDMA_BD_EOP|CPDMA_BD_EOQ)) ==
		    (CPDMA_BD_EOP|CPDMA_BD_EOQ))
			sc->sc_txeoq = true;

		if (sc->sc_txhead == cpi) {
			cpsw_write_4(sc, CPSW_CPDMA_TX_CP(0),
			    cpsw_txdesc_paddr(sc, cpi));
			sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
			break;
		}
		sc->sc_txhead = TXDESC_NEXT(sc->sc_txhead);
		if (sc->sc_txeoq == true)
			break;
	}

	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_TX);

	if ((sc->sc_txnext != sc->sc_txhead) && sc->sc_txeoq) {
		if (cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0)) == 0) {
			sc->sc_txeoq = false;
			cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, sc->sc_txhead));
		}
	}

	if (handled && sc->sc_txnext == sc->sc_txhead)
		ifp->if_timer = 0;

	if (handled)
		cpsw_start(ifp);

	return handled;
}

int
cpsw_miscintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	uint32_t miscstat;
	uint32_t dmastat;
	uint32_t stat;

	miscstat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0));
	printf("%s %x FIRE\n", __func__, miscstat);

	if (miscstat & CPSW_MISC_HOST_PEND) {
		/* Host Error */
		dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);

		printf("rxhead %02x\n", sc->sc_rxhead);

		stat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS);
		printf("CPSW_CPDMA_DMASTATUS %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(0));
		printf("CPSW_CPDMA_TX0_HDP %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_TX_CP(0));
		printf("CPSW_CPDMA_TX0_CP %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_RX_HDP(0));
		printf("CPSW_CPDMA_RX0_HDP %x\n", stat);
		stat = cpsw_read_4(sc, CPSW_CPDMA_RX_CP(0));
		printf("CPSW_CPDMA_RX0_CP %x\n", stat);

		/* Debugger(); */

		cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, dmastat);
		dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED);
		printf("CPSW_CPDMA_DMA_INTSTAT_MASKED %x\n", dmastat);
	}

	cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, CPSW_INTROFF_MISC);

	return 1;
}