/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rxtx(void *arg);
static void mge_intr_rx(void *arg);
static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size,
    bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mge_probe),
	DEVMETHOD(device_attach,	mge_attach),
	DEVMETHOD(device_detach,	mge_detach),
	DEVMETHOD(device_shutdown,	mge_shutdown),
	DEVMETHOD(device_suspend,	mge_suspend),
	DEVMETHOD(device_resume,	mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
	{ 0, 0 }
};

static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};

static devclass_t mge_devclass;

DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT + 1] = {
	{ mge_intr_rxtx, "GbE aggregated interrupt" },
	{ mge_intr_rx,	"GbE receive interrupt" },
	{ mge_intr_tx,	"GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum,	"GbE summary interrupt" },
	{ mge_intr_err,	"GbE error interrupt" },
};
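
/*
 * Note: which entry of this table gets used depends on how many interrupt
 * lines the SoC wires up. Controllers with a single, aggregated line
 * (mge_intr_cnt == 1 as set in mge_ver_params(), e.g. the 88RC8180) attach
 * the combined rx/tx handler at index 0; all others skip it and attach the
 * per-cause handlers starting at index 1.
 */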

static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;
	uint8_t lmac[6];
	int i, valid;

	/*
	 * Retrieve hw address from the device tree.
	 */
	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
	if (i == 6) {
		valid = 0;
		for (i = 0; i < 6; i++)
			if (lmac[i] != 0) {
				valid = 1;
				break;
			}

		if (valid) {
			bcopy(lmac, addr, 6);
			return;
		}
	}

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}

static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}
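
/*
 * The two helpers above pack an interrupt-coalescing time into the
 * version-specific bit layout of the TX FIFO urgent threshold (TFUT) and
 * SDMA config (IPG) registers: v1 controllers carry a 14-bit field, while
 * v2 controllers widen it to 16 bits, with the RX variant splitting the
 * top bit off into a separate position. mge_set_rxic() and mge_set_txic()
 * below rely on this to mask out the old field before or-ing in the new
 * value.
 */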

static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
	    d == MV_DEV_88F6282 ||
	    d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0 ||
	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
	if (d == MV_DEV_88RC8180)
		sc->mge_intr_cnt = 1;
	else
		sc->mge_intr_cnt = 2;

	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
		sc->mge_hw_csum = 0;
	else
		sc->mge_hw_csum = 1;
}

static void
mge_set_mac_address(struct mge_softc *sc)
{
	char *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	if_mac = (char *)IF_LLADDR(sc->ifp);

	mac_l = (if_mac[4] << 8) | (if_mac[5]);
	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
	    (if_mac[2] << 8) | (if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}

static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}
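
/*
 * A note on the unicast filter layout assumed above: the DA filter unicast
 * table is indexed by the low nibble of the last MAC address byte and is
 * MGE_UCAST_REG_NUMBER registers wide, each 32-bit register holding four
 * byte-wide entries (hence the "* 8" bit offset). Within an entry, bit 0
 * accepts the frame and the RX queue number sits just above it
 * (queue << 1).
 */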

static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
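
	/*
	 * The loop above walked the table backwards, so each descriptor's
	 * next_desc already points at its successor; the assignment below
	 * then links the last descriptor back to the first, closing the
	 * chain into a ring.
	 */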
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	int error;
	struct mge_desc_wrapper *dw;
	int i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */
	if (error) {
		device_printf(sc->dev,
		    "failed to create busdma tag for descriptors\n");
		return (ENXIO);
	}

	error = mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	if (error)
		return (error);
	error = mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);
	if (error)
		return (error);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free the mbuf attached to the descriptor, if requested */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free the descriptor memory */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{

	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tags */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptor tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
}

#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

static int
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_GLOBAL_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	mge_intr_tx_locked(sc);
	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_GLOBAL_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct mii_softc *miisc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error, phy;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Get the PHY address and the softc it belongs to from the FDT */
	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
		return (ENXIO);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupt coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->mge_hw_csum) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
	}
	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Attach PHY(s) */
	error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
	    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);
	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);

	/* Attach interrupt handlers */
	/* TODO: review flags; in particular, mark RX as INTR_ENTROPY? */
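	/*
	 * sc->res[0] is the memory window (see res_spec above), so the IRQ
	 * resources start at sc->res[1]; hence the loop below runs from 1
	 * to mge_intr_cnt and stores its cookies at index i - 1.
	 */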
	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
		error = bus_setup_intr(dev, sc->res[i],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
		    sc, &sc->ih_cookie[i - 1]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
			mge_detach(dev);
			return (error);
		}
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for the tick callout to finish */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < sc->mge_intr_cnt; ++i) {
		if (!sc->ih_cookie[i])
			continue;

		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Release IO memory and IRQ resources */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	MGE_TRANSMIT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	MGE_TRANSMIT_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
			case IFM_AUTO:
				break;
			case IFM_1000_T:
				port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
			case IFM_100_TX:
				port_config |= (PORT_SERIAL_MII_SPEED_100 |
				    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
			case IFM_10_T:
				port_config |= (PORT_SERIAL_AUTONEG |
				    PORT_SERIAL_AUTONEG_FC |
				    PORT_SERIAL_SPEED_AUTONEG);
				break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP) {
		MGE_GLOBAL_LOCK(sc);

		sc->mge_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		mge_init_locked(sc);

		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU (sc->mge_mtu holds the MTU register offset) */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupt coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...but only if polling is not turned on; if it is, keep the
	 * interrupts explicitly disabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_intr_rxtx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for Transmit interrupt */
	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
	    MGE_PORT_INT_EXT_TXUR)) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
		mge_intr_tx_locked(sc);
	}

	MGE_TRANSMIT_UNLOCK(sc);

	/* Check for Receive interrupt */
	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __func__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __func__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext)
{
	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}
}

static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {
			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

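			/*
			 * Presumably the controller prepends two bytes of
			 * padding ahead of the Ethernet header so that the
			 * IP header lands 32-bit aligned in the buffer;
			 * strip them before handing the frame up.
			 */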
			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	return (rx_npkts);
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __func__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				ifp->if_collisions++;
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				ifp->if_collisions += 16;
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		ifp->if_opackets++;
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
		    !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseT half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

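/*
 * SMI register layout assumed by the two PHY accessors below: the PHY
 * address goes in bits 20:16 and the register number in bits 25:21, with
 * the data in the low 16 bits; the 0x1fffffff mask keeps the command from
 * touching the busy/read-valid status bits in the top of the register.
 */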
static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mge_softc *sc;
	uint32_t retries;

	sc = device_get_softc(dev);

	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
}

static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct mge_softc *sc;
	uint32_t retries;

	sc = device_get_softc(dev);

	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	return (0);
}

static int
mge_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
		return (ENXIO);

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __func__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	ifp = sc->ifp;

	/* Check for free descriptors */
	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		return ((error != 0) ? error : -1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = 0;
		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}
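
/*
 * Note that mge_encap() maps each packet as a single DMA segment (the mbuf
 * tag created in mge_alloc_desc_dma() allows only one segment), which is
 * why mge_start_locked() runs every outgoing chain through m_defrag()
 * first and why nsegs != 1 above is treated as a failure.
 */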

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	MGE_GLOBAL_LOCK(sc);

	if (sc->wd_timer == 0 || --sc->wd_timer) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		mtmp = m_defrag(m0, M_NOWAIT);
		if (mtmp)
			m0 = mtmp;

		if (mge_encap(sc, m0)) {
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count--) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	if (!count)
		if_printf(ifp,
		    "%s: timeout while waiting for end of transmission\n",
		    __func__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __func__);
	return (0);
}

static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}

static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			csum_flags |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);

		if ((m0->m_flags & M_FRAG) == 0)
			cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

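/*
 * The lookup table below appears to implement CRC-8 with the polynomial
 * x^8 + x^2 + x + 1 (0x07); mge_setup_multicast() uses the resulting hash
 * to pick a byte-wide slot in the "other multicast" filter table for each
 * multicast address.
 */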
static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}

static void
mge_setup_multicast(struct mge_softc *sc)
{
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		if_maddr_runlock(ifp);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}

static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE interrupt coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}
1905