xref: /freebsd/sys/dev/mge/if_mge.c (revision aa0a1e58)
1 /*-
2  * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
3  * All rights reserved.
4  *
5  * Developed by Semihalf.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of MARVELL nor the names of contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifdef HAVE_KERNEL_OPTION_HEADERS
33 #include "opt_device_polling.h"
34 #endif
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
42 #include <sys/mbuf.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/kernel.h>
46 #include <sys/module.h>
47 #include <sys/socket.h>
48 #include <sys/sysctl.h>
49 
50 #include <net/ethernet.h>
51 #include <net/bpf.h>
52 #include <net/if.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
58 
59 #include <netinet/in_systm.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 
63 #include <sys/sockio.h>
64 #include <sys/bus.h>
65 #include <machine/bus.h>
66 #include <sys/rman.h>
67 #include <machine/resource.h>
68 
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71 
72 #include <dev/fdt/fdt_common.h>
73 #include <dev/ofw/ofw_bus.h>
74 #include <dev/ofw/ofw_bus_subr.h>
75 
76 #include <dev/mge/if_mgevar.h>
77 #include <arm/mv/mvreg.h>
78 #include <arm/mv/mvvar.h>
79 
80 #include "miibus_if.h"
81 
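/*
 * Locking: the driver uses two mutexes, transmit_lock and receive_lock,
 * taken separately by the TX and RX paths; the MGE_GLOBAL_* macros
 * (defined in if_mgevar.h) are assumed to take/assert both whenever the
 * whole datapath must be quiesced (init, stop, ioctl).
 */
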
82 /* PHY registers are in the address space of the first mge unit */
83 static struct mge_softc *sc_mge0 = NULL;
84 
85 static int mge_probe(device_t dev);
86 static int mge_attach(device_t dev);
87 static int mge_detach(device_t dev);
88 static int mge_shutdown(device_t dev);
89 static int mge_suspend(device_t dev);
90 static int mge_resume(device_t dev);
91 
92 static int mge_miibus_readreg(device_t dev, int phy, int reg);
93 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
94 
95 static int mge_ifmedia_upd(struct ifnet *ifp);
96 static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
97 
98 static void mge_init(void *arg);
99 static void mge_init_locked(void *arg);
100 static void mge_start(struct ifnet *ifp);
101 static void mge_start_locked(struct ifnet *ifp);
102 static void mge_watchdog(struct mge_softc *sc);
103 static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
104 
105 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
106 static uint32_t mge_rx_ipg(uint32_t val, int ver);
107 static void mge_ver_params(struct mge_softc *sc);
108 
109 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
110 static void mge_intr_rx(void *arg);
111 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
112 static void mge_intr_tx(void *arg);
113 static void mge_intr_tx_locked(struct mge_softc *sc);
114 static void mge_intr_misc(void *arg);
115 static void mge_intr_sum(void *arg);
116 static void mge_intr_err(void *arg);
117 static void mge_stop(struct mge_softc *sc);
118 static void mge_tick(void *msc);
119 static uint32_t mge_set_port_serial_control(uint32_t media);
120 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
121 static void mge_set_mac_address(struct mge_softc *sc);
122 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
123     uint8_t queue);
124 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
125 static int mge_allocate_dma(struct mge_softc *sc);
126 static int mge_alloc_desc_dma(struct mge_softc *sc,
127     struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag);
128 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
129     struct mbuf **mbufp, bus_addr_t *paddr);
130 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error);
131 static void mge_free_dma(struct mge_softc *sc);
132 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size,
133     bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
134 static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
135     uint32_t status, uint16_t bufsize);
136 static void mge_offload_setup_descriptor(struct mge_softc *sc,
137     struct mge_desc_wrapper *dw);
138 static uint8_t mge_crc8(uint8_t *data, int size);
139 static void mge_setup_multicast(struct mge_softc *sc);
140 static void mge_set_rxic(struct mge_softc *sc);
141 static void mge_set_txic(struct mge_softc *sc);
142 static void mge_add_sysctls(struct mge_softc *sc);
143 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
144 
145 static device_method_t mge_methods[] = {
146 	/* Device interface */
147 	DEVMETHOD(device_probe,		mge_probe),
148 	DEVMETHOD(device_attach,	mge_attach),
149 	DEVMETHOD(device_detach,	mge_detach),
150 	DEVMETHOD(device_shutdown,	mge_shutdown),
151 	DEVMETHOD(device_suspend,	mge_suspend),
152 	DEVMETHOD(device_resume,	mge_resume),
153 	/* MII interface */
154 	DEVMETHOD(miibus_readreg,	mge_miibus_readreg),
155 	DEVMETHOD(miibus_writereg,	mge_miibus_writereg),
156 	{ 0, 0 }
157 };
158 
159 static driver_t mge_driver = {
160 	"mge",
161 	mge_methods,
162 	sizeof(struct mge_softc),
163 };
164 
165 static devclass_t mge_devclass;
166 
167 DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
168 DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
169 MODULE_DEPEND(mge, ether, 1, 1, 1);
170 MODULE_DEPEND(mge, miibus, 1, 1, 1);
171 
172 static struct resource_spec res_spec[] = {
173 	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
174 	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
175 	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
176 	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
177 	{ SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE },
178 	{ SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE },
179 	{ -1, 0 }
180 };
181 
182 static struct {
183 	driver_intr_t *handler;
184 	char *description;
185 } mge_intrs[MGE_INTR_COUNT] = {
186 	{ mge_intr_rx,	"GbE receive interrupt" },
187 	{ mge_intr_tx,	"GbE transmit interrupt" },
188 	{ mge_intr_misc, "GbE misc interrupt" },
189 	{ mge_intr_sum,	"GbE summary interrupt" },
190 	{ mge_intr_err,	"GbE error interrupt" },
191 };
192 
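/*
 * Resolve the MAC address: prefer the "local-mac-address" DT property if
 * it is present and non-zero, otherwise fall back to whatever address is
 * currently programmed in the MAC_ADDR_L/H registers (e.g. by firmware).
 */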
193 static void
194 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
195 {
196 	uint32_t mac_l, mac_h;
197 	uint8_t lmac[6];
198 	int i, valid;
199 
200 	/*
201 	 * Retrieve hw address from the device tree.
202 	 */
203 	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
204 	if (i == 6) {
205 		valid = 0;
206 		for (i = 0; i < 6; i++)
207 			if (lmac[i] != 0) {
208 				valid = 1;
209 				break;
210 			}
211 
212 		if (valid) {
213 			bcopy(lmac, addr, 6);
214 			return;
215 		}
216 	}
217 
218 	/*
219 	 * Fall back -- use the currently programmed address.
220 	 */
221 	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
222 	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
223 
224 	addr[0] = (mac_h & 0xff000000) >> 24;
225 	addr[1] = (mac_h & 0x00ff0000) >> 16;
226 	addr[2] = (mac_h & 0x0000ff00) >> 8;
227 	addr[3] = (mac_h & 0x000000ff);
228 	addr[4] = (mac_l & 0x0000ff00) >> 8;
229 	addr[5] = (mac_l & 0x000000ff);
230 }
231 
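/*
 * mge_tfut_ipg()/mge_rx_ipg() encode an interrupt-coalescing time value
 * into its register field; judging by the masks, the field is 14 bits
 * wide on v1 controllers and 16 bits on v2 (with the RX field split
 * across non-contiguous bits on v2).
 */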
232 static uint32_t
233 mge_tfut_ipg(uint32_t val, int ver)
234 {
235 
236 	switch (ver) {
237 	case 1:
238 		return ((val & 0x3fff) << 4);
239 	case 2:
240 	default:
241 		return ((val & 0xffff) << 4);
242 	}
243 }
244 
245 static uint32_t
246 mge_rx_ipg(uint32_t val, int ver)
247 {
248 
249 	switch (ver) {
250 	case 1:
251 		return ((val & 0x3fff) << 8);
252 	case 2:
253 	default:
254 		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
255 	}
256 }
257 
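/*
 * Select version-dependent parameters based on the SoC ID: register
 * offsets (mge_mtu is the offset of the MTU register, not a byte count),
 * coalescing limits and TX queue arbiter defaults.
 */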
258 static void
259 mge_ver_params(struct mge_softc *sc)
260 {
261 	uint32_t d, r;
262 
263 	soc_id(&d, &r);
264 	if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 ||
265 	    d == MV_DEV_MV78100_Z0) {
266 		sc->mge_ver = 2;
267 		sc->mge_mtu = 0x4e8;
268 		sc->mge_tfut_ipg_max = 0xFFFF;
269 		sc->mge_rx_ipg_max = 0xFFFF;
270 		sc->mge_tx_arb_cfg = 0xFC0000FF;
271 		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
272 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
273 	} else {
274 		sc->mge_ver = 1;
275 		sc->mge_mtu = 0x458;
276 		sc->mge_tfut_ipg_max = 0x3FFF;
277 		sc->mge_rx_ipg_max = 0x3FFF;
278 		sc->mge_tx_arb_cfg = 0x000000FF;
279 		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
280 		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
281 	}
282 }
283 
284 static void
285 mge_set_mac_address(struct mge_softc *sc)
286 {
287 	char *if_mac;
288 	uint32_t mac_l, mac_h;
289 
290 	MGE_GLOBAL_LOCK_ASSERT(sc);
291 
292 	if_mac = (char *)IF_LLADDR(sc->ifp);
293 
294 	mac_l = (if_mac[4] << 8) | (if_mac[5]);
295 	mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
296 	    (if_mac[2] << 8) | (if_mac[3] << 0);
297 
298 	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
299 	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
300 
301 	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
302 }
303 
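/*
 * Program the unicast hash filter.  The table is indexed by the low
 * nibble of the last MAC address byte; each 32-bit register holds four
 * 8-bit entries of the form (queue << 1) | 1 (bit 0 = pass, bits 3:1 =
 * destination queue), which is what the reg_off arithmetic below encodes.
 */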
304 static void
305 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
306 {
307 	uint32_t reg_idx, reg_off, reg_val, i;
308 
309 	last_byte &= 0xf;
310 	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
311 	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
312 	reg_val = (1 | (queue << 1)) << reg_off;
313 
314 	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
315 		if (i == reg_idx)
316 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
317 		else
318 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
319 	}
320 }
321 
322 static void
323 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
324 {
325 	uint32_t port_config;
326 	uint32_t reg_val, i;
327 
328 	/* Enable or disable promiscuous mode as needed */
329 	if (sc->ifp->if_flags & IFF_PROMISC) {
330 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
331 		port_config |= PORT_CONFIG_UPM;
332 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
333 
334 		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
335 		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
336 
337 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
338 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
339 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
340 		}
341 
342 		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
343 			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
344 
345 	} else {
346 		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
347 		port_config &= ~PORT_CONFIG_UPM;
348 		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
349 
350 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
351 			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
352 			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
353 		}
354 
355 		mge_set_mac_address(sc);
356 	}
357 }
358 
359 static void
360 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
361 {
362 	u_int32_t *paddr;
363 
364 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
365 	paddr = arg;
366 
367 	*paddr = segs->ds_addr;
368 }
369 
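/*
 * Attach a fresh cluster mbuf to the given map, unloading any previous
 * buffer first, and hand back its bus address for the RX descriptor.
 * A mapping failure here is treated as fatal (panic), since the tag
 * guarantees a single segment.
 */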
370 static int
371 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
372     bus_addr_t *paddr)
373 {
374 	struct mbuf *new_mbuf;
375 	bus_dma_segment_t seg[1];
376 	int error;
377 	int nsegs;
378 
379 	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
380 
381 	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
382 	if (new_mbuf == NULL)
383 		return (ENOBUFS);
384 	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
385 
386 	if (*mbufp) {
387 		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
388 		bus_dmamap_unload(tag, map);
389 	}
390 
391 	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
392 	    BUS_DMA_NOWAIT);
393 	KASSERT(nsegs == 1, ("Too many segments returned!"));
394 	if (nsegs != 1 || error)
395 		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
396 
397 	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
398 
399 	(*mbufp) = new_mbuf;
400 	(*paddr) = seg->ds_addr;
401 	return (0);
402 }
403 
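/*
 * Allocate and chain the descriptors.  The loop walks the table
 * backwards so each descriptor's next_desc can point at its successor;
 * the final assignment closes the ring by linking the last descriptor
 * back to the first (desc_paddr holds tab[0]'s address after the loop).
 */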
404 static int
405 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
406     uint32_t size, bus_dma_tag_t *buffer_tag)
407 {
408 	struct mge_desc_wrapper *dw;
409 	bus_addr_t desc_paddr;
410 	int i, error;
411 
412 	desc_paddr = 0;
413 	for (i = size - 1; i >= 0; i--) {
414 		dw = &(tab[i]);
415 		error = bus_dmamem_alloc(sc->mge_desc_dtag,
416 		    (void**)&(dw->mge_desc),
417 		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
418 		    &(dw->desc_dmap));
419 
420 		if (error) {
421 			if_printf(sc->ifp, "failed to allocate DMA memory\n");
422 			dw->mge_desc = NULL;
423 			return (ENXIO);
424 		}
425 
426 		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
427 		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
428 		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
429 
430 		if (error) {
431 			if_printf(sc->ifp, "can't load descriptor\n");
432 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
433 			    dw->desc_dmap);
434 			dw->mge_desc = NULL;
435 			return (ENXIO);
436 		}
437 
438 		/* Chain descriptors */
439 		dw->mge_desc->next_desc = desc_paddr;
440 		desc_paddr = dw->mge_desc_paddr;
441 	}
442 	tab[size - 1].mge_desc->next_desc = desc_paddr;
443 
444 	/* Allocate a busdma tag for mbufs. */
445 	error = bus_dma_tag_create(NULL,	/* parent */
446 	    8, 0,				/* alignment, boundary */
447 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
448 	    BUS_SPACE_MAXADDR,			/* highaddr */
449 	    NULL, NULL,				/* filtfunc, filtfuncarg */
450 	    MCLBYTES, 1,			/* maxsize, nsegments */
451 	    MCLBYTES, 0,			/* maxsegsz, flags */
452 	    NULL, NULL,				/* lockfunc, lockfuncarg */
453 	    buffer_tag);			/* dmat */
454 	if (error) {
455 		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
456 		return (ENXIO);
457 	}
458 
459 	/* Create TX busdma maps */
460 	for (i = 0; i < size; i++) {
461 		dw = &(tab[i]);
462 		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
463 		if (error) {
464 			if_printf(sc->ifp, "failed to create map for mbuf\n");
465 			return (ENXIO);
466 		}
467 
468 		dw->buffer = (struct mbuf*)NULL;
469 		dw->mge_desc->buffer = (bus_addr_t)NULL;
470 	}
471 
472 	return (0);
473 }
474 
475 static int
476 mge_allocate_dma(struct mge_softc *sc)
477 {
478 	int error;
479 	struct mge_desc_wrapper *dw;
480 	int i;
481 
482 	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
483 	error = bus_dma_tag_create(NULL,	/* parent */
484 	    16, 0,				/* alignment, boundary */
485 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
486 	    BUS_SPACE_MAXADDR,			/* highaddr */
487 	    NULL, NULL,				/* filtfunc, filtfuncarg */
488 	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
489 	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
490 	    NULL, NULL,				/* lockfunc, lockfuncarg */
491 	    &sc->mge_desc_dtag);		/* dmat */

	if (error) {
		device_printf(sc->dev,
		    "failed to create descriptor dma tag\n");
		return (ENXIO);
	}

	error = mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	if (error)
		return (error);
	error = mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);
	if (error)
		return (error);
498 
499 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
500 		dw = &(sc->mge_rx_desc[i]);
501 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
502 		    &dw->mge_desc->buffer);
503 	}
504 
505 	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
506 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
507 
508 	return (0);
509 }
510 
511 static void
512 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
513     uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
514 {
515 	struct mge_desc_wrapper *dw;
516 	int i;
517 
518 	for (i = 0; i < size; i++) {
519 		/* Free RX mbuf */
520 		dw = &(tab[i]);
521 
522 		if (dw->buffer_dmap) {
523 			if (free_mbufs) {
524 				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
525 				    BUS_DMASYNC_POSTREAD);
526 				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
527 			}
528 			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
529 			if (free_mbufs)
530 				m_freem(dw->buffer);
531 		}
532 		/* Free RX descriptors */
533 		if (dw->desc_dmap) {
534 			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
535 			    BUS_DMASYNC_POSTREAD);
536 			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
537 			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
538 			    dw->desc_dmap);
539 		}
540 	}
541 }
542 
543 static void
544 mge_free_dma(struct mge_softc *sc)
545 {
546 	/* Free desciptors and mbufs */
547 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
548 	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
549 
550 	/* Destroy mbuf dma tag */
551 	bus_dma_tag_destroy(sc->mge_tx_dtag);
552 	bus_dma_tag_destroy(sc->mge_rx_dtag);
553 	/* Destroy descriptors tag */
554 	bus_dma_tag_destroy(sc->mge_desc_dtag);
555 }
556 
557 static void
558 mge_reinit_rx(struct mge_softc *sc)
559 {
560 	struct mge_desc_wrapper *dw;
561 	int i;
562 
563 	MGE_RECEIVE_LOCK_ASSERT(sc);
564 
565 	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
566 
567 	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
568 	    &sc->mge_rx_dtag);
569 
570 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
571 		dw = &(sc->mge_rx_desc[i]);
572 		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
573 		    &dw->mge_desc->buffer);
574 	}
575 
576 	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
577 	sc->rx_desc_curr = 0;
578 
579 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
580 	    sc->rx_desc_start);
581 
582 	/* Enable RX queue */
583 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
584 }
585 
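/*
 * Polling handler: with DEVICE_POLLING active the interrupt sources stay
 * masked and this routine is called periodically instead.  "count" is
 * the RX packet budget; POLL_AND_CHECK_STATUS additionally looks at the
 * cause registers for RX resource errors.
 */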
586 #ifdef DEVICE_POLLING
587 static poll_handler_t mge_poll;
588 
589 static int
590 mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
591 {
592 	struct mge_softc *sc = ifp->if_softc;
593 	uint32_t int_cause, int_cause_ext;
594 	int rx_npkts = 0;
595 
596 	MGE_GLOBAL_LOCK(sc);
597 
598 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
599 		MGE_GLOBAL_UNLOCK(sc);
600 		return (rx_npkts);
601 	}
602 
603 	if (cmd == POLL_AND_CHECK_STATUS) {
604 		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
605 		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
606 
607 		/* Check for resource error */
608 		if (int_cause & MGE_PORT_INT_RXERRQ0)
609 			mge_reinit_rx(sc);
610 
611 		if (int_cause || int_cause_ext) {
612 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
613 			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
614 		}
615 	}
616 
617 	mge_intr_tx_locked(sc);
618 	rx_npkts = mge_intr_rx_locked(sc, count);
619 
620 	MGE_GLOBAL_UNLOCK(sc);
621 	return (rx_npkts);
622 }
623 #endif /* DEVICE_POLLING */
624 
625 static int
626 mge_attach(device_t dev)
627 {
628 	struct mge_softc *sc;
629 	struct mii_softc *miisc;
630 	struct ifnet *ifp;
631 	uint8_t hwaddr[ETHER_ADDR_LEN];
632 	int i, error, phy;
633 
634 	sc = device_get_softc(dev);
635 	sc->dev = dev;
636 	sc->node = ofw_bus_get_node(dev);
637 
638 	if (device_get_unit(dev) == 0)
639 		sc_mge0 = sc;
640 
641 	/* Set chip version-dependent parameters */
642 	mge_ver_params(sc);
643 
644 	/* Get phy address from fdt */
645 	if (fdt_get_phyaddr(sc->node, &phy) != 0)
646 		return (ENXIO);
647 
648 	/* Initialize mutexes */
649 	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF);
650 	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF);
651 
652 	/* Allocate IO and IRQ resources */
653 	error = bus_alloc_resources(dev, res_spec, sc->res);
654 	if (error) {
655 		device_printf(dev, "could not allocate resources\n");
656 		mge_detach(dev);
657 		return (ENXIO);
658 	}
659 
660 	/* Allocate DMA, buffers, buffer descriptors */
661 	error = mge_allocate_dma(sc);
662 	if (error) {
663 		mge_detach(dev);
664 		return (ENXIO);
665 	}
666 
667 	sc->tx_desc_curr = 0;
668 	sc->rx_desc_curr = 0;
669 	sc->tx_desc_used_idx = 0;
670 	sc->tx_desc_used_count = 0;
671 
672 	/* Configure defaults for interrupt coalescing */
673 	sc->rx_ic_time = 768;
674 	sc->tx_ic_time = 768;
675 	mge_add_sysctls(sc);
676 
677 	/* Allocate network interface */
678 	ifp = sc->ifp = if_alloc(IFT_ETHER);
679 	if (ifp == NULL) {
680 		device_printf(dev, "if_alloc() failed\n");
681 		mge_detach(dev);
682 		return (ENOMEM);
683 	}
684 
685 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
686 	ifp->if_softc = sc;
687 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
688 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU;
689 	ifp->if_capenable = ifp->if_capabilities;
690 	ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
691 
692 #ifdef DEVICE_POLLING
693 	/* Advertise that polling is supported */
694 	ifp->if_capabilities |= IFCAP_POLLING;
695 #endif
696 
697 	ifp->if_init = mge_init;
698 	ifp->if_start = mge_start;
699 	ifp->if_ioctl = mge_ioctl;
700 
701 	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
702 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
703 	IFQ_SET_READY(&ifp->if_snd);
704 
705 	mge_get_mac_address(sc, hwaddr);
706 	ether_ifattach(ifp, hwaddr);
707 	callout_init(&sc->wd_callout, 0);
708 
709 	/* Attach PHY(s) */
710 	error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
711 	    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
712 	if (error) {
713 		device_printf(dev, "attaching PHYs failed\n");
714 		mge_detach(dev);
715 		return (error);
716 	}
717 	sc->mii = device_get_softc(sc->miibus);
718 
719 	/* Tell the MAC where to find the PHY so autoneg works */
720 	miisc = LIST_FIRST(&sc->mii->mii_phys);
721 	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
722 
723 	/* Attach interrupt handlers */
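	/*
	 * Note: only the first two vectors (RX and TX) are wired up here;
	 * the misc/summary/error handlers are defined above but left
	 * unattached in this revision.
	 */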
724 	for (i = 0; i < 2; ++i) {
725 		error = bus_setup_intr(dev, sc->res[1 + i],
726 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler,
727 		    sc, &sc->ih_cookie[i]);
728 		if (error) {
729 			device_printf(dev, "could not setup %s\n",
730 			    mge_intrs[i].description);
731 			mge_detach(dev);
732 			return (error);
733 		}
734 	}
735 
736 	return (0);
737 }
738 
739 static int
740 mge_detach(device_t dev)
741 {
742 	struct mge_softc *sc;
743 	int error, i;
744 
745 	sc = device_get_softc(dev);
746 
747 	/* Stop controller and free TX queue */
748 	if (sc->ifp)
749 		mge_shutdown(dev);
750 
751 	/* Wait for stopping ticks */
752 	callout_drain(&sc->wd_callout);
753 
754 	/* Stop and release all interrupts */
755 	for (i = 0; i < 2; ++i) {
756 		if (!sc->ih_cookie[i])
757 			continue;
758 
759 		error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]);
760 		if (error)
761 			device_printf(dev, "could not release %s\n",
762 			    mge_intrs[i].description);
763 	}
764 
765 	/* Detach network interface */
766 	if (sc->ifp) {
767 		ether_ifdetach(sc->ifp);
768 		if_free(sc->ifp);
769 	}
770 
771 	/* Free DMA resources */
772 	mge_free_dma(sc);
773 
774 	/* Free IO memory handler */
775 	bus_release_resources(dev, res_spec, sc->res);
776 
777 	/* Destroy mutexes */
778 	mtx_destroy(&sc->receive_lock);
779 	mtx_destroy(&sc->transmit_lock);
780 
781 	return (0);
782 }
783 
784 static void
785 mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
786 {
787 	struct mge_softc *sc = ifp->if_softc;
788 	struct mii_data *mii;
789 
790 	MGE_TRANSMIT_LOCK(sc);
791 
792 	mii = sc->mii;
793 	mii_pollstat(mii);
794 
795 	ifmr->ifm_active = mii->mii_media_active;
796 	ifmr->ifm_status = mii->mii_media_status;
797 
798 	MGE_TRANSMIT_UNLOCK(sc);
799 }
800 
801 static uint32_t
802 mge_set_port_serial_control(uint32_t media)
803 {
804 	uint32_t port_config;
805 
806 	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
807 	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
808 
809 	if (IFM_TYPE(media) == IFM_ETHER) {
810 		switch (IFM_SUBTYPE(media)) {
811 		case IFM_AUTO:
812 			break;
813 		case IFM_1000_T:
814 			port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
815 			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
816 			    PORT_SERIAL_SPEED_AUTONEG);
817 			break;
818 		case IFM_100_TX:
819 			port_config |= (PORT_SERIAL_MII_SPEED_100 |
820 			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
821 			    PORT_SERIAL_SPEED_AUTONEG);
822 			break;
823 		case IFM_10_T:
824 			port_config |= (PORT_SERIAL_AUTONEG |
825 			    PORT_SERIAL_AUTONEG_FC |
826 			    PORT_SERIAL_SPEED_AUTONEG);
827 			break;
828 		}
829 		if (media & IFM_FDX)
830 			port_config |= PORT_SERIAL_FULL_DUPLEX;
831 	}
832 	return (port_config);
833 }
834 
835 static int
836 mge_ifmedia_upd(struct ifnet *ifp)
837 {
838 	struct mge_softc *sc = ifp->if_softc;
839 
840 	if (ifp->if_flags & IFF_UP) {
841 		MGE_GLOBAL_LOCK(sc);
842 
843 		sc->mge_media_status = sc->mii->mii_media.ifm_media;
844 		mii_mediachg(sc->mii);
845 		mge_init_locked(sc);
846 
847 		MGE_GLOBAL_UNLOCK(sc);
848 	}
849 
850 	return (0);
851 }
852 
853 static void
854 mge_init(void *arg)
855 {
856 	struct mge_softc *sc = arg;
857 
858 	MGE_GLOBAL_LOCK(sc);
859 
860 	mge_init_locked(arg);
861 
862 	MGE_GLOBAL_UNLOCK(sc);
863 }
864 
865 static void
866 mge_init_locked(void *arg)
867 {
868 	struct mge_softc *sc = arg;
869 	struct mge_desc_wrapper *dw;
870 	volatile uint32_t reg_val;
871 	int i, count;
872 
874 	MGE_GLOBAL_LOCK_ASSERT(sc);
875 
876 	/* Stop interface */
877 	mge_stop(sc);
878 
879 	/* Disable interrupts */
880 	mge_intrs_ctrl(sc, 0);
881 
882 	/* Set MAC address */
883 	mge_set_mac_address(sc);
884 
885 	/* Setup multicast filters */
886 	mge_setup_multicast(sc);
887 
888 	if (sc->mge_ver == 2) {
889 		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
890 		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
891 	}
892 
893 	/* Initialize TX queue configuration registers */
894 	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
895 	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
896 	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
897 
898 	/* Clear TX queue configuration registers for unused queues */
899 	for (i = 1; i < 7; i++) {
900 		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
901 		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
902 		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
903 	}
904 
905 	/* Set default MTU (sc->mge_mtu holds the MTU register offset) */
906 	MGE_WRITE(sc, sc->mge_mtu, 0);
907 
908 	/* Port configuration */
909 	MGE_WRITE(sc, MGE_PORT_CONFIG,
910 	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
911 	    PORT_CONFIG_ARO_RXQ(0));
912 	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
913 
914 	/* Setup port configuration */
915 	reg_val = mge_set_port_serial_control(sc->mge_media_status);
916 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
917 
918 	/* Setup SDMA configuration */
919 	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
920 	    MGE_SDMA_TX_BYTE_SWAP |
921 	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
922 	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
923 
924 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
925 
926 	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
927 	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
928 	    sc->rx_desc_start);
929 
930 	/* Reset descriptor indexes */
931 	sc->tx_desc_curr = 0;
932 	sc->rx_desc_curr = 0;
933 	sc->tx_desc_used_idx = 0;
934 	sc->tx_desc_used_count = 0;
935 
936 	/* Enable RX descriptors */
937 	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
938 		dw = &sc->mge_rx_desc[i];
939 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
940 		dw->mge_desc->buff_size = MCLBYTES;
941 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
942 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
943 	}
944 
945 	/* Enable RX queue */
946 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
947 
948 	/* Enable port */
949 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
950 	reg_val |= PORT_SERIAL_ENABLE;
951 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
952 	count = 0x100000;
953 	for (;;) {
954 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
955 		if (reg_val & MGE_STATUS_LINKUP)
956 			break;
957 		DELAY(100);
958 		if (--count == 0) {
959 			if_printf(sc->ifp, "Timeout on link-up\n");
960 			break;
961 		}
962 	}
963 
964 	/* Set up interrupt coalescing */
965 	mge_set_rxic(sc);
966 	mge_set_txic(sc);
967 
968 	/* Enable interrupts */
969 #ifdef DEVICE_POLLING
970 	/*
971 	 * ...only if polling is not turned on.  If polling is enabled,
972 	 * keep the interrupts masked; mge_poll() does the work instead.
973 	 */
974 	if (sc->ifp->if_capenable & IFCAP_POLLING)
975 		mge_intrs_ctrl(sc, 0);
976 	else
977 #endif /* DEVICE_POLLING */
978 	mge_intrs_ctrl(sc, 1);
979 
980 	/* Activate network interface */
981 	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
982 	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
983 	sc->wd_timer = 0;
984 
985 	/* Schedule watchdog timeout */
986 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
987 }
988 
989 static void
990 mge_intr_err(void *arg)
991 {
992 	struct mge_softc *sc = arg;
993 	struct ifnet *ifp;
994 
995 	ifp = sc->ifp;
996 	if_printf(ifp, "%s\n", __FUNCTION__);
997 }
998 
999 static void
1000 mge_intr_misc(void *arg)
1001 {
1002 	struct mge_softc *sc = arg;
1003 	struct ifnet *ifp;
1004 
1005 	ifp = sc->ifp;
1006 	if_printf(ifp, "%s\n", __FUNCTION__);
1007 }
1008 
1009 static void
mge_intr_rx(void *arg)
{
1011 	struct mge_softc *sc = arg;
1012 	uint32_t int_cause, int_cause_ext;
1013 
1014 	MGE_RECEIVE_LOCK(sc);
1015 
1016 #ifdef DEVICE_POLLING
1017 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1018 		MGE_RECEIVE_UNLOCK(sc);
1019 		return;
1020 	}
1021 #endif
1022 
1023 	/* Get interrupt cause */
1024 	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1025 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1026 
1027 	/* Check for resource error */
1028 	if (int_cause & MGE_PORT_INT_RXERRQ0) {
1029 		mge_reinit_rx(sc);
1030 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1031 		    int_cause & ~MGE_PORT_INT_RXERRQ0);
1032 	}
1033 
1034 	int_cause &= MGE_PORT_INT_RXQ0;
1035 	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1036 
1037 	if (int_cause || int_cause_ext) {
1038 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1039 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1040 		mge_intr_rx_locked(sc, -1);
1041 	}
1042 
1043 	MGE_RECEIVE_UNLOCK(sc);
1044 }
1045 
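/*
 * RX processing loop.  A negative "count" (the interrupt path) means no
 * budget.  Frames are copied out with m_devget(); the subsequent +2
 * adjustment drops the two bytes of padding the controller apparently
 * prepends so that the IP header ends up 32-bit aligned.
 */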
1047 static int
1048 mge_intr_rx_locked(struct mge_softc *sc, int count)
1049 {
1050 	struct ifnet *ifp = sc->ifp;
1051 	uint32_t status;
1052 	uint16_t bufsize;
1053 	struct mge_desc_wrapper* dw;
1054 	struct mbuf *mb;
1055 	int rx_npkts = 0;
1056 
1057 	MGE_RECEIVE_LOCK_ASSERT(sc);
1058 
1059 	while (count != 0) {
1060 		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1061 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1062 		    BUS_DMASYNC_POSTREAD);
1063 
1064 		/* Get status */
1065 		status = dw->mge_desc->cmd_status;
1066 		bufsize = dw->mge_desc->buff_size;
1067 		if ((status & MGE_DMA_OWNED) != 0)
1068 			break;
1069 
1070 		if (dw->mge_desc->byte_count &&
1071 		    !(status & MGE_ERR_SUMMARY)) {
1073 			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1074 			    BUS_DMASYNC_POSTREAD);
1075 
1076 			mb = m_devget(dw->buffer->m_data,
1077 			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
1078 			    0, ifp, NULL);
1079 
1080 			if (mb == NULL)
1081 				/* Give up if no mbufs */
1082 				break;
1083 
1084 			mb->m_len -= 2;
1085 			mb->m_pkthdr.len -= 2;
1086 			mb->m_data += 2;
1087 
1088 			mge_offload_process_frame(ifp, mb, status,
1089 			    bufsize);
1090 
1091 			MGE_RECEIVE_UNLOCK(sc);
1092 			(*ifp->if_input)(ifp, mb);
1093 			MGE_RECEIVE_LOCK(sc);
1094 			rx_npkts++;
1095 		}
1096 
1097 		dw->mge_desc->byte_count = 0;
1098 		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1099 		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
1100 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1101 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1102 
1103 		if (count > 0)
1104 			count -= 1;
1105 	}
1106 
1107 	return (rx_npkts);
1108 }
1109 
1110 static void
1111 mge_intr_sum(void *arg)
1112 {
1113 	struct mge_softc *sc = arg;
1114 	struct ifnet *ifp;
1115 
1116 	ifp = sc->ifp;
1117 	if_printf(ifp, "%s\n", __FUNCTION__);
1118 }
1119 
1120 static void
1121 mge_intr_tx(void *arg)
1122 {
1123 	struct mge_softc *sc = arg;
1124 	uint32_t int_cause_ext;
1125 
1126 	MGE_TRANSMIT_LOCK(sc);
1127 
1128 #ifdef DEVICE_POLLING
1129 	if (sc->ifp->if_capenable & IFCAP_POLLING) {
1130 		MGE_TRANSMIT_UNLOCK(sc);
1131 		return;
1132 	}
1133 #endif
1134 
1135 	/* Ack the interrupt */
1136 	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1137 	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT,
1138 	    int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0);
1139 
1140 	mge_intr_tx_locked(sc);
1141 
1142 	MGE_TRANSMIT_UNLOCK(sc);
1143 }
1144 
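/*
 * Reclaim completed TX descriptors: walk the used ring until a
 * DMA_OWNED descriptor is found, update collision statistics from the
 * error bits, free the mbufs and, if anything was reclaimed, restart
 * the transmit queue.
 */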
1146 static void
1147 mge_intr_tx_locked(struct mge_softc *sc)
1148 {
1149 	struct ifnet *ifp = sc->ifp;
1150 	struct mge_desc_wrapper *dw;
1151 	struct mge_desc *desc;
1152 	uint32_t status;
1153 	int send = 0;
1154 
1155 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1156 
1157 	/* Disable watchdog */
1158 	sc->wd_timer = 0;
1159 
1160 	while (sc->tx_desc_used_count) {
1161 		/* Get the descriptor */
1162 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1163 		desc = dw->mge_desc;
1164 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1165 		    BUS_DMASYNC_POSTREAD);
1166 
1167 		/* Get descriptor status */
1168 		status = desc->cmd_status;
1169 
1170 		if (status & MGE_DMA_OWNED)
1171 			break;
1172 
1173 		sc->tx_desc_used_idx =
1174 		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
1175 		sc->tx_desc_used_count--;
1176 
1177 		/* Update collision statistics */
1178 		if (status & MGE_ERR_SUMMARY) {
1179 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1180 				ifp->if_collisions++;
1181 			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1182 				ifp->if_collisions += 16;
1183 		}
1184 
1185 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1186 		    BUS_DMASYNC_POSTWRITE);
1187 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1188 		m_freem(dw->buffer);
1189 		dw->buffer = (struct mbuf*)NULL;
1190 		send++;
1191 
1192 		ifp->if_opackets++;
1193 	}
1194 
1195 	if (send) {
1196 		/* Now send anything that was pending */
1197 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1198 		mge_start_locked(ifp);
1199 	}
1200 }
1201 
1202 static int
1203 mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1204 {
1205 	struct mge_softc *sc = ifp->if_softc;
1206 	struct ifreq *ifr = (struct ifreq *)data;
1207 	int mask, error;
1208 	uint32_t flags;
1209 
1210 	error = 0;
1211 
1212 	switch (command) {
1213 	case SIOCSIFFLAGS:
1214 		MGE_GLOBAL_LOCK(sc);
1215 
1216 		if (ifp->if_flags & IFF_UP) {
1217 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1218 				flags = ifp->if_flags ^ sc->mge_if_flags;
1219 				if (flags & IFF_PROMISC)
1220 					mge_set_prom_mode(sc,
1221 					    MGE_RX_DEFAULT_QUEUE);
1222 
1223 				if (flags & IFF_ALLMULTI)
1224 					mge_setup_multicast(sc);
1225 			} else
1226 				mge_init_locked(sc);
1227 		}
1228 		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1229 			mge_stop(sc);
1230 
1231 		sc->mge_if_flags = ifp->if_flags;
1232 		MGE_GLOBAL_UNLOCK(sc);
1233 		break;
1234 	case SIOCADDMULTI:
1235 	case SIOCDELMULTI:
1236 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1237 			MGE_GLOBAL_LOCK(sc);
1238 			mge_setup_multicast(sc);
1239 			MGE_GLOBAL_UNLOCK(sc);
1240 		}
1241 		break;
1242 	case SIOCSIFCAP:
1243 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1244 		if (mask & IFCAP_HWCSUM) {
1245 			ifp->if_capenable &= ~IFCAP_HWCSUM;
1246 			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
1247 			if (ifp->if_capenable & IFCAP_TXCSUM)
1248 				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
1249 			else
1250 				ifp->if_hwassist = 0;
1251 		}
1252 #ifdef DEVICE_POLLING
1253 		if (mask & IFCAP_POLLING) {
1254 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1255 				error = ether_poll_register(mge_poll, ifp);
1256 				if (error)
1257 					return (error);
1258 
1259 				MGE_GLOBAL_LOCK(sc);
1260 				mge_intrs_ctrl(sc, 0);
1261 				ifp->if_capenable |= IFCAP_POLLING;
1262 				MGE_GLOBAL_UNLOCK(sc);
1263 			} else {
1264 				error = ether_poll_deregister(ifp);
1265 				MGE_GLOBAL_LOCK(sc);
1266 				mge_intrs_ctrl(sc, 1);
1267 				ifp->if_capenable &= ~IFCAP_POLLING;
1268 				MGE_GLOBAL_UNLOCK(sc);
1269 			}
1270 		}
1271 #endif
1272 		break;
1273 	case SIOCGIFMEDIA: /* fall through */
1274 	case SIOCSIFMEDIA:
1275 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T &&
1276 		    !(ifr->ifr_media & IFM_FDX)) {
1277 			device_printf(sc->dev,
1278 			    "1000baseT half-duplex unsupported\n");
1279 			return (0);
1280 		}
1281 		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1282 		break;
1283 	default:
1284 		error = ether_ioctl(ifp, command, data);
1285 	}
1286 	return (error);
1287 }
1288 
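/*
 * MII access goes through sc_mge0 rather than the per-unit softc because
 * the SMI registers are only present in unit 0's register window (see
 * the comment at sc_mge0 above); both routines busy-wait with bounded
 * retries.
 */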
1289 static int
1290 mge_miibus_readreg(device_t dev, int phy, int reg)
1291 {
1292 	struct mge_softc *sc;
1293 	uint32_t retries;
1294 
1295 	sc = device_get_softc(dev);
1296 
1297 	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1298 	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));
1299 
1300 	retries = MGE_SMI_READ_RETRIES;
1301 	while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID))
1302 		DELAY(MGE_SMI_READ_DELAY);
1303 
1304 	if (retries == 0)
1305 		device_printf(dev, "Timeout while reading from PHY\n");
1306 
1307 	return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff);
1308 }
1309 
1310 static int
1311 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1312 {
1313 	struct mge_softc *sc;
1314 	uint32_t retries;
1315 
1316 	sc = device_get_softc(dev);
1317 
1318 	MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff &
1319 	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));
1320 
1321 	retries = MGE_SMI_WRITE_RETRIES;
1322 	while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY)
1323 		DELAY(MGE_SMI_WRITE_DELAY);
1324 
1325 	if (retries == 0)
1326 		device_printf(dev, "Timeout while writing to PHY\n");
1327 	return (0);
1328 }
1329 
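/*
 * Probe matches solely on the "mrvl,ge" compatible string.  For
 * illustration only (not taken from a real DTS), a node might look like:
 *
 *	ethernet@72000 {
 *		compatible = "mrvl,ge";
 *		reg = <0x72000 0x2000>;
 *		local-mac-address = [00 00 00 00 00 00];
 *	};
 */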
1330 static int
1331 mge_probe(device_t dev)
1332 {
1333 
1334 	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1335 		return (ENXIO);
1336 
1337 	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1338 	return (BUS_PROBE_DEFAULT);
1339 }
1340 
1341 static int
1342 mge_resume(device_t dev)
1343 {
1344 
1345 	device_printf(dev, "%s\n", __FUNCTION__);
1346 	return (0);
1347 }
1348 
1349 static int
1350 mge_shutdown(device_t dev)
1351 {
1352 	struct mge_softc *sc = device_get_softc(dev);
1353 
1354 	MGE_GLOBAL_LOCK(sc);
1355 
1356 #ifdef DEVICE_POLLING
1357 	if (sc->ifp->if_capenable & IFCAP_POLLING)
1358 		ether_poll_deregister(sc->ifp);
1359 #endif
1360 
1361 	mge_stop(sc);
1362 
1363 	MGE_GLOBAL_UNLOCK(sc);
1364 
1365 	return (0);
1366 }
1367 
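/*
 * Map an outgoing mbuf chain into the next free TX descriptor.  The TX
 * buffer tag is created with nsegments = 1, so nsegs can only be 1 here
 * and the loop below executes at most once; chains that do not fit are
 * defragmented by the caller beforehand.
 */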
1368 static int
1369 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1370 {
1371 	struct mge_desc_wrapper *dw = NULL;
1372 	struct ifnet *ifp;
1373 	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1374 	bus_dmamap_t mapp;
1375 	int error;
1376 	int seg, nsegs;
1377 	int desc_no;
1378 
1379 	ifp = sc->ifp;
1380 
1381 	/* Check for free descriptors */
1382 	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1383 		/* No free descriptors */
1384 		return (-1);
1385 	}
1386 
1387 	/* Fetch unused map */
1388 	desc_no = sc->tx_desc_curr;
1389 	dw = &sc->mge_tx_desc[desc_no];
1390 	mapp = dw->buffer_dmap;
1391 
1392 	/* Create mapping in DMA memory */
1393 	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1394 	    BUS_DMA_NOWAIT);
1395 	if (error != 0 || nsegs != 1) {
1396 		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1397 		return ((error != 0) ? error : -1);
1398 	}
1399 
1400 	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1401 
1402 	/* Everything is ok, now we can send buffers */
1403 	for (seg = 0; seg < nsegs; seg++) {
1404 		dw->mge_desc->byte_count = segs[seg].ds_len;
1405 		dw->mge_desc->buffer = segs[seg].ds_addr;
1406 		dw->buffer = m0;
1407 		dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST |
1408 		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1409 		    MGE_DMA_OWNED;
1410 
1411 		if (seg == 0)
1412 			mge_offload_setup_descriptor(sc, dw);
1413 	}
1414 
1415 	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1416 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1417 
1418 	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
1419 	sc->tx_desc_used_count++;
1420 	return (0);
1421 }
1422 
1423 static void
1424 mge_tick(void *msc)
1425 {
1426 	struct mge_softc *sc = msc;
1427 
1428 	/* Check for TX timeout */
1429 	mge_watchdog(sc);
1430 
1431 	mii_tick(sc->mii);
1432 
1433 	/* Check for media type change */
1434 	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
1435 		mge_ifmedia_upd(sc->ifp);
1436 
1437 	/* Schedule another timeout one second from now */
1438 	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1439 }
1440 
1441 static void
1442 mge_watchdog(struct mge_softc *sc)
1443 {
1444 	struct ifnet *ifp;
1445 
1446 	ifp = sc->ifp;
1447 
1448 	MGE_GLOBAL_LOCK(sc);
1449 
1450 	if (sc->wd_timer == 0 || --sc->wd_timer) {
1451 		MGE_GLOBAL_UNLOCK(sc);
1452 		return;
1453 	}
1454 
1455 	ifp->if_oerrors++;
1456 	if_printf(ifp, "watchdog timeout\n");
1457 
1458 	mge_stop(sc);
1459 	mge_init_locked(sc);
1460 
1461 	MGE_GLOBAL_UNLOCK(sc);
1462 }
1463 
1464 static void
1465 mge_start(struct ifnet *ifp)
1466 {
1467 	struct mge_softc *sc = ifp->if_softc;
1468 
1469 	MGE_TRANSMIT_LOCK(sc);
1470 
1471 	mge_start_locked(ifp);
1472 
1473 	MGE_TRANSMIT_UNLOCK(sc);
1474 }
1475 
1476 static void
1477 mge_start_locked(struct ifnet *ifp)
1478 {
1479 	struct mge_softc *sc;
1480 	struct mbuf *m0, *mtmp;
1481 	uint32_t reg_val, queued = 0;
1482 
1483 	sc = ifp->if_softc;
1484 
1485 	MGE_TRANSMIT_LOCK_ASSERT(sc);
1486 
1487 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1488 	    IFF_DRV_RUNNING)
1489 		return;
1490 
1491 	for (;;) {
1492 		/* Get packet from the queue */
1493 		IF_DEQUEUE(&ifp->if_snd, m0);
1494 		if (m0 == NULL)
1495 			break;
1496 
1497 		mtmp = m_defrag(m0, M_DONTWAIT);
1498 		if (mtmp)
1499 			m0 = mtmp;
1500 
1501 		if (mge_encap(sc, m0)) {
1502 			IF_PREPEND(&ifp->if_snd, m0);
1503 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1504 			break;
1505 		}
1506 		queued++;
1507 		BPF_MTAP(ifp, m0);
1508 	}
1509 
1510 	if (queued) {
1511 		/* Enable transmitter and watchdog timer */
1512 		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1513 		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1514 		sc->wd_timer = 5;
1515 	}
1516 }
1517 
1518 static void
1519 mge_stop(struct mge_softc *sc)
1520 {
1521 	struct ifnet *ifp;
1522 	volatile uint32_t reg_val, status;
1523 	struct mge_desc_wrapper *dw;
1524 	struct mge_desc *desc;
1525 	int count;
1526 
1527 	ifp = sc->ifp;
1528 
1529 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1530 		return;
1531 
1532 	/* Stop tick engine */
1533 	callout_stop(&sc->wd_callout);
1534 
1535 	/* Disable interface */
1536 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1537 	sc->wd_timer = 0;
1538 
1539 	/* Disable interrupts */
1540 	mge_intrs_ctrl(sc, 0);
1541 
1542 	/* Disable Rx and Tx */
1543 	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1544 	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1545 	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1546 
1547 	/* Remove pending data from TX queue */
1548 	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1549 	    sc->tx_desc_used_count) {
1550 		/* Get the descriptor */
1551 		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1552 		desc = dw->mge_desc;
1553 		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1554 		    BUS_DMASYNC_POSTREAD);
1555 
1556 		/* Get descriptor status */
1557 		status = desc->cmd_status;
1558 
1559 		if (status & MGE_DMA_OWNED)
1560 			break;
1561 
1562 		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
1563 		    MGE_TX_DESC_NUM;
1564 		sc->tx_desc_used_count--;
1565 
1566 		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1567 		    BUS_DMASYNC_POSTWRITE);
1568 		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1569 
1570 		m_freem(dw->buffer);
1571 		dw->buffer = (struct mbuf*)NULL;
1572 	}
1573 
1574 	/* Wait for end of transmission */
1575 	count = 0x100000;
1576 	while (count--) {
1577 		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1578 		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
1579 		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1580 			break;
1581 		DELAY(100);
1582 	}
1583 
1584 	if (!count)
1585 		if_printf(ifp, "%s: timeout while waiting for end of transmission\n",
1586 		    __FUNCTION__);
1587 
1588 	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1589 	reg_val &= ~(PORT_SERIAL_ENABLE);
1590 	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1591 }
1592 
1593 static int
1594 mge_suspend(device_t dev)
1595 {
1596 
1597 	device_printf(dev, "%s\n", __FUNCTION__);
1598 	return (0);
1599 }
1600 
1601 static void
1602 mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
1603     uint32_t status, uint16_t bufsize)
1604 {
1605 	int csum_flags = 0;
1606 
1607 	if (ifp->if_capenable & IFCAP_RXCSUM) {
1608 		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1609 			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1610 
1611 		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1612 		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1613 		    (status & MGE_RX_L4_CSUM_OK)) {
1614 			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1615 			frame->m_pkthdr.csum_data = 0xFFFF;
1616 		}
1617 
1618 		frame->m_pkthdr.csum_flags = csum_flags;
1619 	}
1620 }
1621 
1622 static void
1623 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1624 {
1625 	struct mbuf *m0 = dw->buffer;
1626 	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1627 	int csum_flags = m0->m_pkthdr.csum_flags;
1628 	int cmd_status = 0;
1629 	struct ip *ip;
1630 	int ehlen, etype;
1631 
1632 	if (csum_flags) {
1633 		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1634 			etype = ntohs(eh->evl_proto);
1635 			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1636 			csum_flags |= MGE_TX_VLAN_TAGGED;
1637 		} else {
1638 			etype = ntohs(eh->evl_encap_proto);
1639 			ehlen = ETHER_HDR_LEN;
1640 		}
1641 
1642 		if (etype != ETHERTYPE_IP) {
1643 			if_printf(sc->ifp,
1644 			    "TCP/IP Offload enabled for unsupported "
1645 			    "protocol!\n");
1646 			return;
1647 		}
1648 
1649 		ip = (struct ip *)(m0->m_data + ehlen);
1650 		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1651 
1652 		if ((m0->m_flags & M_FRAG) == 0)
1653 			cmd_status |= MGE_TX_NOT_FRAGMENT;
1654 	}
1655 
1656 	if (csum_flags & CSUM_IP)
1657 		cmd_status |= MGE_TX_GEN_IP_CSUM;
1658 
1659 	if (csum_flags & CSUM_TCP)
1660 		cmd_status |= MGE_TX_GEN_L4_CSUM;
1661 
1662 	if (csum_flags & CSUM_UDP)
1663 		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1664 
1665 	dw->mge_desc->cmd_status |= cmd_status;
1666 }
1667 
1668 static void
1669 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1670 {
1671 
1672 	if (enable) {
1673 		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
1674 		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1675 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
1676 		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1677 		    MGE_PORT_INT_EXT_TXBUF0);
1678 	} else {
1679 		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1680 		MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1681 
1682 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1683 		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1684 
1685 		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1686 		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1687 	}
1688 }
1689 
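/*
 * Table-driven CRC-8 used for the "other multicast" hash; by inspection
 * of the table (ct[1] == 0x07) this is the polynomial x^8 + x^2 + x + 1
 * with zero initial value.
 */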
1690 static uint8_t
1691 mge_crc8(uint8_t *data, int size)
1692 {
1693 	uint8_t crc = 0;
1694 	static const uint8_t ct[256] = {
1695 		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1696 		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1697 		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1698 		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1699 		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1700 		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1701 		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1702 		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1703 		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1704 		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1705 		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1706 		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1707 		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1708 		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1709 		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1710 		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1711 		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1712 		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1713 		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1714 		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1715 		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1716 		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1717 		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1718 		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1719 		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
1720 		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
1721 		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
1722 		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
1723 		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
1724 		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
1725 		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
1726 		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
1727 	};
1728 
1729 	while (size--)
1730 		crc = ct[crc ^ *(data++)];
1731 
1732 	return (crc);
1733 }
1734 
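/*
 * Rebuild the multicast filter tables.  Addresses matching the
 * 01:00:5e:00:00:xx prefix go into the "special" table indexed directly
 * by the last byte; everything else is hashed with mge_crc8() into the
 * "other" table.  IFF_ALLMULTI simply fills both tables with the
 * accept-to-default-queue pattern.
 */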
1735 static void
1736 mge_setup_multicast(struct mge_softc *sc)
1737 {
1738 	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
1739 	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
1740 	uint32_t smt[MGE_MCAST_REG_NUMBER];
1741 	uint32_t omt[MGE_MCAST_REG_NUMBER];
1742 	struct ifnet *ifp = sc->ifp;
1743 	struct ifmultiaddr *ifma;
1744 	uint8_t *mac;
1745 	int i;
1746 
1747 	if (ifp->if_flags & IFF_ALLMULTI) {
1748 		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
1749 			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
1750 	} else {
1751 		memset(smt, 0, sizeof(smt));
1752 		memset(omt, 0, sizeof(omt));
1753 
1754 		if_maddr_rlock(ifp);
1755 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1756 			if (ifma->ifma_addr->sa_family != AF_LINK)
1757 				continue;
1758 
1759 			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1760 			if (memcmp(mac, special, sizeof(special)) == 0) {
1761 				i = mac[5];
1762 				smt[i >> 2] |= v << ((i & 0x03) << 3);
1763 			} else {
1764 				i = mge_crc8(mac, ETHER_ADDR_LEN);
1765 				omt[i >> 2] |= v << ((i & 0x03) << 3);
1766 			}
1767 		}
1768 		if_maddr_runlock(ifp);
1769 	}
1770 
1771 	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
1772 		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
1773 		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
1774 	}
1775 }
1776 
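/*
 * Clamp and program the RX coalescing time.  mge_rx_ipg() doubles as a
 * field mask: encoding the maximum value yields the full bitfield, which
 * is cleared before the new value is OR-ed in (mge_set_txic() below uses
 * the same trick on the TX FIFO urgent threshold register).
 */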
1777 static void
1778 mge_set_rxic(struct mge_softc *sc)
1779 {
1780 	uint32_t reg;
1781 
1782 	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
1783 		sc->rx_ic_time = sc->mge_rx_ipg_max;
1784 
1785 	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
1786 	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
1787 	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
1788 	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
1789 }
1790 
1791 static void
1792 mge_set_txic(struct mge_softc *sc)
1793 {
1794 	uint32_t reg;
1795 
1796 	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
1797 		sc->tx_ic_time = sc->mge_tfut_ipg_max;
1798 
1799 	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
1800 	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
1801 	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
1802 	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
1803 }
1804 
1805 static int
1806 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
1807 {
1808 	struct mge_softc *sc = (struct mge_softc *)arg1;
1809 	uint32_t time;
1810 	int error;
1811 
1812 	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
1813 	error = sysctl_handle_int(oidp, &time, 0, req);
1814 	if (error != 0)
1815 		return (error);
1816 
1817 	MGE_GLOBAL_LOCK(sc);
1818 	if (arg2 == MGE_IC_RX) {
1819 		sc->rx_ic_time = time;
1820 		mge_set_rxic(sc);
1821 	} else {
1822 		sc->tx_ic_time = time;
1823 		mge_set_txic(sc);
1824 	}
1825 	MGE_GLOBAL_UNLOCK(sc);
1826 
1827 	return (0);
1828 }
1829 
1830 static void
1831 mge_add_sysctls(struct mge_softc *sc)
1832 {
1833 	struct sysctl_ctx_list *ctx;
1834 	struct sysctl_oid_list *children;
1835 	struct sysctl_oid *tree;
1836 
1837 	ctx = device_get_sysctl_ctx(sc->dev);
1838 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1839 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
1840 	    CTLFLAG_RD, 0, "MGE interrupt coalescing");
1841 	children = SYSCTL_CHILDREN(tree);
1842 
1843 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
1844 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
1845 	    "I", "IC RX time threshold");
1846 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
1847 	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
1848 	    "I", "IC TX time threshold");
1849 }
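
/*
 * The coalescing knobs end up under the device's sysctl tree, e.g. (for
 * a hypothetical unit 0):
 *
 *	sysctl dev.mge.0.int_coal.rx_time=256
 *	sysctl dev.mge.0.int_coal.tx_time=256
 *
 * Values are clamped to the per-version maxima in mge_set_rxic()/
 * mge_set_txic().
 */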
1850