/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

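/*
 * Single-bit macro in the NetBSD style; if_genetreg.h, which is derived
 * from the NetBSD driver, is assumed to expect this definition.
 */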
#define __BIT(_x)	(1 << (_x))
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

#define	TX_NEXT(n, count)		(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)		(((n) + 1) & ((count) - 1))
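/*
 * The ring-index macros above advance with a power-of-two mask, so
 * GENET_DMA_DESC_COUNT must be a power of two.
 */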

#define	TX_MAX_SEGS		20

static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "genet driver parameters");

/* Maximum number of mbufs to pass per call to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN,
    &gen_rx_batch, 0, "max mbufs per call to if_input");

TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);	/* old name/interface */
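/*
 * Example (hypothetical value): as a read-only tunable, the batch size
 * can be set at boot time from /boot/loader.conf, e.g.
 *	hw.genet.rx_batch="32"
 */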

/*
 * Transmitting packets with only an Ethernet header in the first mbuf
 * fails.  Examples include reflected ICMPv6 packets, e.g. echo replies;
 * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT
 * with IPFW.  Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr
 * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP
 * case.
 */
static int gen_tx_hdr_min = 56;		/* ether_header + ip6_hdr + icmp6_hdr */
SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW,
    &gen_tx_hdr_min, 0, "header to add to packets with ether header only");

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};

enum {
	_RES_MAC,		/* MAC (GENET core) register block */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			queued;			/* descriptors in use */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;		/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but the
	 * code is structured for multiple rings.  The additional rings
	 * would be used for different priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];  /* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];  /* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(device_t dev);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(dev);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (eaddr_found == 0)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}

static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

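/*
 * Configure checksum offload.  With RX checksum offload enabled the
 * controller prepends a status block (GENET_RBUF_64B_EN) to each
 * received frame, and with TX offload enabled it expects one in front
 * of each transmitted frame; see the struct statusblock handling in
 * gen_encap() and gen_rxintr().
 */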
static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(device_t dev)
{
	struct gen_softc *sc = device_get_softc(dev);
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* count is advanced by the return value */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters. We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		ifp->if_flags |= IFF_ALLMULTI;
	else
		ifp->if_flags &= ~IFF_ALLMULTI;

	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, IF_LLADDR(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
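		/*
		 * Enable the first n filters; the mask arithmetic below
		 * sets the top n of the GENET_MAX_MDF_FILTER bits.
		 */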
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &
		    ~(__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address */
	enaddr = IF_LLADDR(ifp);
	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int cnt, err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)

static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];

	m = *mp;

	/*
	 * Don't attempt to send packets with only an Ethernet header in
	 * first mbuf; see comment above with gen_tx_hdr_min.
	 */
	if (m->m_len == sizeof(struct ether_header)) {
		m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min));
		if (m == NULL) {
			if (sc->ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev,
				    "header pullup fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
	}

	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0) {
		csum_flags = m->m_pkthdr.csum_flags;
		csumdata = m->m_pkthdr.csum_data;
		M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
		if (m == NULL) {
			if (sc->ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "prepend fail\n");
			*mp = NULL;
			return (ENOMEM);
		}
		offset = gen_parse_tx(m, csum_flags);
		sb = mtod(m, struct statusblock *);
		if ((csum_flags & CSUM_DELAY_ANY) != 0) {
			csuminfo = (offset << TXCSUM_OFF_SHIFT) |
			    (offset + csumdata);
			csuminfo |= TXCSUM_LEN_VALID;
			if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
				csuminfo |= TXCSUM_UDP;
			sb->txcsuminfo = csuminfo;
		} else
			sb->txcsuminfo = 0;
	}

	*mp = m;

	cur = first = q->cur;
	ent = &q->entries[cur];
	map = ent->map;
	error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev,
			    "gen_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev,
		    "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Remove statusblock after mapping, before possible requeue or bpf. */
	if (sb != NULL) {
		m->m_data += sizeof(struct statusblock);
		m->m_len -= sizeof(struct statusblock);
		m->m_pkthdr.len -= sizeof(struct statusblock);
	}
	if (q->queued + nsegs > q->nentries) {
		bus_dmamap_unload(sc->tx_buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);

	index = q->prod_idx & (q->nentries - 1);
	for (i = 0; i < nsegs; i++) {
		ent = &q->entries[cur];
		length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
		if (i == 0) {
			length_status |= GENET_TX_DESC_STATUS_SOP |
			    GENET_TX_DESC_STATUS_CRC;
			if ((csum_flags & CSUM_DELAY_ANY) != 0)
				length_status |= GENET_TX_DESC_STATUS_CKSUM;
		}
		if (i == nsegs - 1)
			length_status |= GENET_TX_DESC_STATUS_EOP;

		length_status |= segs[i].ds_len <<
		    GENET_TX_DESC_STATUS_BUFLEN_SHIFT;

		WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
		    (uint32_t)segs[i].ds_addr);
		WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
		    (uint32_t)(segs[i].ds_addr >> 32));
		WR4(sc, GENET_TX_DESC_STATUS(index), length_status);

		++q->queued;
		cur = TX_NEXT(cur, q->nentries);
		index = TX_NEXT(index, q->nentries);
	}

	q->prod_idx += nsegs;
	q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
	/* We probably don't need to write the producer index on every iter */
	if (nsegs != 0)
		WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
	q->cur = cur;

	/* Store mbuf in the first segment's ring entry */
	q->entries[first].mbuf = m;

	return (0);
}

/*
 * Parse a packet to find the offset of the transport header for checksum
 * offload.  Ensure that the link and network headers are contiguous with
 * the status block, or transmission fails.
 */
static int
gen_parse_tx(struct mbuf *m, int csum_flags)
{
	int offset, off_in_m;
	bool copy = false, shift = false;
	u_char *p, *copy_p = NULL;
	struct mbuf *m0 = m;
	uint16_t ether_type;

	if (m->m_len == sizeof(struct statusblock)) {
		/* M_PREPEND placed statusblock at end; move to beginning */
		m->m_data = m->m_pktdat;
		copy_p = mtodo(m, sizeof(struct statusblock));
		m = m->m_next;
		off_in_m = 0;
		p = mtod(m, u_char *);
		copy = true;
	} else {
		/*
		 * If statusblock is not at beginning of mbuf (likely),
		 * then remember to move mbuf contents down before copying
		 * after them.
		 */
		if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
			shift = true;
		p = mtodo(m, sizeof(struct statusblock));
		off_in_m = sizeof(struct statusblock);
	}

/*
 * If headers need to be copied contiguous to statusblock, do so.
 * If copying to the internal mbuf data area, and the status block
 * is not at the beginning of that area, shift the status block (which
 * is empty) and following data.
 */
#define COPY(size) {							\
	int hsize = size;						\
	if (copy) {							\
		if (shift) {						\
			u_char *p0;					\
			shift = false;					\
			p0 = mtodo(m0, sizeof(struct statusblock));	\
			m0->m_data = m0->m_pktdat;			\
			bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
			    m0->m_len - sizeof(struct statusblock));	\
			copy_p = mtodo(m0, sizeof(struct statusblock));	\
		}							\
		bcopy(p, copy_p, hsize);				\
		m0->m_len += hsize;					\
		m0->m_pkthdr.len += hsize;	/* unneeded */		\
		m->m_len -= hsize;					\
		m->m_data += hsize;					\
	}								\
	copy_p += hsize;						\
}

	KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
	    sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));

	if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
		offset = sizeof(struct ether_vlan_header);
		ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
		COPY(sizeof(struct ether_vlan_header));
		if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_vlan_header);
			p += sizeof(struct ether_vlan_header);
		}
	} else {
		offset = sizeof(struct ether_header);
		ether_type = ntohs(((struct ether_header *)p)->ether_type);
		COPY(sizeof(struct ether_header));
		if (m->m_len == off_in_m + sizeof(struct ether_header)) {
			m = m->m_next;
			off_in_m = 0;
			p = mtod(m, u_char *);
			copy = true;
		} else {
			off_in_m += sizeof(struct ether_header);
			p += sizeof(struct ether_header);
		}
	}
	if (ether_type == ETHERTYPE_IP) {
		COPY(((struct ip *)p)->ip_hl << 2);
		offset += ((struct ip *)p)->ip_hl << 2;
	} else if (ether_type == ETHERTYPE_IPV6) {
		COPY(sizeof(struct ip6_hdr));
		offset += sizeof(struct ip6_hdr);
	} else {
		/*
		 * Unknown whether other cases require moving a header;
		 * ARP works without.
		 */
	}
	return (offset);
#undef COPY
}

static void
gen_intr(void *arg)
{
	struct gen_softc *sc = arg;
	uint32_t val;

	GEN_LOCK(sc);

	val = RD4(sc, GENET_INTRL2_CPU_STAT);
	val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
	WR4(sc, GENET_INTRL2_CPU_CLEAR, val);

	if (val & GENET_IRQ_RXDMA_DONE)
		gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);

	if (val & GENET_IRQ_TXDMA_DONE) {
		gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
		if (!if_sendq_empty(sc->ifp))
			gen_start_locked(sc);
	}

	GEN_UNLOCK(sc);
}

static int
gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	struct statusblock *sb = NULL;
	int error, index, len, cnt, npkt, n;
	uint32_t status, prod_idx, total;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
	    GENET_RX_DMA_PROD_CONS_MASK;
	total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;

	index = q->cons_idx & (RX_DESC_COUNT - 1);
	for (n = 0; n < total; n++) {
		bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);

		m = q->entries[index].mbuf;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			sb = mtod(m, struct statusblock *);
			status = sb->status_buflen;
		} else
			status = RD4(sc, GENET_RX_DESC_STATUS(index));

		len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
		    GENET_RX_DESC_STATUS_BUFLEN_SHIFT;

		/* check for errors */
		if ((status &
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
		    GENET_RX_DESC_STATUS_RX_ERROR)) !=
		    (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev,
				    "error/frag %x csum %x\n", status,
				    sb->rxcsum);
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}

		error = gen_newbuf_rx(sc, q, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (ifp->if_flags & IFF_DEBUG)
				device_printf(sc->dev, "gen_newbuf_rx %d\n",
				    error);
			/* reuse previous mbuf */
			(void) gen_mapbuf_rx(sc, q, index, m);
			continue;
		}

		if (sb != NULL) {
			if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
				/* L4 checksum checked; not sure about L3. */
				m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_data += sizeof(struct statusblock);
			m->m_len -= sizeof(struct statusblock);
			len -= sizeof(struct statusblock);
		}
		if (len > ETHER_ALIGN) {
			m_adj(m, ETHER_ALIGN);
			len -= ETHER_ALIGN;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		index = RX_NEXT(index, q->nentries);

		q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
		WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);

		if (cnt == gen_rx_batch) {
			GEN_UNLOCK(sc);
			if_input(ifp, mh);
			GEN_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	if (mh != NULL) {
		GEN_UNLOCK(sc);
		if_input(ifp, mh);
		GEN_LOCK(sc);
	}

	return (npkt);
}

static void
gen_txintr(struct gen_softc *sc, struct tx_queue *q)
{
	uint32_t cons_idx, total;
	struct gen_ring_ent *ent;
	if_t ifp;
	int i, prog;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
	    GENET_TX_DMA_PROD_CONS_MASK;
	total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;

	prog = 0;
	for (i = q->next; q->queued > 0 && total > 0;
	    i = TX_NEXT(i, q->nentries), total--) {
		/* XXX check for errors */

		ent = &q->entries[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		prog++;
		--q->queued;
	}

	if (prog > 0) {
		q->next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}

	q->cons_idx = cons_idx;
}

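/*
 * The second interrupt line is believed to serve the priority rings,
 * which this driver does not use; just report the interrupt.
 */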
static void
gen_intr2(void *arg)
{
	struct gen_softc *sc = arg;

	device_printf(sc->dev, "gen_intr2\n");
}

static int
gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	return (gen_mapbuf_rx(sc, q, index, m));
}

static int
gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	map = q->entries[index].map;
	if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);

	q->entries[index].mbuf = m;
	WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
	WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));

	return (0);
}

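/*
 * Example (hypothetical interface name): the capabilities negotiated
 * in SIOCSIFCAP below can be toggled from userland, e.g.
 * "ifconfig genet0 -txcsum".
 */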
static int
gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, enable, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		GEN_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					gen_setup_rxfilter(sc);
			} else
				gen_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				gen_reset(sc);
		}
		sc->if_flags = if_getflags(ifp);
		GEN_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			GEN_LOCK(sc);
			gen_setup_rxfilter(sc);
			GEN_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		enable = if_getcapenable(ifp);
		flags = ifr->ifr_reqcap ^ enable;
		if (flags & IFCAP_RXCSUM)
			enable ^= IFCAP_RXCSUM;
		if (flags & IFCAP_RXCSUM_IPV6)
			enable ^= IFCAP_RXCSUM_IPV6;
		if (flags & IFCAP_TXCSUM)
			enable ^= IFCAP_TXCSUM;
		if (flags & IFCAP_TXCSUM_IPV6)
			enable ^= IFCAP_TXCSUM_IPV6;
		if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
			if_sethwassist(ifp, GEN_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		if_setcapenable(ifp, enable);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			gen_enable_offload(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
gen_tick(void *softc)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		gen_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

#define	MII_BUSY_RETRY		1000
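/* Each retry polls after a 10 us delay, so the timeout is roughly 10 ms. */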

static int
gen_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gen_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if (((val = RD4(sc, GENET_MDIO_CMD)) &
		    GENET_MDIO_START_BUSY) == 0) {
			if (val & GENET_MDIO_READ_FAILED)
				return (0);	/* -1? */
			val &= GENET_MDIO_VAL_MASK;
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
gen_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct gen_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
	    (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
	    (val & GENET_MDIO_VAL_MASK));
	val = RD4(sc, GENET_MDIO_CMD);
	WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		val = RD4(sc, GENET_MDIO_CMD);
		if ((val & GENET_MDIO_START_BUSY) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
gen_update_link_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	u_int speed;

	GEN_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
			speed = GENET_UMAC_CMD_SPEED_1000;
			sc->link = 1;
			break;
		case IFM_100_TX:
			speed = GENET_UMAC_CMD_SPEED_100;
			sc->link = 1;
			break;
		case IFM_10_T:
			speed = GENET_UMAC_CMD_SPEED_10;
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
	val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
	val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
	val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
	if (sc->phy_mode == MII_CONTYPE_RGMII)
		val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	else
		val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
	WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);

	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_SPEED;
	val |= speed;
	WR4(sc, GENET_UMAC_CMD, val);
}

static void
gen_link_task(void *arg, int pending)
{
	struct gen_softc *sc;

	sc = arg;

	GEN_LOCK(sc);
	gen_update_link_locked(sc);
	GEN_UNLOCK(sc);
}

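/*
 * The MII status-change callback can run with the softc lock already
 * held (e.g. via mii_mediachg() from gen_init_locked()), so defer the
 * link update to a task that takes the lock itself.
 */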
static void
gen_miibus_statchg(device_t dev)
{
	struct gen_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
gen_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct gen_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	GEN_UNLOCK(sc);
}

static int
gen_media_change(if_t ifp)
{
	struct gen_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	GEN_LOCK(sc);
	error = mii_mediachg(mii);
	GEN_UNLOCK(sc);

	return (error);
}

static device_method_t gen_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gen_probe),
	DEVMETHOD(device_attach,	gen_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gen_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gen_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gen_miibus_statchg),

	DEVMETHOD_END
};

static driver_t gen_driver = {
	"genet",
	gen_methods,
	sizeof(struct gen_softc),
};

static devclass_t gen_devclass;

DRIVER_MODULE(genet, simplebus, gen_driver, gen_devclass, 0, 0);
DRIVER_MODULE(miibus, genet, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(genet, ether, 1, 1, 1);
MODULE_DEPEND(genet, miibus, 1, 1, 1);