xref: /freebsd/sys/dev/ffec/if_ffec.c (revision f56f82e0)
1 /*-
2  * Copyright (c) 2013 Ian Lepore <ian@freebsd.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 /*
32  * Driver for Freescale Fast Ethernet Controller, found on imx-series SoCs among
33  * others.  Also works for the ENET Gigabit controller found on imx6 and imx28,
34  * but the driver doesn't currently use any of the ENET advanced features other
35  * than enabling gigabit.
36  *
37  * The interface name 'fec' is already taken by netgraph's Fast Etherchannel
38  * (netgraph/ng_fec.c), so we use 'ffec'.
39  *
40  * Requires an FDT entry with at least these properties:
41  *   fec: ethernet@02188000 {
42  *      compatible = "fsl,imxNN-fec";
43  *      reg = <0x02188000 0x4000>;
44  *      interrupts = <150 151>;
45  *      phy-mode = "rgmii";
46  *      phy-disable-preamble; // optional
47  *   };
48  * The second interrupt number is for IEEE-1588, and is not currently used; it
49  * need not be present.  phy-mode must be one of: "mii", "rmii", "rgmii".
50  * There is also an optional property, phy-disable-preamble, which if present
51  * will disable the preamble bits, cutting the size of each mdio transaction
52  * (and thus the busy-wait time) in half.
53  */
54 
55 #include <sys/param.h>
56 #include <sys/systm.h>
57 #include <sys/bus.h>
58 #include <sys/endian.h>
59 #include <sys/kernel.h>
60 #include <sys/lock.h>
61 #include <sys/malloc.h>
62 #include <sys/mbuf.h>
63 #include <sys/module.h>
64 #include <sys/mutex.h>
65 #include <sys/rman.h>
66 #include <sys/socket.h>
67 #include <sys/sockio.h>
68 #include <sys/sysctl.h>
69 
70 #include <machine/bus.h>
71 
72 #include <net/bpf.h>
73 #include <net/if.h>
74 #include <net/ethernet.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_types.h>
78 #include <net/if_var.h>
79 #include <net/if_vlan_var.h>
80 
81 #include <dev/fdt/fdt_common.h>
82 #include <dev/ffec/if_ffecreg.h>
83 #include <dev/ofw/ofw_bus.h>
84 #include <dev/ofw/ofw_bus_subr.h>
85 #include <dev/mii/mii.h>
86 #include <dev/mii/miivar.h>
87 #include <dev/mii/mii_fdt.h>
88 #include "miibus_if.h"
89 
90 /*
91  * There are small differences in the hardware on various SoCs.  Not every SoC
92  * we support has its own FECTYPE; most work as GENERIC and only the ones that
93  * need different handling get their own entry.  In addition to the types in
94  * this list, there are some flags below that can be ORed into the upper bits.
95  */
96 enum {
97 	FECTYPE_NONE,
98 	FECTYPE_GENERIC,
99 	FECTYPE_IMX53,
100 	FECTYPE_IMX6,
101 	FECTYPE_MVF,
102 };
103 
104 /*
105  * Flags that describe general differences between the FEC hardware in various
106  * SoCs.  These are ORed into the FECTYPE enum values.
107  */
108 #define	FECTYPE_MASK		0x0000ffff
109 #define	FECFLAG_GBE		(0x0001 << 16)
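
/*
 * Illustrative sketch (not compiled): how an entry in the compat_data table
 * below decomposes into its base type and flag bits.  The function name is
 * made up for illustration; the values shown are the ones carried by the
 * "fsl,imx6q-fec" entry.
 */
#if 0
static void
ffec_example_decode_ocd(uintptr_t ocd_data)
{
	int basetype;
	boolean_t has_gbe;

	/* E.g. for "fsl,imx6q-fec", ocd_data is FECTYPE_IMX6 | FECFLAG_GBE. */
	basetype = ocd_data & FECTYPE_MASK;		/* FECTYPE_IMX6 */
	has_gbe = (ocd_data & FECFLAG_GBE) != 0;	/* true */
}
#endif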
110 
111 /*
112  * Table of supported FDT compat strings and their associated FECTYPE values.
113  */
114 static struct ofw_compat_data compat_data[] = {
115 	{"fsl,imx51-fec",	FECTYPE_GENERIC},
116 	{"fsl,imx53-fec",	FECTYPE_IMX53},
117 	{"fsl,imx6q-fec",	FECTYPE_IMX6 | FECFLAG_GBE},
118 	{"fsl,imx6ul-fec",	FECTYPE_IMX6},
119 	{"fsl,mvf600-fec",	FECTYPE_MVF},
120 	{"fsl,mvf-fec",		FECTYPE_MVF},
121 	{NULL,		 	FECTYPE_NONE},
122 };
123 
124 /*
125  * Driver data and defines.
126  */
127 #define	RX_DESC_COUNT	64
128 #define	RX_DESC_SIZE	(sizeof(struct ffec_hwdesc) * RX_DESC_COUNT)
129 #define	TX_DESC_COUNT	64
130 #define	TX_DESC_SIZE	(sizeof(struct ffec_hwdesc) * TX_DESC_COUNT)
131 
132 #define	WATCHDOG_TIMEOUT_SECS	5
133 
134 struct ffec_bufmap {
135 	struct mbuf	*mbuf;
136 	bus_dmamap_t	map;
137 };
138 
139 struct ffec_softc {
140 	device_t		dev;
141 	device_t		miibus;
142 	struct mii_data *	mii_softc;
143 	struct ifnet		*ifp;
144 	int			if_flags;
145 	struct mtx		mtx;
146 	struct resource		*irq_res;
147 	struct resource		*mem_res;
148 	void *			intr_cookie;
149 	struct callout		ffec_callout;
150 	mii_contype_t		phy_conn_type;
151 	uint8_t			fectype;
152 	boolean_t		link_is_up;
153 	boolean_t		is_attached;
154 	boolean_t		is_detaching;
155 	int			tx_watchdog_count;
156 
157 	bus_dma_tag_t		rxdesc_tag;
158 	bus_dmamap_t		rxdesc_map;
159 	struct ffec_hwdesc	*rxdesc_ring;
160 	bus_addr_t		rxdesc_ring_paddr;
161 	bus_dma_tag_t		rxbuf_tag;
162 	struct ffec_bufmap	rxbuf_map[RX_DESC_COUNT];
163 	uint32_t		rx_idx;
164 
165 	bus_dma_tag_t		txdesc_tag;
166 	bus_dmamap_t		txdesc_map;
167 	struct ffec_hwdesc	*txdesc_ring;
168 	bus_addr_t		txdesc_ring_paddr;
169 	bus_dma_tag_t		txbuf_tag;
170 	struct ffec_bufmap	txbuf_map[TX_DESC_COUNT];
171 	uint32_t		tx_idx_head;
172 	uint32_t		tx_idx_tail;
173 	int			txcount;
174 };
175 
176 #define	FFEC_LOCK(sc)			mtx_lock(&(sc)->mtx)
177 #define	FFEC_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
178 #define	FFEC_LOCK_INIT(sc)		mtx_init(&(sc)->mtx, \
179 	    device_get_nameunit((sc)->dev), MTX_NETWORK_LOCK, MTX_DEF)
180 #define	FFEC_LOCK_DESTROY(sc)		mtx_destroy(&(sc)->mtx);
181 #define	FFEC_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED);
182 #define	FFEC_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED);
183 
184 static void ffec_init_locked(struct ffec_softc *sc);
185 static void ffec_stop_locked(struct ffec_softc *sc);
186 static void ffec_txstart_locked(struct ffec_softc *sc);
187 static void ffec_txfinish_locked(struct ffec_softc *sc);
188 
189 static inline uint16_t
190 RD2(struct ffec_softc *sc, bus_size_t off)
191 {
192 
193 	return (bus_read_2(sc->mem_res, off));
194 }
195 
196 static inline void
197 WR2(struct ffec_softc *sc, bus_size_t off, uint16_t val)
198 {
199 
200 	bus_write_2(sc->mem_res, off, val);
201 }
202 
203 static inline uint32_t
204 RD4(struct ffec_softc *sc, bus_size_t off)
205 {
206 
207 	return (bus_read_4(sc->mem_res, off));
208 }
209 
210 static inline void
211 WR4(struct ffec_softc *sc, bus_size_t off, uint32_t val)
212 {
213 
214 	bus_write_4(sc->mem_res, off, val);
215 }
216 
217 static inline uint32_t
218 next_rxidx(struct ffec_softc *sc, uint32_t curidx)
219 {
220 
221 	return ((curidx == RX_DESC_COUNT - 1) ? 0 : curidx + 1);
222 }
223 
224 static inline uint32_t
225 next_txidx(struct ffec_softc *sc, uint32_t curidx)
226 {
227 
228 	return ((curidx == TX_DESC_COUNT - 1) ? 0 : curidx + 1);
229 }
230 
231 static void
232 ffec_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
233 {
234 
235 	if (error != 0)
236 		return;
237 	*(bus_addr_t *)arg = segs[0].ds_addr;
238 }
239 
240 static void
241 ffec_miigasket_setup(struct ffec_softc *sc)
242 {
243 	uint32_t ifmode;
244 
245 	/*
246 	 * We only need the gasket for MII and RMII connections on certain SoCs.
247 	 */
248 
249 	switch (sc->fectype & FECTYPE_MASK)
250 	{
251 	case FECTYPE_IMX53:
252 		break;
253 	default:
254 		return;
255 	}
256 
257 	switch (sc->phy_conn_type)
258 	{
259 	case MII_CONTYPE_MII:
260 		ifmode = 0;
261 		break;
262 	case MII_CONTYPE_RMII:
263 		ifmode = FEC_MIIGSK_CFGR_IF_MODE_RMII;
264 		break;
265 	default:
266 		return;
267 	}
268 
269 	/*
270 	 * Disable the gasket, configure for either MII or RMII, then enable.
271 	 */
272 
273 	WR2(sc, FEC_MIIGSK_ENR, 0);
274 	while (RD2(sc, FEC_MIIGSK_ENR) & FEC_MIIGSK_ENR_READY)
275 		continue;
276 
277 	WR2(sc, FEC_MIIGSK_CFGR, ifmode);
278 
279 	WR2(sc, FEC_MIIGSK_ENR, FEC_MIIGSK_ENR_EN);
280 	while (!(RD2(sc, FEC_MIIGSK_ENR) & FEC_MIIGSK_ENR_READY))
281 		continue;
282 }
283 
284 static boolean_t
285 ffec_miibus_iowait(struct ffec_softc *sc)
286 {
287 	uint32_t timeout;
288 
289 	for (timeout = 10000; timeout != 0; --timeout)
290 		if (RD4(sc, FEC_IER_REG) & FEC_IER_MII)
291 			return (true);
292 
293 	return (false);
294 }
295 
296 static int
297 ffec_miibus_readreg(device_t dev, int phy, int reg)
298 {
299 	struct ffec_softc *sc;
300 	int val;
301 
302 	sc = device_get_softc(dev);
303 
304 	WR4(sc, FEC_IER_REG, FEC_IER_MII);
305 
306 	WR4(sc, FEC_MMFR_REG, FEC_MMFR_OP_READ |
307 	    FEC_MMFR_ST_VALUE | FEC_MMFR_TA_VALUE |
308 	    ((phy << FEC_MMFR_PA_SHIFT) & FEC_MMFR_PA_MASK) |
309 	    ((reg << FEC_MMFR_RA_SHIFT) & FEC_MMFR_RA_MASK));
310 
311 	if (!ffec_miibus_iowait(sc)) {
312 		device_printf(dev, "timeout waiting for mii read\n");
313 		return (-1); /* All-ones is a symptom of bad mdio. */
314 	}
315 
316 	val = RD4(sc, FEC_MMFR_REG) & FEC_MMFR_DATA_MASK;
317 
318 	return (val);
319 }
320 
321 static int
322 ffec_miibus_writereg(device_t dev, int phy, int reg, int val)
323 {
324 	struct ffec_softc *sc;
325 
326 	sc = device_get_softc(dev);
327 
328 	WR4(sc, FEC_IER_REG, FEC_IER_MII);
329 
330 	WR4(sc, FEC_MMFR_REG, FEC_MMFR_OP_WRITE |
331 	    FEC_MMFR_ST_VALUE | FEC_MMFR_TA_VALUE |
332 	    ((phy << FEC_MMFR_PA_SHIFT) & FEC_MMFR_PA_MASK) |
333 	    ((reg << FEC_MMFR_RA_SHIFT) & FEC_MMFR_RA_MASK) |
334 	    (val & FEC_MMFR_DATA_MASK));
335 
336 	if (!ffec_miibus_iowait(sc)) {
337 		device_printf(dev, "timeout waiting for mii write\n");
338 		return (-1);
339 	}
340 
341 	return (0);
342 }
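
/*
 * Usage sketch (not compiled): the MDIO accessors above are normally invoked
 * through miibus, but a direct read of the PHY's BMSR would look like this.
 * The PHY address of 1 and the function name are assumptions for
 * illustration only.
 */
#if 0
static void
ffec_mdio_example(struct ffec_softc *sc)
{
	int bmsr;

	/* Same return convention as above: -1 on MDIO timeout. */
	bmsr = ffec_miibus_readreg(sc->dev, 1, MII_BMSR);
	if (bmsr != -1 && (bmsr & BMSR_LINK) != 0)
		device_printf(sc->dev, "PHY reports link up\n");
}
#endif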
343 
344 static void
345 ffec_miibus_statchg(device_t dev)
346 {
347 	struct ffec_softc *sc;
348 	struct mii_data *mii;
349 	uint32_t ecr, rcr, tcr;
350 
351 	/*
352 	 * Called by the MII bus driver when the PHY establishes link to set the
353 	 * MAC interface registers.
354 	 */
355 
356 	sc = device_get_softc(dev);
357 
358 	FFEC_ASSERT_LOCKED(sc);
359 
360 	mii = sc->mii_softc;
361 
362 	if (mii->mii_media_status & IFM_ACTIVE)
363 		sc->link_is_up = true;
364 	else
365 		sc->link_is_up = false;
366 
367 	ecr = RD4(sc, FEC_ECR_REG) & ~FEC_ECR_SPEED;
368 	rcr = RD4(sc, FEC_RCR_REG) & ~(FEC_RCR_RMII_10T | FEC_RCR_RMII_MODE |
369 	    FEC_RCR_RGMII_EN | FEC_RCR_DRT | FEC_RCR_FCE);
370 	tcr = RD4(sc, FEC_TCR_REG) & ~FEC_TCR_FDEN;
371 
372 	rcr |= FEC_RCR_MII_MODE; /* Must always be on even for R[G]MII. */
373 	switch (sc->phy_conn_type) {
374 	case MII_CONTYPE_RMII:
375 		rcr |= FEC_RCR_RMII_MODE;
376 		break;
377 	case MII_CONTYPE_RGMII:
378 	case MII_CONTYPE_RGMII_ID:
379 	case MII_CONTYPE_RGMII_RXID:
380 	case MII_CONTYPE_RGMII_TXID:
381 		rcr |= FEC_RCR_RGMII_EN;
382 		break;
383 	default:
384 		break;
385 	}
386 
387 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
388 	case IFM_1000_T:
389 	case IFM_1000_SX:
390 		ecr |= FEC_ECR_SPEED;
391 		break;
392 	case IFM_100_TX:
393 		/* Not-FEC_ECR_SPEED + not-FEC_RCR_RMII_10T means 100TX */
394 		break;
395 	case IFM_10_T:
396 		rcr |= FEC_RCR_RMII_10T;
397 		break;
398 	case IFM_NONE:
399 		sc->link_is_up = false;
400 		return;
401 	default:
402 		sc->link_is_up = false;
403 		device_printf(dev, "Unsupported media %u\n",
404 		    IFM_SUBTYPE(mii->mii_media_active));
405 		return;
406 	}
407 
408 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
409 		tcr |= FEC_TCR_FDEN;
410 	else
411 		rcr |= FEC_RCR_DRT;
412 
413 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FLOW) != 0)
414 		rcr |= FEC_RCR_FCE;
415 
416 	WR4(sc, FEC_RCR_REG, rcr);
417 	WR4(sc, FEC_TCR_REG, tcr);
418 	WR4(sc, FEC_ECR_REG, ecr);
419 }
420 
421 static void
422 ffec_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
423 {
424 	struct ffec_softc *sc;
425 	struct mii_data *mii;
426 
427 
428 	sc = ifp->if_softc;
429 	mii = sc->mii_softc;
430 	FFEC_LOCK(sc);
431 	mii_pollstat(mii);
432 	ifmr->ifm_active = mii->mii_media_active;
433 	ifmr->ifm_status = mii->mii_media_status;
434 	FFEC_UNLOCK(sc);
435 }
436 
437 static int
438 ffec_media_change_locked(struct ffec_softc *sc)
439 {
440 
441 	return (mii_mediachg(sc->mii_softc));
442 }
443 
444 static int
445 ffec_media_change(struct ifnet * ifp)
446 {
447 	struct ffec_softc *sc;
448 	int error;
449 
450 	sc = ifp->if_softc;
451 
452 	FFEC_LOCK(sc);
453 	error = ffec_media_change_locked(sc);
454 	FFEC_UNLOCK(sc);
455 	return (error);
456 }
457 
458 static void ffec_clear_stats(struct ffec_softc *sc)
459 {
460 	uint32_t mibc;
461 
462 	mibc = RD4(sc, FEC_MIBC_REG);
463 
464 	/*
465 	 * On newer hardware the statistic regs are cleared by toggling a bit in
466 	 * the mib control register.  On older hardware the clear procedure is
467 	 * to disable statistics collection, zero the regs, then re-enable.
468 	 */
469 	if (sc->fectype == FECTYPE_IMX6 || sc->fectype == FECTYPE_MVF) {
470 		WR4(sc, FEC_MIBC_REG, mibc | FEC_MIBC_CLEAR);
471 		WR4(sc, FEC_MIBC_REG, mibc & ~FEC_MIBC_CLEAR);
472 	} else {
473 		WR4(sc, FEC_MIBC_REG, mibc | FEC_MIBC_DIS);
474 
475 		WR4(sc, FEC_IEEE_R_DROP, 0);
476 		WR4(sc, FEC_IEEE_R_MACERR, 0);
477 		WR4(sc, FEC_RMON_R_CRC_ALIGN, 0);
478 		WR4(sc, FEC_RMON_R_FRAG, 0);
479 		WR4(sc, FEC_RMON_R_JAB, 0);
480 		WR4(sc, FEC_RMON_R_MC_PKT, 0);
481 		WR4(sc, FEC_RMON_R_OVERSIZE, 0);
482 		WR4(sc, FEC_RMON_R_PACKETS, 0);
483 		WR4(sc, FEC_RMON_R_UNDERSIZE, 0);
484 		WR4(sc, FEC_RMON_T_COL, 0);
485 		WR4(sc, FEC_RMON_T_CRC_ALIGN, 0);
486 		WR4(sc, FEC_RMON_T_FRAG, 0);
487 		WR4(sc, FEC_RMON_T_JAB, 0);
488 		WR4(sc, FEC_RMON_T_MC_PKT, 0);
489 		WR4(sc, FEC_RMON_T_OVERSIZE, 0);
490 		WR4(sc, FEC_RMON_T_PACKETS, 0);
491 		WR4(sc, FEC_RMON_T_UNDERSIZE, 0);
492 
493 		WR4(sc, FEC_MIBC_REG, mibc);
494 	}
495 }
496 
497 static void
498 ffec_harvest_stats(struct ffec_softc *sc)
499 {
500 	struct ifnet *ifp;
501 
502 	ifp = sc->ifp;
503 
504 	/*
505 	 * - FEC_IEEE_R_DROP is "dropped due to invalid start frame delimiter"
506 	 *   so it's really just another type of input error.
507 	 * - FEC_IEEE_R_MACERR is "no receive fifo space"; count as input drops.
508 	 */
509 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, RD4(sc, FEC_RMON_R_PACKETS));
510 	if_inc_counter(ifp, IFCOUNTER_IMCASTS, RD4(sc, FEC_RMON_R_MC_PKT));
511 	if_inc_counter(ifp, IFCOUNTER_IERRORS,
512 	    RD4(sc, FEC_RMON_R_CRC_ALIGN) + RD4(sc, FEC_RMON_R_UNDERSIZE) +
513 	    RD4(sc, FEC_RMON_R_OVERSIZE) + RD4(sc, FEC_RMON_R_FRAG) +
514 	    RD4(sc, FEC_RMON_R_JAB) + RD4(sc, FEC_IEEE_R_DROP));
515 
516 	if_inc_counter(ifp, IFCOUNTER_IQDROPS, RD4(sc, FEC_IEEE_R_MACERR));
517 
518 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, RD4(sc, FEC_RMON_T_PACKETS));
519 	if_inc_counter(ifp, IFCOUNTER_OMCASTS, RD4(sc, FEC_RMON_T_MC_PKT));
520 	if_inc_counter(ifp, IFCOUNTER_OERRORS,
521 	    RD4(sc, FEC_RMON_T_CRC_ALIGN) + RD4(sc, FEC_RMON_T_UNDERSIZE) +
522 	    RD4(sc, FEC_RMON_T_OVERSIZE) + RD4(sc, FEC_RMON_T_FRAG) +
523 	    RD4(sc, FEC_RMON_T_JAB));
524 
525 	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, RD4(sc, FEC_RMON_T_COL));
526 
527 	ffec_clear_stats(sc);
528 }
529 
530 static void
531 ffec_tick(void *arg)
532 {
533 	struct ffec_softc *sc;
534 	struct ifnet *ifp;
535 	int link_was_up;
536 
537 	sc = arg;
538 
539 	FFEC_ASSERT_LOCKED(sc);
540 
541 	ifp = sc->ifp;
542 
543 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
544 		return;
545 
546 	/*
547 	 * Typical tx watchdog.  If this fires it indicates that we enqueued
548 	 * packets for output and never got a txdone interrupt for them.  Maybe
549 	 * it's a missed interrupt somehow, just pretend we got one.
550 	 */
551 	if (sc->tx_watchdog_count > 0) {
552 		if (--sc->tx_watchdog_count == 0) {
553 			ffec_txfinish_locked(sc);
554 		}
555 	}
556 
557 	/* Gather stats from hardware counters. */
558 	ffec_harvest_stats(sc);
559 
560 	/* Check the media status. */
561 	link_was_up = sc->link_is_up;
562 	mii_tick(sc->mii_softc);
563 	if (sc->link_is_up && !link_was_up)
564 		ffec_txstart_locked(sc);
565 
566 	/* Schedule another check one second from now. */
567 	callout_reset(&sc->ffec_callout, hz, ffec_tick, sc);
568 }
569 
570 inline static uint32_t
571 ffec_setup_txdesc(struct ffec_softc *sc, int idx, bus_addr_t paddr,
572     uint32_t len)
573 {
574 	uint32_t nidx;
575 	uint32_t flags;
576 
577 	nidx = next_txidx(sc, idx);
578 
579 	/* Addr/len 0 means we're clearing the descriptor after xmit done. */
580 	if (paddr == 0 || len == 0) {
581 		flags = 0;
582 		--sc->txcount;
583 	} else {
584 		flags = FEC_TXDESC_READY | FEC_TXDESC_L | FEC_TXDESC_TC;
585 		++sc->txcount;
586 	}
587 	if (nidx == 0)
588 		flags |= FEC_TXDESC_WRAP;
589 
590 	/*
591 	 * The hardware requires 32-bit physical addresses.  We set up the dma
592 	 * tag to indicate that, so the cast to uint32_t should never lose
593 	 * significant bits.
594 	 */
595 	sc->txdesc_ring[idx].buf_paddr = (uint32_t)paddr;
596 	sc->txdesc_ring[idx].flags_len = flags | len; /* Must be set last! */
597 
598 	return (nidx);
599 }
600 
601 static int
602 ffec_setup_txbuf(struct ffec_softc *sc, int idx, struct mbuf **mp)
603 {
604 	struct mbuf * m;
605 	int error, nsegs;
606 	struct bus_dma_segment seg;
607 
608 	if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
609 		return (ENOMEM);
610 	*mp = m;
611 
612 	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
613 	    m, &seg, &nsegs, 0);
614 	if (error != 0) {
615 		return (ENOMEM);
616 	}
617 	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
618 	    BUS_DMASYNC_PREWRITE);
619 
620 	sc->txbuf_map[idx].mbuf = m;
621 	ffec_setup_txdesc(sc, idx, seg.ds_addr, seg.ds_len);
622 
623 	return (0);
624 
625 }
626 
627 static void
628 ffec_txstart_locked(struct ffec_softc *sc)
629 {
630 	struct ifnet *ifp;
631 	struct mbuf *m;
632 	int enqueued;
633 
634 	FFEC_ASSERT_LOCKED(sc);
635 
636 	if (!sc->link_is_up)
637 		return;
638 
639 	ifp = sc->ifp;
640 
641 	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
642 		return;
643 
644 	enqueued = 0;
645 
646 	for (;;) {
647 		if (sc->txcount == (TX_DESC_COUNT-1)) {
648 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
649 			break;
650 		}
651 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
652 		if (m == NULL)
653 			break;
654 		if (ffec_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) {
655 			IFQ_DRV_PREPEND(&ifp->if_snd, m);
656 			break;
657 		}
658 		BPF_MTAP(ifp, m);
659 		sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head);
660 		++enqueued;
661 	}
662 
663 	if (enqueued != 0) {
664 		bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_PREWRITE);
665 		WR4(sc, FEC_TDAR_REG, FEC_TDAR_TDAR);
666 		bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTWRITE);
667 		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
668 	}
669 }
670 
671 static void
672 ffec_txstart(struct ifnet *ifp)
673 {
674 	struct ffec_softc *sc = ifp->if_softc;
675 
676 	FFEC_LOCK(sc);
677 	ffec_txstart_locked(sc);
678 	FFEC_UNLOCK(sc);
679 }
680 
681 static void
682 ffec_txfinish_locked(struct ffec_softc *sc)
683 {
684 	struct ifnet *ifp;
685 	struct ffec_hwdesc *desc;
686 	struct ffec_bufmap *bmap;
687 	boolean_t retired_buffer;
688 
689 	FFEC_ASSERT_LOCKED(sc);
690 
691 	/* XXX Can't set PRE|POST right now, but we need both. */
692 	bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_PREREAD);
693 	bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTREAD);
694 	ifp = sc->ifp;
695 	retired_buffer = false;
696 	while (sc->tx_idx_tail != sc->tx_idx_head) {
697 		desc = &sc->txdesc_ring[sc->tx_idx_tail];
698 		if (desc->flags_len & FEC_TXDESC_READY)
699 			break;
700 		retired_buffer = true;
701 		bmap = &sc->txbuf_map[sc->tx_idx_tail];
702 		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
703 		    BUS_DMASYNC_POSTWRITE);
704 		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
705 		m_freem(bmap->mbuf);
706 		bmap->mbuf = NULL;
707 		ffec_setup_txdesc(sc, sc->tx_idx_tail, 0, 0);
708 		sc->tx_idx_tail = next_txidx(sc, sc->tx_idx_tail);
709 	}
710 
711 	/*
712 	 * If we retired any buffers, there will be open tx slots available in
713 	 * the descriptor ring, go try to start some new output.
714 	 */
715 	if (retired_buffer) {
716 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
717 		ffec_txstart_locked(sc);
718 	}
719 
720 	/* If there are no buffers outstanding, muzzle the watchdog. */
721 	if (sc->tx_idx_tail == sc->tx_idx_head) {
722 		sc->tx_watchdog_count = 0;
723 	}
724 }
725 
726 inline static uint32_t
727 ffec_setup_rxdesc(struct ffec_softc *sc, int idx, bus_addr_t paddr)
728 {
729 	uint32_t nidx;
730 
731 	/*
732 	 * The hardware requires 32-bit physical addresses.  We set up the dma
733 	 * tag to indicate that, so the cast to uint32_t should never lose
734 	 * significant bits.
735 	 */
736 	nidx = next_rxidx(sc, idx);
737 	sc->rxdesc_ring[idx].buf_paddr = (uint32_t)paddr;
738 	sc->rxdesc_ring[idx].flags_len = FEC_RXDESC_EMPTY |
739 		((nidx == 0) ? FEC_RXDESC_WRAP : 0);
740 
741 	return (nidx);
742 }
743 
744 static int
745 ffec_setup_rxbuf(struct ffec_softc *sc, int idx, struct mbuf * m)
746 {
747 	int error, nsegs;
748 	struct bus_dma_segment seg;
749 
750 	/*
751 	 * We need to leave at least ETHER_ALIGN bytes free at the beginning of
752 	 * the buffer to allow the data to be re-aligned after receiving it (by
753 	 * copying it backwards ETHER_ALIGN bytes in the same buffer).  We also
754 	 * have to ensure that the beginning of the buffer is aligned to the
755 	 * hardware's requirements.
756 	 */
757 	m_adj(m, roundup(ETHER_ALIGN, FEC_RXBUF_ALIGN));
758 
759 	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
760 	    m, &seg, &nsegs, 0);
761 	if (error != 0) {
762 		return (error);
763 	}
764 
765 	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
766 	    BUS_DMASYNC_PREREAD);
767 
768 	sc->rxbuf_map[idx].mbuf = m;
769 	ffec_setup_rxdesc(sc, idx, seg.ds_addr);
770 
771 	return (0);
772 }
773 
774 static struct mbuf *
775 ffec_alloc_mbufcl(struct ffec_softc *sc)
776 {
777 	struct mbuf *m;
778 
779 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
780 	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
781 
782 	return (m);
783 }
784 
785 static void
786 ffec_rxfinish_onebuf(struct ffec_softc *sc, int len)
787 {
788 	struct mbuf *m, *newmbuf;
789 	struct ffec_bufmap *bmap;
790 	uint8_t *dst, *src;
791 	int error;
792 
793 	/*
794 	 *  First try to get a new mbuf to plug into this slot in the rx ring.
795 	 *  If that fails, drop the current packet and recycle the current
796 	 *  mbuf, which is still mapped and loaded.
797 	 */
798 	if ((newmbuf = ffec_alloc_mbufcl(sc)) == NULL) {
799 		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
800 		ffec_setup_rxdesc(sc, sc->rx_idx,
801 		    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
802 		return;
803 	}
804 
805 	/*
806 	 *  Unfortunately, the protocol headers need to be aligned on a 32-bit
807 	 *  boundary for the upper layers.  The hardware requires receive
808 	 *  buffers to be 16-byte aligned.  The ethernet header is 14 bytes,
809 	 *  leaving the protocol header unaligned.  We used m_adj() after
810 	 *  allocating the buffer to leave empty space at the start of the
811 	 *  buffer, now we'll use the alignment agnostic bcopy() routine to
812 	 *  shuffle all the data backwards 2 bytes and adjust m_data.
813 	 *
814 	 *  XXX imx6 hardware is able to do this 2-byte alignment by setting the
815 	 *  SHIFT16 bit in the RACC register.  Older hardware doesn't have that
816 	 *  feature, but for them could we speed this up by copying just the
817 	 *  protocol headers into their own small mbuf then chaining the cluster
818 	 *  to it?  That way we'd only need to copy like 64 bytes or whatever
819 	 *  the biggest header is, instead of the whole 1530ish-byte frame.
820 	 */
821 
822 	FFEC_UNLOCK(sc);
823 
824 	bmap = &sc->rxbuf_map[sc->rx_idx];
825 	len -= ETHER_CRC_LEN;
826 	bus_dmamap_sync(sc->rxbuf_tag, bmap->map, BUS_DMASYNC_POSTREAD);
827 	bus_dmamap_unload(sc->rxbuf_tag, bmap->map);
828 	m = bmap->mbuf;
829 	bmap->mbuf = NULL;
830 	m->m_len = len;
831 	m->m_pkthdr.len = len;
832 	m->m_pkthdr.rcvif = sc->ifp;
833 
834 	src = mtod(m, uint8_t*);
835 	dst = src - ETHER_ALIGN;
836 	bcopy(src, dst, len);
837 	m->m_data = dst;
838 	sc->ifp->if_input(sc->ifp, m);
839 
840 	FFEC_LOCK(sc);
841 
842 	if ((error = ffec_setup_rxbuf(sc, sc->rx_idx, newmbuf)) != 0) {
843 		device_printf(sc->dev, "ffec_setup_rxbuf error %d\n", error);
844 		/* XXX Now what?  We've got a hole in the rx ring. */
845 	}
846 
847 }
848 
849 static void
850 ffec_rxfinish_locked(struct ffec_softc *sc)
851 {
852 	struct ffec_hwdesc *desc;
853 	int len;
854 	boolean_t produced_empty_buffer;
855 
856 	FFEC_ASSERT_LOCKED(sc);
857 
858 	/* XXX Can't set PRE|POST right now, but we need both. */
859 	bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_PREREAD);
860 	bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_POSTREAD);
861 	produced_empty_buffer = false;
862 	for (;;) {
863 		desc = &sc->rxdesc_ring[sc->rx_idx];
864 		if (desc->flags_len & FEC_RXDESC_EMPTY)
865 			break;
866 		produced_empty_buffer = true;
867 		len = (desc->flags_len & FEC_RXDESC_LEN_MASK);
868 		if (len < 64) {
869 			/*
870 			 * Just recycle the descriptor and continue.
871 			 */
872 			ffec_setup_rxdesc(sc, sc->rx_idx,
873 			    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
874 		} else if ((desc->flags_len & FEC_RXDESC_L) == 0) {
875 			/*
876 			 * The entire frame is not in this buffer.  Impossible.
877 			 * Recycle the descriptor and continue.
878 			 *
879 			 * XXX what's the right way to handle this? Probably we
880 			 * should stop/init the hardware because this should
881 			 * just really never happen when we have buffers bigger
882 			 * than the maximum frame size.
883 			 */
884 			device_printf(sc->dev,
885 			    "ffec_rxfinish: received frame without LAST bit set\n");
886 			ffec_setup_rxdesc(sc, sc->rx_idx,
887 			    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
888 		} else if (desc->flags_len & FEC_RXDESC_ERROR_BITS) {
889 			/*
890 			 *  Something went wrong with receiving the frame, we
891 			 *  don't care what (the hardware has counted the error
892 			 *  in the stats registers already), we just reuse the
893 			 *  same mbuf, which is still dma-mapped, by resetting
894 			 *  the rx descriptor.
895 			 */
896 			ffec_setup_rxdesc(sc, sc->rx_idx,
897 			    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
898 		} else {
899 			/*
900 			 *  Normal case: a good frame all in one buffer.
901 			 */
902 			ffec_rxfinish_onebuf(sc, len);
903 		}
904 		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
905 	}
906 
907 	if (produced_empty_buffer) {
908 		bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_PREWRITE);
909 		WR4(sc, FEC_RDAR_REG, FEC_RDAR_RDAR);
910 		bus_dmamap_sync(sc->rxdesc_tag, sc->rxdesc_map, BUS_DMASYNC_POSTWRITE);
911 	}
912 }
913 
914 static void
915 ffec_get_hwaddr(struct ffec_softc *sc, uint8_t *hwaddr)
916 {
917 	uint32_t palr, paur, rnd;
918 
919 	/*
920 	 * Try to recover a MAC address from the running hardware. If there's
921 	 * something non-zero there, assume the bootloader did the right thing
922 	 * and just use it.
923 	 *
924 	 * Otherwise, set the address to a convenient locally assigned address,
925 	 * 'bsd' + random 24 low-order bits.  'b' is 0x62, which has the locally
926 	 * assigned bit set, and the broadcast/multicast bit clear.
927 	 */
928 	palr = RD4(sc, FEC_PALR_REG);
929 	paur = RD4(sc, FEC_PAUR_REG) & FEC_PAUR_PADDR2_MASK;
930 	if ((palr | paur) != 0) {
931 		hwaddr[0] = palr >> 24;
932 		hwaddr[1] = palr >> 16;
933 		hwaddr[2] = palr >>  8;
934 		hwaddr[3] = palr >>  0;
935 		hwaddr[4] = paur >> 24;
936 		hwaddr[5] = paur >> 16;
937 	} else {
938 		rnd = arc4random() & 0x00ffffff;
939 		hwaddr[0] = 'b';
940 		hwaddr[1] = 's';
941 		hwaddr[2] = 'd';
942 		hwaddr[3] = rnd >> 16;
943 		hwaddr[4] = rnd >>  8;
944 		hwaddr[5] = rnd >>  0;
945 	}
946 
947 	if (bootverbose) {
948 		device_printf(sc->dev,
949 		    "MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
950 		    hwaddr[0], hwaddr[1], hwaddr[2],
951 		    hwaddr[3], hwaddr[4], hwaddr[5]);
952 	}
953 }
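
/*
 * For example, when the fallback path above is taken the address has the
 * fixed prefix 62:73:64 ('b', 's', 'd') followed by three random octets,
 * e.g. 62:73:64:12:34:56 (the trailing octets here are made up).  0x62 has
 * the locally-administered bit (0x02) set and the multicast bit (0x01)
 * clear, as required for a locally assigned unicast address.
 */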
954 
955 static void
956 ffec_setup_rxfilter(struct ffec_softc *sc)
957 {
958 	struct ifnet *ifp;
959 	struct ifmultiaddr *ifma;
960 	uint8_t *eaddr;
961 	uint32_t crc;
962 	uint64_t ghash, ihash;
963 
964 	FFEC_ASSERT_LOCKED(sc);
965 
966 	ifp = sc->ifp;
967 
968 	/*
969 	 * Set the multicast (group) filter hash.
970 	 */
971 	if ((ifp->if_flags & IFF_ALLMULTI))
972 		ghash = 0xffffffffffffffffLLU;
973 	else {
974 		ghash = 0;
975 		if_maddr_rlock(ifp);
976 		TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
977 			if (ifma->ifma_addr->sa_family != AF_LINK)
978 				continue;
979 			/* 6 bits from MSB in LE CRC32 are used for hash. */
980 			crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
981 			    ifma->ifma_addr), ETHER_ADDR_LEN);
982 			ghash |= 1LLU << (((uint8_t *)&crc)[3] >> 2);
983 		}
984 		if_maddr_runlock(ifp);
985 	}
986 	WR4(sc, FEC_GAUR_REG, (uint32_t)(ghash >> 32));
987 	WR4(sc, FEC_GALR_REG, (uint32_t)ghash);
988 
989 	/*
990 	 * Set the individual address filter hash.
991 	 *
992 	 * XXX Is 0 the right value when promiscuous is off?  This hw feature
993 	 * seems to support the concept of MAC address aliases, does such a
994 	 * thing even exist?
995 	 */
996 	if ((ifp->if_flags & IFF_PROMISC))
997 		ihash = 0xffffffffffffffffLLU;
998 	else {
999 		ihash = 0;
1000 	}
1001 	WR4(sc, FEC_IAUR_REG, (uint32_t)(ihash >> 32));
1002 	WR4(sc, FEC_IALR_REG, (uint32_t)ihash);
1003 
1004 	/*
1005 	 * Set the primary address.
1006 	 */
1007 	eaddr = IF_LLADDR(ifp);
1008 	WR4(sc, FEC_PALR_REG, (eaddr[0] << 24) | (eaddr[1] << 16) |
1009 	    (eaddr[2] <<  8) | eaddr[3]);
1010 	WR4(sc, FEC_PAUR_REG, (eaddr[4] << 24) | (eaddr[5] << 16));
1011 }
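
/*
 * Worked example (not compiled) of the group-hash computation above: the
 * little-endian CRC32 of the six address bytes is taken, its 6 most
 * significant bits select one of the 64 hash bits, and bits 63..32 of the
 * result land in GAUR while bits 31..0 land in GALR.  The function name is
 * made up, and the all-hosts group address is used purely for illustration.
 */
#if 0
static uint64_t
ffec_example_hashbit(void)
{
	static const uint8_t allhosts[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc;

	crc = ether_crc32_le(allhosts, ETHER_ADDR_LEN);
	/* Same bit-selection expression as in ffec_setup_rxfilter() above. */
	return (1LLU << (((uint8_t *)&crc)[3] >> 2));
}
#endif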
1012 
1013 static void
1014 ffec_stop_locked(struct ffec_softc *sc)
1015 {
1016 	struct ifnet *ifp;
1017 	struct ffec_hwdesc *desc;
1018 	struct ffec_bufmap *bmap;
1019 	int idx;
1020 
1021 	FFEC_ASSERT_LOCKED(sc);
1022 
1023 	ifp = sc->ifp;
1024 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1025 	sc->tx_watchdog_count = 0;
1026 
1027 	/*
1028 	 * Stop the hardware, mask all interrupts, and clear all current
1029 	 * interrupt status bits.
1030 	 */
1031 	WR4(sc, FEC_ECR_REG, RD4(sc, FEC_ECR_REG) & ~FEC_ECR_ETHEREN);
1032 	WR4(sc, FEC_IEM_REG, 0x00000000);
1033 	WR4(sc, FEC_IER_REG, 0xffffffff);
1034 
1035 	/*
1036 	 * Stop the media-check callout.  Do not use callout_drain() because
1037 	 * we're holding a mutex the callout acquires, and if it's currently
1038 	 * waiting to acquire it, we'd deadlock.  If it is waiting now, the
1039 	 * ffec_tick() routine will return without doing anything when it sees
1040 	 * that IFF_DRV_RUNNING is not set, so avoiding callout_drain() is safe.
1041 	 */
1042 	callout_stop(&sc->ffec_callout);
1043 
1044 	/*
1045 	 * Discard all untransmitted buffers.  Each buffer is simply freed;
1046 	 * it's as if the bits were transmitted and then lost on the wire.
1047 	 *
1048 	 * XXX Is this right?  Or should we use IFQ_DRV_PREPEND() to put them
1049 	 * back on the queue for when we get restarted later?
1050 	 */
1051 	idx = sc->tx_idx_tail;
1052 	while (idx != sc->tx_idx_head) {
1053 		desc = &sc->txdesc_ring[idx];
1054 		bmap = &sc->txbuf_map[idx];
1055 		if (desc->buf_paddr != 0) {
1056 			bus_dmamap_unload(sc->txbuf_tag, bmap->map);
1057 			m_freem(bmap->mbuf);
1058 			bmap->mbuf = NULL;
1059 			ffec_setup_txdesc(sc, idx, 0, 0);
1060 		}
1061 		idx = next_txidx(sc, idx);
1062 	}
1063 
1064 	/*
1065 	 * Discard all unprocessed receive buffers.  This amounts to just
1066 	 * pretending that nothing ever got received into them.  We reuse the
1067 	 * mbuf already mapped for each desc, simply turning the EMPTY flags
1068 	 * back on so they'll get reused when we start up again.
1069 	 */
1070 	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
1071 		desc = &sc->rxdesc_ring[idx];
1072 		ffec_setup_rxdesc(sc, idx, desc->buf_paddr);
1073 	}
1074 }
1075 
1076 static void
1077 ffec_init_locked(struct ffec_softc *sc)
1078 {
1079 	struct ifnet *ifp = sc->ifp;
1080 	uint32_t maxbuf, maxfl, regval;
1081 
1082 	FFEC_ASSERT_LOCKED(sc);
1083 
1084 	/*
1085 	 * The hardware has a limit of 0x7ff as the max frame length (see
1086 	 * comments for MRBR below), and we use mbuf clusters as receive
1087 	 * buffers, and we currently are designed to receive an entire frame
1088 	 * into a single buffer.
1089 	 *
1090 	 * We start with a MCLBYTES-sized cluster, but we have to offset into
1091 	 * the buffer by ETHER_ALIGN to make room for post-receive re-alignment,
1092 	 * and then that value has to be rounded up to the hardware's DMA
1093 	 * alignment requirements, so all in all our buffer is that much smaller
1094 	 * than MCLBYTES.
1095 	 *
1096 	 * The resulting value is used as the frame truncation length and the
1097 	 * max buffer receive buffer size for now.  It'll become more complex
1098 	 * when we support jumbo frames and receiving fragments of them into
1099 	 * separate buffers.
1100 	 */
1101 	maxbuf = MCLBYTES - roundup(ETHER_ALIGN, FEC_RXBUF_ALIGN);
1102 	maxfl = min(maxbuf, 0x7ff);
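	/*
	 * Worked numbers for the common case, assuming MCLBYTES is 2048,
	 * ETHER_ALIGN is 2, and FEC_RXBUF_ALIGN is 16: maxbuf =
	 * 2048 - roundup(2, 16) = 2032 and maxfl = min(2032, 0x7ff) = 2032,
	 * comfortably under the 0x7ff limit of the older parts.
	 */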
1103 
1104 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1105 		return;
1106 
1107 	/* Mask all interrupts and clear all current interrupt status bits. */
1108 	WR4(sc, FEC_IEM_REG, 0x00000000);
1109 	WR4(sc, FEC_IER_REG, 0xffffffff);
1110 
1111 	/*
1112 	 * Go set up palr/puar, galr/gaur, ialr/iaur.
1113 	 */
1114 	ffec_setup_rxfilter(sc);
1115 
1116 	/*
1117 	 * TFWR - Transmit FIFO watermark register.
1118 	 *
1119 	 * Set the transmit fifo watermark register to "store and forward" mode
1120 	 * and also set a threshold of 128 bytes in the fifo before transmission
1121 	 * of a frame begins (to avoid dma underruns).  Recent FEC hardware
1122 	 * supports STRFWD and when that bit is set, the watermark level in the
1123 	 * low bits is ignored.  Older hardware doesn't have STRFWD, but writing
1124 	 * to that bit is innocuous, and the TWFR bits get used instead.
1125 	 */
1126 	WR4(sc, FEC_TFWR_REG, FEC_TFWR_STRFWD | FEC_TFWR_TWFR_128BYTE);
1127 
1128 	/* RCR - Receive control register.
1129 	 *
1130 	 * Set max frame length + clean out anything left from u-boot.
1131 	 */
1132 	WR4(sc, FEC_RCR_REG, (maxfl << FEC_RCR_MAX_FL_SHIFT));
1133 
1134 	/*
1135 	 * TCR - Transmit control register.
1136 	 *
1137 	 * Clean out anything left from u-boot.  Any necessary values are set in
1138 	 * ffec_miibus_statchg() based on the media type.
1139 	 */
1140 	WR4(sc, FEC_TCR_REG, 0);
1141 
1142 	/*
1143 	 * OPD - Opcode/pause duration.
1144 	 *
1145 	 * XXX These magic numbers come from u-boot.
1146 	 */
1147 	WR4(sc, FEC_OPD_REG, 0x00010020);
1148 
1149 	/*
1150 	 * FRSR - Fifo receive start register.
1151 	 *
1152 	 * This register does not exist on imx6; it is present on earlier
1153 	 * hardware.  The u-boot code sets this to a non-default value that's 32
1154 	 * bytes larger than the default, with no clue as to why.  The default
1155 	 * value should work fine, so there's no code to init it here.
1156 	 */
1157 
1158 	/*
1159 	 *  MRBR - Max RX buffer size.
1160 	 *
1161 	 *  Note: For hardware prior to imx6 this value cannot exceed 0x07ff,
1162 	 *  but the datasheet says no such thing for imx6.  On the imx6, setting
1163 	 *  this to 2K without setting EN1588 resulted in a crazy runaway
1164 	 *  receive loop in the hardware, where every rx descriptor in the ring
1165 	 *  had its EMPTY flag cleared, no completion or error flags set, and a
1166 	 *  length of zero.  I think maybe you can only exceed it when EN1588 is
1167 	 *  set, like maybe that's what enables jumbo frames, because in general
1168 	 *  the EN1588 flag seems to be the "enable new stuff" vs. "be legacy-
1169 	 *  compatible" flag.
1170 	 */
1171 	WR4(sc, FEC_MRBR_REG, maxfl << FEC_MRBR_R_BUF_SIZE_SHIFT);
1172 
1173 	/*
1174 	 * FTRL - Frame truncation length.
1175 	 *
1176 	 * Must be greater than or equal to the value set in FEC_RCR_MAXFL.
1177 	 */
1178 	WR4(sc, FEC_FTRL_REG, maxfl);
1179 
1180 	/*
1181 	 * RDSR / TDSR descriptor ring pointers.
1182 	 *
1183 	 * When we turn on ECR_ETHEREN at the end, the hardware zeroes its
1184 	 * internal current descriptor index values for both rings, so we zero
1185 	 * our index values as well.
1186 	 */
1187 	sc->rx_idx = 0;
1188 	sc->tx_idx_head = sc->tx_idx_tail = 0;
1189 	sc->txcount = 0;
1190 	WR4(sc, FEC_RDSR_REG, sc->rxdesc_ring_paddr);
1191 	WR4(sc, FEC_TDSR_REG, sc->txdesc_ring_paddr);
1192 
1193 	/*
1194 	 * EIM - interrupt mask register.
1195 	 *
1196 	 * We always enable the same set of interrupts while running; unlike
1197 	 * some drivers there's no need to change the mask on the fly depending
1198 	 * on what operations are in progress.
1199 	 */
1200 	WR4(sc, FEC_IEM_REG, FEC_IER_TXF | FEC_IER_RXF | FEC_IER_EBERR);
1201 
1202 	/*
1203 	 * MIBC - MIB control (hardware stats); clear all statistics regs, then
1204 	 * enable collection of statistics.
1205 	 */
1206 	regval = RD4(sc, FEC_MIBC_REG);
1207 	WR4(sc, FEC_MIBC_REG, regval | FEC_MIBC_DIS);
1208 	ffec_clear_stats(sc);
1209 	WR4(sc, FEC_MIBC_REG, regval & ~FEC_MIBC_DIS);
1210 
1211 	/*
1212 	 * ECR - Ethernet control register.
1213 	 *
1214 	 * This must happen after all the other config registers are set.  If
1215 	 * we're running on little-endian hardware, also set the flag for byte-
1216 	 * swapping descriptor ring entries.  This flag doesn't exist on older
1217 	 * hardware, but it can be safely set -- the bit position it occupies
1218 	 * was unused.
1219 	 */
1220 	regval = RD4(sc, FEC_ECR_REG);
1221 #if _BYTE_ORDER == _LITTLE_ENDIAN
1222 	regval |= FEC_ECR_DBSWP;
1223 #endif
1224 	regval |= FEC_ECR_ETHEREN;
1225 	WR4(sc, FEC_ECR_REG, regval);
1226 
1227 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1228 
1229 	/*
1230 	 * Call mii_mediachg() which will call back into ffec_miibus_statchg()
1231 	 * to set up the remaining config registers based on the current media.
1232 	 */
1233 	mii_mediachg(sc->mii_softc);
1234 	callout_reset(&sc->ffec_callout, hz, ffec_tick, sc);
1235 
1236 	/*
1237 	 * Tell the hardware that receive buffers are available.  They were made
1238 	 * available in ffec_attach() or ffec_stop().
1239 	 */
1240 	WR4(sc, FEC_RDAR_REG, FEC_RDAR_RDAR);
1241 }
1242 
1243 static void
1244 ffec_init(void *if_softc)
1245 {
1246 	struct ffec_softc *sc = if_softc;
1247 
1248 	FFEC_LOCK(sc);
1249 	ffec_init_locked(sc);
1250 	FFEC_UNLOCK(sc);
1251 }
1252 
1253 static void
1254 ffec_intr(void *arg)
1255 {
1256 	struct ffec_softc *sc;
1257 	uint32_t ier;
1258 
1259 	sc = arg;
1260 
1261 	FFEC_LOCK(sc);
1262 
1263 	ier = RD4(sc, FEC_IER_REG);
1264 
1265 	if (ier & FEC_IER_TXF) {
1266 		WR4(sc, FEC_IER_REG, FEC_IER_TXF);
1267 		ffec_txfinish_locked(sc);
1268 	}
1269 
1270 	if (ier & FEC_IER_RXF) {
1271 		WR4(sc, FEC_IER_REG, FEC_IER_RXF);
1272 		ffec_rxfinish_locked(sc);
1273 	}
1274 
1275 	/*
1276 	 * We actually don't care about most errors, because the hardware copes
1277 	 * with them just fine, discarding the incoming bad frame, or forcing a
1278 	 * bad CRC onto an outgoing bad frame, and counting the errors in the
1279 	 * stats registers.  The one that really matters is EBERR (DMA bus
1280 	 * error) because the hardware automatically clears ECR[ETHEREN] and we
1281 	 * have to restart it here.  It should never happen.
1282 	 */
1283 	if (ier & FEC_IER_EBERR) {
1284 		WR4(sc, FEC_IER_REG, FEC_IER_EBERR);
1285 		device_printf(sc->dev,
1286 		    "Ethernet DMA error, restarting controller.\n");
1287 		ffec_stop_locked(sc);
1288 		ffec_init_locked(sc);
1289 	}
1290 
1291 	FFEC_UNLOCK(sc);
1292 
1293 }
1294 
1295 static int
1296 ffec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1297 {
1298 	struct ffec_softc *sc;
1299 	struct mii_data *mii;
1300 	struct ifreq *ifr;
1301 	int mask, error;
1302 
1303 	sc = ifp->if_softc;
1304 	ifr = (struct ifreq *)data;
1305 
1306 	error = 0;
1307 	switch (cmd) {
1308 	case SIOCSIFFLAGS:
1309 		FFEC_LOCK(sc);
1310 		if (ifp->if_flags & IFF_UP) {
1311 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1312 				if ((ifp->if_flags ^ sc->if_flags) &
1313 				    (IFF_PROMISC | IFF_ALLMULTI))
1314 					ffec_setup_rxfilter(sc);
1315 			} else {
1316 				if (!sc->is_detaching)
1317 					ffec_init_locked(sc);
1318 			}
1319 		} else {
1320 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1321 				ffec_stop_locked(sc);
1322 		}
1323 		sc->if_flags = ifp->if_flags;
1324 		FFEC_UNLOCK(sc);
1325 		break;
1326 
1327 	case SIOCADDMULTI:
1328 	case SIOCDELMULTI:
1329 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1330 			FFEC_LOCK(sc);
1331 			ffec_setup_rxfilter(sc);
1332 			FFEC_UNLOCK(sc);
1333 		}
1334 		break;
1335 
1336 	case SIOCSIFMEDIA:
1337 	case SIOCGIFMEDIA:
1338 		mii = sc->mii_softc;
1339 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1340 		break;
1341 
1342 	case SIOCSIFCAP:
1343 		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1344 		if (mask & IFCAP_VLAN_MTU) {
1345 			/* No work to do except acknowledge the change took. */
1346 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
1347 		}
1348 		break;
1349 
1350 	default:
1351 		error = ether_ioctl(ifp, cmd, data);
1352 		break;
1353 	}
1354 
1355 	return (error);
1356 }
1357 
1358 static int
1359 ffec_detach(device_t dev)
1360 {
1361 	struct ffec_softc *sc;
1362 	bus_dmamap_t map;
1363 	int idx;
1364 
1365 	/*
1366 	 * NB: This function can be called internally to unwind a failure to
1367 	 * attach. Make sure a resource got allocated/created before destroying.
1368 	 */
1369 
1370 	sc = device_get_softc(dev);
1371 
1372 	if (sc->is_attached) {
1373 		FFEC_LOCK(sc);
1374 		sc->is_detaching = true;
1375 		ffec_stop_locked(sc);
1376 		FFEC_UNLOCK(sc);
1377 		callout_drain(&sc->ffec_callout);
1378 		ether_ifdetach(sc->ifp);
1379 	}
1380 
1381 	/* XXX no miibus detach? */
1382 
1383 	/* Clean up RX DMA resources and free mbufs. */
1384 	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
1385 		if ((map = sc->rxbuf_map[idx].map) != NULL) {
1386 			bus_dmamap_unload(sc->rxbuf_tag, map);
1387 			bus_dmamap_destroy(sc->rxbuf_tag, map);
1388 			m_freem(sc->rxbuf_map[idx].mbuf);
1389 		}
1390 	}
1391 	if (sc->rxbuf_tag != NULL)
1392 		bus_dma_tag_destroy(sc->rxbuf_tag);
1393 	if (sc->rxdesc_map != NULL) {
1394 		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
1395 		bus_dmamap_destroy(sc->rxdesc_tag, sc->rxdesc_map);
1396 	}
1397 	if (sc->rxdesc_tag != NULL)
1398 		bus_dma_tag_destroy(sc->rxdesc_tag);
1399 
1400 	/* Clean up TX DMA resources. */
1401 	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
1402 		if ((map = sc->txbuf_map[idx].map) != NULL) {
1403 			/* TX maps are already unloaded. */
1404 			bus_dmamap_destroy(sc->txbuf_tag, map);
1405 		}
1406 	}
1407 	if (sc->txbuf_tag != NULL)
1408 		bus_dma_tag_destroy(sc->txbuf_tag);
1409 	if (sc->txdesc_map != NULL) {
1410 		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
1411 		bus_dmamap_destroy(sc->txdesc_tag, sc->txdesc_map);
1412 	}
1413 	if (sc->txdesc_tag != NULL)
1414 		bus_dma_tag_destroy(sc->txdesc_tag);
1415 
1416 	/* Release bus resources. */
1417 	if (sc->intr_cookie)
1418 		bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie);
1419 
1420 	if (sc->irq_res != NULL)
1421 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
1422 
1423 	if (sc->mem_res != NULL)
1424 		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->mem_res);
1425 
1426 	FFEC_LOCK_DESTROY(sc);
1427 	return (0);
1428 }
1429 
1430 static int
1431 ffec_attach(device_t dev)
1432 {
1433 	struct ffec_softc *sc;
1434 	struct ifnet *ifp = NULL;
1435 	struct mbuf *m;
1436 	void *dummy;
1437 	phandle_t ofw_node;
1438 	int error, phynum, rid;
1439 	uint8_t eaddr[ETHER_ADDR_LEN];
1440 	uint32_t idx, mscr;
1441 
1442 	sc = device_get_softc(dev);
1443 	sc->dev = dev;
1444 
1445 	FFEC_LOCK_INIT(sc);
1446 
1447 	/*
1448 	 * There are differences in the implementation and features of the FEC
1449 	 * hardware on different SoCs, so figure out what type we are.
1450 	 */
1451 	sc->fectype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
1452 
1453 	/*
1454 	 * We have to be told what kind of electrical connection exists between
1455 	 * the MAC and PHY or we can't operate correctly.
1456 	 */
1457 	if ((ofw_node = ofw_bus_get_node(dev)) == -1) {
1458 		device_printf(dev, "Impossible: Can't find ofw bus node\n");
1459 		error = ENXIO;
1460 		goto out;
1461 	}
1462 	sc->phy_conn_type = mii_fdt_get_contype(ofw_node);
1463 	if (sc->phy_conn_type == MII_CONTYPE_UNKNOWN) {
1464 		device_printf(sc->dev, "No valid 'phy-mode' "
1465 		    "property found in FDT data for device.\n");
1466 		error = ENOATTR;
1467 		goto out;
1468 	}
1469 
1470 	callout_init_mtx(&sc->ffec_callout, &sc->mtx, 0);
1471 
1472 	/* Allocate bus resources for accessing the hardware. */
1473 	rid = 0;
1474 	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1475 	    RF_ACTIVE);
1476 	if (sc->mem_res == NULL) {
1477 		device_printf(dev, "could not allocate memory resources.\n");
1478 		error = ENOMEM;
1479 		goto out;
1480 	}
1481 	rid = 0;
1482 	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1483 	    RF_ACTIVE);
1484 	if (sc->irq_res == NULL) {
1485 		device_printf(dev, "could not allocate interrupt resources.\n");
1486 		error = ENOMEM;
1487 		goto out;
1488 	}
1489 
1490 	/*
1491 	 * Set up TX descriptor ring, descriptors, and dma maps.
1492 	 */
1493 	error = bus_dma_tag_create(
1494 	    bus_get_dma_tag(dev),	/* Parent tag. */
1495 	    FEC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
1496 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1497 	    BUS_SPACE_MAXADDR,		/* highaddr */
1498 	    NULL, NULL,			/* filter, filterarg */
1499 	    TX_DESC_SIZE, 1, 		/* maxsize, nsegments */
1500 	    TX_DESC_SIZE,		/* maxsegsize */
1501 	    0,				/* flags */
1502 	    NULL, NULL,			/* lockfunc, lockarg */
1503 	    &sc->txdesc_tag);
1504 	if (error != 0) {
1505 		device_printf(sc->dev,
1506 		    "could not create TX ring DMA tag.\n");
1507 		goto out;
1508 	}
1509 
1510 	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
1511 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->txdesc_map);
1512 	if (error != 0) {
1513 		device_printf(sc->dev,
1514 		    "could not allocate TX descriptor ring.\n");
1515 		goto out;
1516 	}
1517 
1518 	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map, sc->txdesc_ring,
1519 	    TX_DESC_SIZE, ffec_get1paddr, &sc->txdesc_ring_paddr, 0);
1520 	if (error != 0) {
1521 		device_printf(sc->dev,
1522 		    "could not load TX descriptor ring map.\n");
1523 		goto out;
1524 	}
1525 
1526 	error = bus_dma_tag_create(
1527 	    bus_get_dma_tag(dev),	/* Parent tag. */
1528 	    FEC_TXBUF_ALIGN, 0,		/* alignment, boundary */
1529 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1530 	    BUS_SPACE_MAXADDR,		/* highaddr */
1531 	    NULL, NULL,			/* filter, filterarg */
1532 	    MCLBYTES, 1, 		/* maxsize, nsegments */
1533 	    MCLBYTES,			/* maxsegsize */
1534 	    0,				/* flags */
1535 	    NULL, NULL,			/* lockfunc, lockarg */
1536 	    &sc->txbuf_tag);
1537 	if (error != 0) {
1538 		device_printf(sc->dev,
1539 		    "could not create TX ring DMA tag.\n");
1540 		goto out;
1541 	}
1542 
1543 	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
1544 		error = bus_dmamap_create(sc->txbuf_tag, 0,
1545 		    &sc->txbuf_map[idx].map);
1546 		if (error != 0) {
1547 			device_printf(sc->dev,
1548 			    "could not create TX buffer DMA map.\n");
1549 			goto out;
1550 		}
1551 		ffec_setup_txdesc(sc, idx, 0, 0);
1552 	}
1553 
1554 	/*
1555 	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
1556 	 */
1557 	error = bus_dma_tag_create(
1558 	    bus_get_dma_tag(dev),	/* Parent tag. */
1559 	    FEC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
1560 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1561 	    BUS_SPACE_MAXADDR,		/* highaddr */
1562 	    NULL, NULL,			/* filter, filterarg */
1563 	    RX_DESC_SIZE, 1, 		/* maxsize, nsegments */
1564 	    RX_DESC_SIZE,		/* maxsegsize */
1565 	    0,				/* flags */
1566 	    NULL, NULL,			/* lockfunc, lockarg */
1567 	    &sc->rxdesc_tag);
1568 	if (error != 0) {
1569 		device_printf(sc->dev,
1570 		    "could not create RX ring DMA tag.\n");
1571 		goto out;
1572 	}
1573 
1574 	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
1575 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rxdesc_map);
1576 	if (error != 0) {
1577 		device_printf(sc->dev,
1578 		    "could not allocate RX descriptor ring.\n");
1579 		goto out;
1580 	}
1581 
1582 	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map, sc->rxdesc_ring,
1583 	    RX_DESC_SIZE, ffec_get1paddr, &sc->rxdesc_ring_paddr, 0);
1584 	if (error != 0) {
1585 		device_printf(sc->dev,
1586 		    "could not load RX descriptor ring map.\n");
1587 		goto out;
1588 	}
1589 
1590 	error = bus_dma_tag_create(
1591 	    bus_get_dma_tag(dev),	/* Parent tag. */
1592 	    1, 0,			/* alignment, boundary */
1593 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1594 	    BUS_SPACE_MAXADDR,		/* highaddr */
1595 	    NULL, NULL,			/* filter, filterarg */
1596 	    MCLBYTES, 1, 		/* maxsize, nsegments */
1597 	    MCLBYTES,			/* maxsegsize */
1598 	    0,				/* flags */
1599 	    NULL, NULL,			/* lockfunc, lockarg */
1600 	    &sc->rxbuf_tag);
1601 	if (error != 0) {
1602 		device_printf(sc->dev,
1603 		    "could not create RX buf DMA tag.\n");
1604 		goto out;
1605 	}
1606 
1607 	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
1608 		error = bus_dmamap_create(sc->rxbuf_tag, 0,
1609 		    &sc->rxbuf_map[idx].map);
1610 		if (error != 0) {
1611 			device_printf(sc->dev,
1612 			    "could not create RX buffer DMA map.\n");
1613 			goto out;
1614 		}
1615 		if ((m = ffec_alloc_mbufcl(sc)) == NULL) {
1616 			device_printf(dev, "Could not alloc mbuf\n");
1617 			error = ENOMEM;
1618 			goto out;
1619 		}
1620 		if ((error = ffec_setup_rxbuf(sc, idx, m)) != 0) {
1621 			device_printf(sc->dev,
1622 			    "could not create new RX buffer.\n");
1623 			goto out;
1624 		}
1625 	}
1626 
1627 	/* Try to get the MAC address from the hardware before resetting it. */
1628 	ffec_get_hwaddr(sc, eaddr);
1629 
1630 	/* Reset the hardware.  Disables all interrupts. */
1631 	WR4(sc, FEC_ECR_REG, FEC_ECR_RESET);
1632 
1633 	/* Setup interrupt handler. */
1634 	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1635 	    NULL, ffec_intr, sc, &sc->intr_cookie);
1636 	if (error != 0) {
1637 		device_printf(dev, "could not setup interrupt handler.\n");
1638 		goto out;
1639 	}
1640 
1641 	/*
1642 	 * Set up the PHY control register.
1643 	 *
1644 	 * Speed formula for ENET is md_clock = mac_clock / ((N + 1) * 2).
1645 	 * Speed formula for FEC is  md_clock = mac_clock / (N * 2)
1646 	 *
1647 	 * XXX - Revisit this...
1648 	 *
1649 	 * For a Wandboard imx6 (ENET) I was originally using 4, but the uboot
1650 	 * code uses 10.  Both values seem to work, but I suspect many modern
1651 	 * PHY parts can do mdio at speeds far above the standard 2.5 MHz.
1652 	 *
1653 	 * Different imx manuals use confusingly different terminology (things
1654 	 * like "system clock" and "internal module clock") with examples that
1655 	 * use frequencies that have nothing to do with ethernet, giving the
1656 	 * vague impression that maybe the clock in question is the periphclock
1657 	 * or something.  In fact, on an imx53 development board (FEC),
1658 	 * measuring the mdio clock at the pin on the PHY and playing with
1659 	 * various divisors showed that the root speed was 66 MHz (clk_ipg_root
1660 	 * aka periphclock) and 13 was the right divisor.
1661 	 *
1662 	 * All in all, it seems likely that 13 is a safe divisor for now,
1663 	 * because if we really do need to base it on the peripheral clock
1664 	 * speed, then we need a platform-independent get-clock-freq API.
1665 	 */
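	/*
	 * Quick arithmetic check, assuming the 66 MHz root clock mentioned
	 * above: the FEC formula gives 66 MHz / (13 * 2) ~= 2.54 MHz and the
	 * ENET formula gives 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz, both right
	 * around the conventional 2.5 MHz MDC rate.
	 */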
1666 	mscr = 13 << FEC_MSCR_MII_SPEED_SHIFT;
1667 	if (OF_hasprop(ofw_node, "phy-disable-preamble")) {
1668 		mscr |= FEC_MSCR_DIS_PRE;
1669 		if (bootverbose)
1670 			device_printf(dev, "PHY preamble disabled\n");
1671 	}
1672 	WR4(sc, FEC_MSCR_REG, mscr);
1673 
1674 	/* Set up the ethernet interface. */
1675 	sc->ifp = ifp = if_alloc(IFT_ETHER);
1676 
1677 	ifp->if_softc = sc;
1678 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1679 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1680 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1681 	ifp->if_capenable = ifp->if_capabilities;
1682 	ifp->if_start = ffec_txstart;
1683 	ifp->if_ioctl = ffec_ioctl;
1684 	ifp->if_init = ffec_init;
1685 	IFQ_SET_MAXLEN(&ifp->if_snd, TX_DESC_COUNT - 1);
1686 	ifp->if_snd.ifq_drv_maxlen = TX_DESC_COUNT - 1;
1687 	IFQ_SET_READY(&ifp->if_snd);
1688 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1689 
1690 #if 0 /* XXX The hardware keeps stats we could use for these. */
1691 	ifp->if_linkmib = &sc->mibdata;
1692 	ifp->if_linkmiblen = sizeof(sc->mibdata);
1693 #endif
1694 
1695 	/* Set up the miigasket hardware (if any). */
1696 	ffec_miigasket_setup(sc);
1697 
1698 	/* Attach the mii driver. */
1699 	if (fdt_get_phyaddr(ofw_node, dev, &phynum, &dummy) != 0) {
1700 		phynum = MII_PHY_ANY;
1701 	}
1702 	error = mii_attach(dev, &sc->miibus, ifp, ffec_media_change,
1703 	    ffec_media_status, BMSR_DEFCAPMASK, phynum, MII_OFFSET_ANY,
1704 	    (sc->fectype & FECTYPE_MVF) ? MIIF_FORCEANEG : 0);
1705 	if (error != 0) {
1706 		device_printf(dev, "PHY attach failed\n");
1707 		goto out;
1708 	}
1709 	sc->mii_softc = device_get_softc(sc->miibus);
1710 
1711 	/* All ready to run, attach the ethernet interface. */
1712 	ether_ifattach(ifp, eaddr);
1713 	sc->is_attached = true;
1714 
1715 	error = 0;
1716 out:
1717 
1718 	if (error != 0)
1719 		ffec_detach(dev);
1720 
1721 	return (error);
1722 }
1723 
1724 static int
1725 ffec_probe(device_t dev)
1726 {
1727 	uintptr_t fectype;
1728 
1729 	if (!ofw_bus_status_okay(dev))
1730 		return (ENXIO);
1731 
1732 	fectype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
1733 	if (fectype == FECTYPE_NONE)
1734 		return (ENXIO);
1735 
1736 	device_set_desc(dev, (fectype & FECFLAG_GBE) ?
1737 	    "Freescale Gigabit Ethernet Controller" :
1738 	    "Freescale Fast Ethernet Controller");
1739 
1740 	return (BUS_PROBE_DEFAULT);
1741 }
1742 
1743 
1744 static device_method_t ffec_methods[] = {
1745 	/* Device interface. */
1746 	DEVMETHOD(device_probe,		ffec_probe),
1747 	DEVMETHOD(device_attach,	ffec_attach),
1748 	DEVMETHOD(device_detach,	ffec_detach),
1749 
1750 /*
1751 	DEVMETHOD(device_shutdown,	ffec_shutdown),
1752 	DEVMETHOD(device_suspend,	ffec_suspend),
1753 	DEVMETHOD(device_resume,	ffec_resume),
1754 */
1755 
1756 	/* MII interface. */
1757 	DEVMETHOD(miibus_readreg,	ffec_miibus_readreg),
1758 	DEVMETHOD(miibus_writereg,	ffec_miibus_writereg),
1759 	DEVMETHOD(miibus_statchg,	ffec_miibus_statchg),
1760 
1761 	DEVMETHOD_END
1762 };
1763 
1764 static driver_t ffec_driver = {
1765 	"ffec",
1766 	ffec_methods,
1767 	sizeof(struct ffec_softc)
1768 };
1769 
1770 static devclass_t ffec_devclass;
1771 
1772 DRIVER_MODULE(ffec, simplebus, ffec_driver, ffec_devclass, 0, 0);
1773 DRIVER_MODULE(miibus, ffec, miibus_driver, miibus_devclass, 0, 0);
1774 
1775 MODULE_DEPEND(ffec, ether, 1, 1, 1);
1776 MODULE_DEPEND(ffec, miibus, 1, 1, 1);
1777