xref: /openbsd/sys/dev/fdt/if_fec.c (revision 097a140d)
1 /* $OpenBSD: if_fec.c,v 1.11 2020/12/12 11:48:52 jan Exp $ */
2 /*
3  * Copyright (c) 2012-2013,2019 Patrick Wildt <patrick@blueri.se>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/sockio.h>
21 #include <sys/queue.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/evcount.h>
25 #include <sys/socket.h>
26 #include <sys/timeout.h>
27 #include <sys/mbuf.h>
28 #include <machine/intr.h>
29 #include <machine/bus.h>
30 #include <machine/fdt.h>
31 
32 #include "bpfilter.h"
33 
34 #include <net/if.h>
35 #include <net/if_media.h>
36 #if NBPFILTER > 0
37 #include <net/bpf.h>
38 #endif
39 
40 #include <netinet/in.h>
41 #include <netinet/if_ether.h>
42 
43 #include <dev/mii/mii.h>
44 #include <dev/mii/miivar.h>
45 #include <dev/mii/miidevs.h>
46 
47 #include <dev/ofw/openfirm.h>
48 #include <dev/ofw/ofw_clock.h>
49 #include <dev/ofw/ofw_gpio.h>
50 #include <dev/ofw/ofw_pinctrl.h>
51 #include <dev/ofw/fdt.h>
52 
53 /* configuration registers */
54 #define ENET_EIR		0x004
55 #define ENET_EIMR		0x008
56 #define ENET_RDAR		0x010
57 #define ENET_TDAR		0x014
58 #define ENET_ECR		0x024
59 #define ENET_MMFR		0x040
60 #define ENET_MSCR		0x044
61 #define ENET_MIBC		0x064
62 #define ENET_RCR		0x084
63 #define ENET_TCR		0x0C4
64 #define ENET_PALR		0x0E4
65 #define ENET_PAUR		0x0E8
66 #define ENET_OPD		0x0EC
67 #define ENET_IAUR		0x118
68 #define ENET_IALR		0x11C
69 #define ENET_GAUR		0x120
70 #define ENET_GALR		0x124
71 #define ENET_TFWR		0x144
72 #define ENET_RDSR		0x180
73 #define ENET_TDSR		0x184
74 #define ENET_MRBR		0x188
75 #define ENET_RSFL		0x190
76 #define ENET_RSEM		0x194
77 #define ENET_RAEM		0x198
78 #define ENET_RAFL		0x19C
79 #define ENET_TSEM		0x1A0
80 #define ENET_TAEM		0x1A4
81 #define ENET_TAFL		0x1A8
82 #define ENET_TIPG		0x1AC
83 #define ENET_FTRL		0x1B0
84 #define ENET_TACC		0x1C0
85 #define ENET_RACC		0x1C4
86 
87 #define ENET_RDAR_RDAR		(1 << 24)
88 #define ENET_TDAR_TDAR		(1 << 24)
89 #define ENET_ECR_RESET		(1 << 0)
90 #define ENET_ECR_ETHEREN	(1 << 1)
91 #define ENET_ECR_EN1588		(1 << 4)
92 #define ENET_ECR_SPEED		(1 << 5)
93 #define ENET_ECR_DBSWP		(1 << 8)
94 #define ENET_MMFR_TA		(2 << 16)
95 #define ENET_MMFR_RA_SHIFT	18
96 #define ENET_MMFR_PA_SHIFT	23
97 #define ENET_MMFR_OP_WR		(1 << 28)
98 #define ENET_MMFR_OP_RD		(2 << 28)
99 #define ENET_MMFR_ST		(1 << 30)
100 #define ENET_RCR_MII_MODE	(1 << 2)
101 #define ENET_RCR_PROM		(1 << 3)
102 #define ENET_RCR_FCE		(1 << 5)
103 #define ENET_RCR_RGMII_MODE	(1 << 6)
104 #define ENET_RCR_RMII_10T	(1 << 9)
105 #define ENET_RCR_MAX_FL(x)	(((x) & 0x3fff) << 16)
106 #define ENET_TCR_FDEN		(1 << 2)
107 #define ENET_EIR_MII		(1 << 23)
108 #define ENET_EIR_RXF		(1 << 25)
109 #define ENET_EIR_TXF		(1 << 27)
110 #define ENET_TFWR_STRFWD	(1 << 8)
111 #define ENET_RACC_SHIFT16	(1 << 7)
112 
113 /* statistics counters */
114 
115 /* 1588 control */
116 #define ENET_ATCR		0x400
117 #define ENET_ATVR		0x404
118 #define ENET_ATOFF		0x408
119 #define ENET_ATPER		0x40C
120 #define ENET_ATCOR		0x410
121 #define ENET_ATINC		0x414
122 #define ENET_ATSTMP		0x418
123 
124 /* capture / compare block */
125 #define ENET_TGSR		0x604
126 #define ENET_TCSR0		0x608
127 #define ENET_TCCR0		0x60C
128 #define ENET_TCSR1		0x610
129 #define ENET_TCCR1		0x614
130 #define ENET_TCSR2		0x618
131 #define ENET_TCCR2		0x61C
132 #define ENET_TCSR3		0x620
133 #define ENET_TCCR3		0x624
134 
135 #define ENET_MII_CLK		2500000
136 #define ENET_ALIGNMENT		16
137 
138 #define HREAD4(sc, reg)							\
139 	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
140 #define HWRITE4(sc, reg, val)						\
141 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
142 #define HSET4(sc, reg, bits)						\
143 	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
144 #define HCLR4(sc, reg, bits)						\
145 	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
146 
147 #define ENET_MAX_BUF_SIZE	1522
148 #define ENET_MAX_PKT_SIZE	1536
149 
150 #define ENET_ROUNDUP(size, unit) (((size) + (unit) - 1) & ~((unit) - 1))
151 
152 /* buffer descriptor status bits */
153 #define ENET_RXD_EMPTY		(1 << 15)
154 #define ENET_RXD_WRAP		(1 << 13)
155 #define ENET_RXD_INTR		(1 << 12)
156 #define ENET_RXD_LAST		(1 << 11)
157 #define ENET_RXD_MISS		(1 << 8)
158 #define ENET_RXD_BC		(1 << 7)
159 #define ENET_RXD_MC		(1 << 6)
160 #define ENET_RXD_LG		(1 << 5)
161 #define ENET_RXD_NO		(1 << 4)
162 #define ENET_RXD_CR		(1 << 2)
163 #define ENET_RXD_OV		(1 << 1)
164 #define ENET_RXD_TR		(1 << 0)
165 
166 #define ENET_TXD_READY		(1 << 15)
167 #define ENET_TXD_WRAP		(1 << 13)
168 #define ENET_TXD_INTR		(1 << 12)
169 #define ENET_TXD_LAST		(1 << 11)
170 #define ENET_TXD_TC		(1 << 10)
171 #define ENET_TXD_ABC		(1 << 9)
172 #define ENET_TXD_STATUS_MASK	0x3ff
173 
174 #ifdef ENET_ENHANCED_BD
175 /* enhanced */
176 #define ENET_RXD_INT		(1 << 23)
177 
178 #define ENET_TXD_INT		(1 << 30)
179 #endif
180 
/*
 * Per-descriptor software state: the DMA map plus the mbuf(s) whose
 * data it currently describes.
 */
struct fec_buf {
	bus_dmamap_t	 fb_map;	/* DMA map for the mbuf data */
	struct mbuf	*fb_m;		/* mbuf handed to the hardware */
	struct mbuf	*fb_m0;		/* original mbuf when Tx bounced a copy */
};
186 
187 /* what should we use? */
188 #define ENET_NTXDESC	256
189 #define ENET_NTXSEGS	16
190 #define ENET_NRXDESC	256
191 
/*
 * A single contiguous DMA-able memory region (used for the Tx and Rx
 * descriptor rings), with both kernel-virtual and device views.
 */
struct fec_dmamem {
	bus_dmamap_t		 fdm_map;	/* load map for the region */
	bus_dma_segment_t	 fdm_seg;	/* backing physical segment */
	size_t			 fdm_size;	/* size in bytes */
	caddr_t			 fdm_kva;	/* kernel-virtual address */
};
/* Accessors: map, length, device (DMA) address and kernel address. */
#define ENET_DMA_MAP(_fdm)	((_fdm)->fdm_map)
#define ENET_DMA_LEN(_fdm)	((_fdm)->fdm_size)
#define ENET_DMA_DVA(_fdm)	((_fdm)->fdm_map->dm_segs[0].ds_addr)
#define ENET_DMA_KVA(_fdm)	((void *)(_fdm)->fdm_kva)
202 
/*
 * Hardware buffer descriptor, shared with the MAC's DMA engine.
 * Layout must match the controller; the optional trailing fields are
 * only present when enhanced (IEEE 1588) descriptors are enabled.
 */
struct fec_desc {
	uint16_t fd_len;		/* payload's length in bytes */
	uint16_t fd_status;		/* BD's status (see datasheet) */
	uint32_t fd_addr;		/* payload's buffer address */
#ifdef ENET_ENHANCED_BD
	uint32_t fd_enhanced_status;	/* enhanced status with IEEE 1588 */
	uint32_t fd_reserved0;		/* reserved */
	uint32_t fd_update_done;	/* buffer descriptor update done */
	uint32_t fd_timestamp;		/* IEEE 1588 timestamp */
	uint32_t fd_reserved1[2];	/* reserved */
#endif
};
215 
/*
 * Per-device softc.  The Tx side is a producer/consumer ring guarded by
 * sc_tx_cnt; the Rx side uses if_rxr accounting.
 */
struct fec_softc {
	struct device		sc_dev;		/* generic device glue */
	struct arpcom		sc_ac;		/* ethernet common state */
	struct mii_data		sc_mii;		/* MII bus state */
	int			sc_node;	/* FDT node of this device */
	bus_space_tag_t		sc_iot;		/* register space tag */
	bus_space_handle_t	sc_ioh;		/* mapped register space */
	void			*sc_ih[3]; /* Interrupt handler */
	bus_dma_tag_t		sc_dmat;	/* DMA tag for rings/bufs */

	struct fec_dmamem	*sc_txring;	/* Tx descriptor ring memory */
	struct fec_buf		*sc_txbuf;	/* Tx software state array */
	struct fec_desc		*sc_txdesc;	/* KVA of Tx descriptors */
	int			 sc_tx_prod;	/* next Tx slot to fill */
	int			 sc_tx_cnt;	/* descriptors in flight */
	int			 sc_tx_cons;	/* next Tx slot to reclaim */
	int			 sc_tx_bounce;	/* copy mbufs before Tx (i.MX6) */

	struct fec_dmamem	*sc_rxring;	/* Rx descriptor ring memory */
	struct fec_buf		*sc_rxbuf;	/* Rx software state array */
	struct fec_desc		*sc_rxdesc;	/* KVA of Rx descriptors */
	int			 sc_rx_prod;	/* next Rx slot to refill */
	struct if_rxring	 sc_rx_ring;	/* Rx ring accounting */
	int			 sc_rx_cons;	/* next Rx slot to process */

	struct timeout		sc_tick;	/* periodic MII tick */
	uint32_t		sc_phy_speed;	/* MSCR MII clock divider */
};
244 
245 struct fec_softc *fec_sc;
246 
247 int fec_match(struct device *, void *, void *);
248 void fec_attach(struct device *, struct device *, void *);
249 void fec_phy_init(struct fec_softc *, struct mii_softc *);
250 int fec_ioctl(struct ifnet *, u_long, caddr_t);
251 void fec_start(struct ifnet *);
252 int fec_encap(struct fec_softc *, struct mbuf *, int *);
253 void fec_init_txd(struct fec_softc *);
254 void fec_init_rxd(struct fec_softc *);
255 void fec_init(struct fec_softc *);
256 void fec_stop(struct fec_softc *);
257 void fec_iff(struct fec_softc *);
258 int fec_intr(void *);
259 void fec_tx_proc(struct fec_softc *);
260 void fec_rx_proc(struct fec_softc *);
261 void fec_tick(void *);
262 int fec_miibus_readreg(struct device *, int, int);
263 void fec_miibus_writereg(struct device *, int, int, int);
264 void fec_miibus_statchg(struct device *);
265 int fec_ifmedia_upd(struct ifnet *);
266 void fec_ifmedia_sts(struct ifnet *, struct ifmediareq *);
267 struct fec_dmamem *fec_dmamem_alloc(struct fec_softc *, bus_size_t, bus_size_t);
268 void fec_dmamem_free(struct fec_softc *, struct fec_dmamem *);
269 struct mbuf *fec_alloc_mbuf(struct fec_softc *, bus_dmamap_t);
270 void fec_fill_rx_ring(struct fec_softc *);
271 
/* autoconf(9) attachment glue */
struct cfattach fec_ca = {
	sizeof (struct fec_softc), fec_match, fec_attach
};

struct cfdriver fec_cd = {
	NULL, "fec", DV_IFNET
};
279 
280 int
281 fec_match(struct device *parent, void *match, void *aux)
282 {
283 	struct fdt_attach_args *faa = aux;
284 
285 	return (OF_is_compatible(faa->fa_node, "fsl,imx6q-fec") ||
286 	    OF_is_compatible(faa->fa_node, "fsl,imx6sx-fec") ||
287 	    OF_is_compatible(faa->fa_node, "fsl,imx8mq-fec"));
288 }
289 
/*
 * Attach: map registers, enable clocks, optionally strobe the PHY reset
 * GPIO, read the MAC address from the device tree, reset the MAC, set up
 * interrupts and DMA rings, and attach the MII bus and network interface.
 * The ordering here matters: the MAC address must be fetched before the
 * controller reset, and the MII clock divider must be programmed before
 * mii_attach() probes the bus.
 */
void
fec_attach(struct device *parent, struct device *self, void *aux)
{
	struct fec_softc *sc = (struct fec_softc *) self;
	struct fdt_attach_args *faa = aux;
	struct fec_buf *txb, *rxb;
	struct mii_data *mii;
	struct mii_softc *child;
	struct ifnet *ifp;
	uint32_t phy_reset_gpio[3];
	uint32_t phy_reset_duration;
	int i, s;

	if (faa->fa_nreg < 1)
		return;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("fec_attach: bus_space_map failed!");

	sc->sc_dmat = faa->fa_dmat;

	pinctrl_byname(faa->fa_node, "default");

	/* power it up */
	clock_enable_all(faa->fa_node);

	/* reset PHY */
	if (OF_getpropintarray(faa->fa_node, "phy-reset-gpios", phy_reset_gpio,
	    sizeof(phy_reset_gpio)) == sizeof(phy_reset_gpio)) {
		/* Reset pulse width in ms; clamp nonsense values to 1ms. */
		phy_reset_duration = OF_getpropint(faa->fa_node,
		    "phy-reset-duration", 1);
		if (phy_reset_duration > 1000)
			phy_reset_duration = 1;

		/*
		 * The Linux people really screwed the pooch here.
		 * The Linux kernel always treats the gpio as
		 * active-low, even if it is marked as active-high in
		 * the device tree.  As a result the device tree for
		 * many boards incorrectly marks the gpio as
		 * active-high.
		 */
		phy_reset_gpio[2] = GPIO_ACTIVE_LOW;
		gpio_controller_config_pin(phy_reset_gpio, GPIO_CONFIG_OUTPUT);

		/*
		 * On some Cubox-i machines we need to hold the PHY in
		 * reset a little bit longer than specified.
		 */
		gpio_controller_set_pin(phy_reset_gpio, 1);
		delay((phy_reset_duration + 1) * 1000);
		gpio_controller_set_pin(phy_reset_gpio, 0);
		delay(1000);
	}
	printf("\n");

	/* Figure out the hardware address. Must happen before reset. */
	OF_getprop(faa->fa_node, "local-mac-address", sc->sc_ac.ac_enaddr,
	    sizeof(sc->sc_ac.ac_enaddr));

	/* reset the controller; RESET self-clears once ETHEREN drops */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	/* mask and ack all interrupts until fec_init() enables them */
	HWRITE4(sc, ENET_EIMR, 0);
	HWRITE4(sc, ENET_EIR, 0xffffffff);

	sc->sc_ih[0] = fdt_intr_establish_idx(faa->fa_node, 0, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);
	sc->sc_ih[1] = fdt_intr_establish_idx(faa->fa_node, 1, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);
	sc->sc_ih[2] = fdt_intr_establish_idx(faa->fa_node, 2, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);

	/* Tx bounce buffer to align to 16. */
	if (OF_is_compatible(faa->fa_node, "fsl,imx6q-fec"))
		sc->sc_tx_bounce = 1;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = fec_dmamem_alloc(sc,
	    ENET_NTXDESC * sizeof(struct fec_desc), 64);
	if (sc->sc_txring == NULL) {
		printf("%s: could not allocate Tx descriptor ring\n",
		    sc->sc_dev.dv_xname);
		goto bad;
	}
	sc->sc_txdesc = ENET_DMA_KVA(sc->sc_txring);

	/* Allocate Tx descriptors. */
	sc->sc_txbuf = malloc(sizeof(struct fec_buf) * ENET_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < ENET_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, ENET_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->fb_map);
		txb->fb_m = txb->fb_m0 = NULL;
	}

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = fec_dmamem_alloc(sc,
	    ENET_NRXDESC * sizeof(struct fec_desc), 64);
	if (sc->sc_rxring == NULL) {
		printf("%s: could not allocate Rx descriptor ring\n",
		    sc->sc_dev.dv_xname);
		/* unwind the Tx allocations done above */
		for (i = 0; i < ENET_NTXDESC; i++) {
			txb = &sc->sc_txbuf[i];
			bus_dmamap_destroy(sc->sc_dmat, txb->fb_map);
		}
		free(sc->sc_txbuf, M_DEVBUF,
		    sizeof(struct fec_buf) * ENET_NTXDESC);
		fec_dmamem_free(sc, sc->sc_txring);
		goto bad;
	}
	sc->sc_rxdesc = ENET_DMA_KVA(sc->sc_rxring);

	/* Allocate Rx descriptors. */
	sc->sc_rxbuf = malloc(sizeof(struct fec_buf) * ENET_NRXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < ENET_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->fb_map);
		rxb->fb_m = NULL;
	}

	s = splnet();

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fec_ioctl;
	ifp->if_start = fec_start;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf("%s: address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/*
	 * Initialize the MII clock.  The formula is:
	 *
	 * ENET_MII_CLK = ref_freq / ((phy_speed + 1) x 2)
	 * phy_speed = (((ref_freq / ENET_MII_CLK) / 2) - 1)
	 */
	sc->sc_phy_speed = clock_get_frequency(sc->sc_node, "ipg");
	sc->sc_phy_speed = (sc->sc_phy_speed + (ENET_MII_CLK - 1)) / ENET_MII_CLK;
	sc->sc_phy_speed = (sc->sc_phy_speed / 2) - 1;
	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

	/* Initialize MII/media info. */
	mii = &sc->sc_mii;
	mii->mii_ifp = ifp;
	mii->mii_readreg = fec_miibus_readreg;
	mii->mii_writereg = fec_miibus_writereg;
	mii->mii_statchg = fec_miibus_statchg;

	ifmedia_init(&mii->mii_media, 0, fec_ifmedia_upd, fec_ifmedia_sts);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	/* Apply PHY-specific fixups to the first PHY found, if any. */
	child = LIST_FIRST(&mii->mii_phys);
	if (child)
		fec_phy_init(sc, child);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
	splx(s);

	timeout_set(&sc->sc_tick, fec_tick, sc);

	fec_sc = sc;
	return;

bad:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
}
475 
476 void
477 fec_phy_init(struct fec_softc *sc, struct mii_softc *child)
478 {
479 	struct device *dev = (struct device *)sc;
480 	int phy = child->mii_phy;
481 	uint32_t reg;
482 
483 	if (child->mii_oui == MII_OUI_ATHEROS &&
484 	    child->mii_model == MII_MODEL_ATHEROS_AR8035) {
485 		/* disable SmartEEE */
486 		fec_miibus_writereg(dev, phy, 0x0d, 0x0003);
487 		fec_miibus_writereg(dev, phy, 0x0e, 0x805d);
488 		fec_miibus_writereg(dev, phy, 0x0d, 0x4003);
489 		reg = fec_miibus_readreg(dev, phy, 0x0e);
490 		fec_miibus_writereg(dev, phy, 0x0e, reg & ~0x0100);
491 
492 		/* enable 125MHz clk output */
493 		fec_miibus_writereg(dev, phy, 0x0d, 0x0007);
494 		fec_miibus_writereg(dev, phy, 0x0e, 0x8016);
495 		fec_miibus_writereg(dev, phy, 0x0d, 0x4007);
496 
497 		reg = fec_miibus_readreg(dev, phy, 0x0e) & 0xffe3;
498 		fec_miibus_writereg(dev, phy, 0x0e, reg | 0x18);
499 
500 		/* tx clock delay */
501 		fec_miibus_writereg(dev, phy, 0x1d, 0x0005);
502 		reg = fec_miibus_readreg(dev, phy, 0x1e);
503 		fec_miibus_writereg(dev, phy, 0x1e, reg | 0x0100);
504 
505 		PHY_RESET(child);
506 	}
507 
508 	if (child->mii_oui == MII_OUI_MICREL &&
509 	    child->mii_model == MII_MODEL_MICREL_KSZ9021) {
510 		uint32_t rxc, rxdv, txc, txen;
511 		uint32_t rxd0, rxd1, rxd2, rxd3;
512 		uint32_t txd0, txd1, txd2, txd3;
513 		uint32_t val;
514 
515 		rxc = OF_getpropint(sc->sc_node, "rxc-skew-ps", 1400) / 200;
516 		rxdv = OF_getpropint(sc->sc_node, "rxdv-skew-ps", 1400) / 200;
517 		txc = OF_getpropint(sc->sc_node, "txc-skew-ps", 1400) / 200;
518 		txen = OF_getpropint(sc->sc_node, "txen-skew-ps", 1400) / 200;
519 		rxd0 = OF_getpropint(sc->sc_node, "rxd0-skew-ps", 1400) / 200;
520 		rxd1 = OF_getpropint(sc->sc_node, "rxd1-skew-ps", 1400) / 200;
521 		rxd2 = OF_getpropint(sc->sc_node, "rxd2-skew-ps", 1400) / 200;
522 		rxd3 = OF_getpropint(sc->sc_node, "rxd3-skew-ps", 1400) / 200;
523 		txd0 = OF_getpropint(sc->sc_node, "txd0-skew-ps", 1400) / 200;
524 		txd1 = OF_getpropint(sc->sc_node, "txd1-skew-ps", 1400) / 200;
525 		txd2 = OF_getpropint(sc->sc_node, "txd2-skew-ps", 1400) / 200;
526 		txd3 = OF_getpropint(sc->sc_node, "txd3-skew-ps", 1400) / 200;
527 
528 		val = ((rxc & 0xf) << 12) | ((rxdv & 0xf) << 8) |
529 		    ((txc & 0xf) << 4) | ((txen & 0xf) << 0);
530 		fec_miibus_writereg(dev, phy, 0x0b, 0x8104);
531 		fec_miibus_writereg(dev, phy, 0x0c, val);
532 
533 		val = ((rxd3 & 0xf) << 12) | ((rxd2 & 0xf) << 8) |
534 		    ((rxd1 & 0xf) << 4) | ((rxd0 & 0xf) << 0);
535 		fec_miibus_writereg(dev, phy, 0x0b, 0x8105);
536 		fec_miibus_writereg(dev, phy, 0x0c, val);
537 
538 		val = ((txd3 & 0xf) << 12) | ((txd2 & 0xf) << 8) |
539 		    ((txd1 & 0xf) << 4) | ((txd0 & 0xf) << 0);
540 		fec_miibus_writereg(dev, phy, 0x0b, 0x8106);
541 		fec_miibus_writereg(dev, phy, 0x0c, val);
542 	}
543 
544 	if (child->mii_oui == MII_OUI_MICREL &&
545 	    child->mii_model == MII_MODEL_MICREL_KSZ9031) {
546 		uint32_t rxc, rxdv, txc, txen;
547 		uint32_t rxd0, rxd1, rxd2, rxd3;
548 		uint32_t txd0, txd1, txd2, txd3;
549 		uint32_t val;
550 
551 		rxc = OF_getpropint(sc->sc_node, "rxc-skew-ps", 900) / 60;
552 		rxdv = OF_getpropint(sc->sc_node, "rxdv-skew-ps", 420) / 60;
553 		txc = OF_getpropint(sc->sc_node, "txc-skew-ps", 900) / 60;
554 		txen = OF_getpropint(sc->sc_node, "txen-skew-ps", 420) / 60;
555 		rxd0 = OF_getpropint(sc->sc_node, "rxd0-skew-ps", 420) / 60;
556 		rxd1 = OF_getpropint(sc->sc_node, "rxd1-skew-ps", 420) / 60;
557 		rxd2 = OF_getpropint(sc->sc_node, "rxd2-skew-ps", 420) / 60;
558 		rxd3 = OF_getpropint(sc->sc_node, "rxd3-skew-ps", 420) / 60;
559 		txd0 = OF_getpropint(sc->sc_node, "txd0-skew-ps", 420) / 60;
560 		txd1 = OF_getpropint(sc->sc_node, "txd1-skew-ps", 420) / 60;
561 		txd2 = OF_getpropint(sc->sc_node, "txd2-skew-ps", 420) / 60;
562 		txd3 = OF_getpropint(sc->sc_node, "txd3-skew-ps", 420) / 60;
563 
564 		val = ((rxdv & 0xf) << 4) || ((txen & 0xf) << 0);
565 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
566 		fec_miibus_writereg(dev, phy, 0x0e, 0x0004);
567 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
568 		fec_miibus_writereg(dev, phy, 0x0e, val);
569 
570 		val = ((rxd3 & 0xf) << 12) | ((rxd2 & 0xf) << 8) |
571 		    ((rxd1 & 0xf) << 4) | ((rxd0 & 0xf) << 0);
572 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
573 		fec_miibus_writereg(dev, phy, 0x0e, 0x0005);
574 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
575 		fec_miibus_writereg(dev, phy, 0x0e, val);
576 
577 		val = ((txd3 & 0xf) << 12) | ((txd2 & 0xf) << 8) |
578 		    ((txd1 & 0xf) << 4) | ((txd0 & 0xf) << 0);
579 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
580 		fec_miibus_writereg(dev, phy, 0x0e, 0x0006);
581 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
582 		fec_miibus_writereg(dev, phy, 0x0e, val);
583 
584 		val = ((txc & 0x1f) << 5) || ((rxc & 0x1f) << 0);
585 		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
586 		fec_miibus_writereg(dev, phy, 0x0e, 0x0008);
587 		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
588 		fec_miibus_writereg(dev, phy, 0x0e, val);
589 	}
590 }
591 
592 void
593 fec_init_rxd(struct fec_softc *sc)
594 {
595 	struct fec_desc *rxd;
596 
597 	sc->sc_rx_prod = sc->sc_rx_cons = 0;
598 
599 	memset(sc->sc_rxdesc, 0, ENET_DMA_LEN(sc->sc_rxring));
600 	rxd = &sc->sc_rxdesc[ENET_NRXDESC - 1];
601 	rxd->fd_status = ENET_RXD_WRAP;
602 }
603 
604 void
605 fec_init_txd(struct fec_softc *sc)
606 {
607 	struct fec_desc *txd;
608 
609 	sc->sc_tx_prod = sc->sc_tx_cons = 0;
610 	sc->sc_tx_cnt = 0;
611 
612 	memset(sc->sc_txdesc, 0, ENET_DMA_LEN(sc->sc_txring));
613 	txd = &sc->sc_txdesc[ENET_NTXDESC - 1];
614 	txd->fd_status = ENET_TXD_WRAP;
615 }
616 
/*
 * (Re)initialize and start the controller: reset the MAC, program the
 * station address and descriptor rings, configure frame limits, FIFO
 * thresholds and duplex, then enable the receiver and Tx/Rx interrupts.
 * Register writes below follow the reset; their order matters.
 */
void
fec_init(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int speed = 0;

	/* reset the controller; RESET self-clears once ETHEREN drops */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	/* set hw address */
	HWRITE4(sc, ENET_PALR,
	    (sc->sc_ac.ac_enaddr[0] << 24) |
	    (sc->sc_ac.ac_enaddr[1] << 16) |
	    (sc->sc_ac.ac_enaddr[2] << 8) |
	     sc->sc_ac.ac_enaddr[3]);
	HWRITE4(sc, ENET_PAUR,
	    (sc->sc_ac.ac_enaddr[4] << 24) |
	    (sc->sc_ac.ac_enaddr[5] << 16));

	/* clear outstanding interrupts */
	HWRITE4(sc, ENET_EIR, 0xffffffff);

	/* set max receive buffer size, 3-0 bits always zero for alignment */
	HWRITE4(sc, ENET_MRBR, ENET_MAX_PKT_SIZE);

	/* init descriptor */
	fec_init_txd(sc);
	fec_init_rxd(sc);

	/* fill RX ring */
	if_rxr_init(&sc->sc_rx_ring, 2, ENET_NRXDESC);
	fec_fill_rx_ring(sc);

	/* flush both rings to memory before handing them to the device */
	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_txring),
	    0, ENET_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_rxring),
	    0, ENET_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREWRITE);

	/* set descriptor */
	HWRITE4(sc, ENET_TDSR, ENET_DMA_DVA(sc->sc_txring));
	HWRITE4(sc, ENET_RDSR, ENET_DMA_DVA(sc->sc_rxring));

	/* set it to full-duplex */
	HWRITE4(sc, ENET_TCR, ENET_TCR_FDEN);

	/*
	 * Set max frame length to 1518 or 1522 with VLANs,
	 * pause frames and promisc mode.
	 * XXX: RGMII mode - phy dependant
	 */
	HWRITE4(sc, ENET_RCR,
	    ENET_RCR_MAX_FL(1522) | ENET_RCR_RGMII_MODE | ENET_RCR_MII_MODE |
	    ENET_RCR_FCE);

	/* reprogram the MII clock divider (reset clears MSCR) */
	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

	/* shift received frames by 2 bytes so the IP header is aligned */
	HWRITE4(sc, ENET_RACC, ENET_RACC_SHIFT16);
	HWRITE4(sc, ENET_FTRL, ENET_MAX_BUF_SIZE);

	/* RX FIFO treshold and pause */
	HWRITE4(sc, ENET_RSEM, 0x84);
	HWRITE4(sc, ENET_RSFL, 16);
	HWRITE4(sc, ENET_RAEM, 8);
	HWRITE4(sc, ENET_RAFL, 8);
	HWRITE4(sc, ENET_OPD, 0xFFF0);

	/* do store and forward, only i.MX6, needs to be set correctly else */
	HWRITE4(sc, ENET_TFWR, ENET_TFWR_STRFWD);

	/* enable gigabit-ethernet and set it to support little-endian */
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit */
		speed |= ENET_ECR_SPEED;
		break;
	default:
		speed &= ~ENET_ECR_SPEED;
	}
	HWRITE4(sc, ENET_ECR, ENET_ECR_ETHEREN | speed | ENET_ECR_DBSWP);

#ifdef ENET_ENHANCED_BD
	HSET4(sc, ENET_ECR, ENET_ECR_EN1588);
#endif

	/* rx descriptors are ready */
	HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);

	/* program promiscuous mode and multicast filters */
	fec_iff(sc);

	timeout_add_sec(&sc->sc_tick, 1);

	/* Indicate we are up and running. */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* enable interrupts for tx/rx */
	HWRITE4(sc, ENET_EIMR, ENET_EIR_TXF | ENET_EIR_RXF);

	/* kick off any packets already queued */
	fec_start(ifp);
}
719 
/*
 * Stop the controller: mark the interface down, reset the MAC (which
 * halts DMA), then unload and free every mbuf still held by the Tx and
 * Rx rings.  The reset must complete before the maps are torn down.
 */
void
fec_stop(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct fec_buf *txb, *rxb;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_del(&sc->sc_tick);

	/* reset the controller; RESET self-clears once ETHEREN drops */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	/* keep the MII clock running so PHY access still works */
	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

	for (i = 0; i < ENET_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->fb_m == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, txb->fb_map, 0,
		    txb->fb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->fb_map);
		m_freem(txb->fb_m);
		m_freem(txb->fb_m0);	/* NULL when Tx was not bounced */
		txb->fb_m = txb->fb_m0 = NULL;
	}
	for (i = 0; i < ENET_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->fb_m == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, rxb->fb_map, 0,
		    rxb->fb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->fb_map);
		if_rxr_put(&sc->sc_rx_ring, 1);
		rxb->fb_m = NULL;
	}
}
765 
766 void
767 fec_iff(struct fec_softc *sc)
768 {
769 	struct arpcom *ac = &sc->sc_ac;
770 	struct ifnet *ifp = &sc->sc_ac.ac_if;
771 	struct ether_multi *enm;
772 	struct ether_multistep step;
773 	uint64_t ghash = 0, ihash = 0;
774 	uint32_t h;
775 
776 	ifp->if_flags &= ~IFF_ALLMULTI;
777 
778 	if (ifp->if_flags & IFF_PROMISC) {
779 		ifp->if_flags |= IFF_ALLMULTI;
780 		ihash = 0xffffffffffffffffLLU;
781 	} else if (ac->ac_multirangecnt > 0) {
782 		ifp->if_flags |= IFF_ALLMULTI;
783 		ghash = 0xffffffffffffffffLLU;
784 	} else {
785 		ETHER_FIRST_MULTI(step, ac, enm);
786 		while (enm != NULL) {
787 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
788 
789 			ghash |= 1LLU << (((uint8_t *)&h)[3] >> 2);
790 
791 			ETHER_NEXT_MULTI(step, enm);
792 		}
793 	}
794 
795 	HWRITE4(sc, ENET_GAUR, (uint32_t)(ghash >> 32));
796 	HWRITE4(sc, ENET_GALR, (uint32_t)ghash);
797 
798 	HWRITE4(sc, ENET_IAUR, (uint32_t)(ihash >> 32));
799 	HWRITE4(sc, ENET_IALR, (uint32_t)ihash);
800 }
801 
802 int
803 fec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
804 {
805 	struct fec_softc *sc = ifp->if_softc;
806 	struct ifreq *ifr = (struct ifreq *)data;
807 	int s, error = 0;
808 
809 	s = splnet();
810 
811 	switch (cmd) {
812 	case SIOCSIFADDR:
813 		ifp->if_flags |= IFF_UP;
814 		if (!(ifp->if_flags & IFF_RUNNING))
815 			fec_init(sc);
816 		break;
817 
818 	case SIOCSIFFLAGS:
819 		if (ifp->if_flags & IFF_UP) {
820 			if (ifp->if_flags & IFF_RUNNING)
821 				error = ENETRESET;
822 			else
823 				fec_init(sc);
824 		} else {
825 			if (ifp->if_flags & IFF_RUNNING)
826 				fec_stop(sc);
827 		}
828 		break;
829 
830 	case SIOCGIFMEDIA:
831 	case SIOCSIFMEDIA:
832 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
833 		break;
834 
835 	default:
836 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
837 	}
838 
839 	if (error == ENETRESET) {
840 		if (ifp->if_flags & IFF_RUNNING)
841 			fec_iff(sc);
842 		error = 0;
843 	}
844 
845 	splx(s);
846 	return(error);
847 }
848 
/*
 * Transmit start routine.  Dequeues packets while the descriptor at the
 * producer index is free and hands them to fec_encap().  The
 * ifq_deq_begin/rollback/commit protocol below is order-critical:
 * ENOBUFS rolls the packet back and marks the queue active-pending,
 * EFBIG commits and drops it, success commits and taps bpf.
 */
void
fec_start(struct ifnet *ifp)
{
	struct fec_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int error, idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	idx = sc->sc_tx_prod;
	while ((sc->sc_txdesc[idx].fd_status & ENET_TXD_READY) == 0) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		/* fec_encap() advances idx on success */
		error = fec_encap(sc, m, &idx);
		if (error == ENOBUFS) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (error == EFBIG) {
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

		ifq_deq_commit(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
897 
/*
 * Map an mbuf chain into Tx descriptors starting at *idx.  On the
 * i.MX6 the packet is first bounced into a fresh copy (sc_tx_bounce)
 * for alignment; the copy is kept in fb_m and the original in fb_m0 so
 * both can be freed at completion.  Returns 0 on success (advancing
 * *idx past the used descriptors), ENOBUFS if no copy or not enough
 * descriptors are available, or EFBIG if the chain cannot be loaded
 * even after defragmenting.
 */
int
fec_encap(struct fec_softc *sc, struct mbuf *m0, int *idx)
{
	struct fec_desc *txd, *txd_start;
	bus_dmamap_t map;
	struct mbuf *m;
	int cur, frag, i;
	int ret;

	m = m0;
	cur = frag = *idx;
	map = sc->sc_txbuf[cur].fb_map;

	if (sc->sc_tx_bounce) {
		m = m_dup_pkt(m0, 0, M_DONTWAIT);
		if (m == NULL) {
			ret = ENOBUFS;
			goto fail;
		}
	}

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT)) {
			ret = EFBIG;
			goto fail;
		}
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
			ret = EFBIG;
			goto fail;
		}
	}

	/* keep two descriptors spare so prod never catches cons */
	if (map->dm_nsegs > (ENET_NTXDESC - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		ret = ENOBUFS;
		goto fail;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Fill one descriptor per segment.  The first descriptor's READY
	 * bit is deliberately withheld until all others are written, so
	 * the hardware never sees a half-built chain.
	 */
	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		txd->fd_addr = map->dm_segs[i].ds_addr;
		txd->fd_len = map->dm_segs[i].ds_len;
		txd->fd_status &= ENET_TXD_WRAP;	/* preserve only WRAP */
		if (i == (map->dm_nsegs - 1))
			txd->fd_status |= ENET_TXD_LAST | ENET_TXD_TC;
		if (i != 0)
			txd->fd_status |= ENET_TXD_READY;

		bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (ENET_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

	/* publish the chain to the hardware and kick the Tx DMA */
	txd_start->fd_status |= ENET_TXD_READY;
	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	HWRITE4(sc, ENET_TDAR, ENET_TDAR_TDAR);

	/*
	 * Park the (now loaded) map on the LAST slot of the chain, where
	 * completion looks for it, and move that slot's idle map to the
	 * first slot so every slot always owns exactly one map.
	 */
	KASSERT(sc->sc_txbuf[cur].fb_m == NULL);
	KASSERT(sc->sc_txbuf[cur].fb_m0 == NULL);
	sc->sc_txbuf[*idx].fb_map = sc->sc_txbuf[cur].fb_map;
	sc->sc_txbuf[cur].fb_map = map;
	sc->sc_txbuf[cur].fb_m = m;
	if (m != m0)
		sc->sc_txbuf[cur].fb_m0 = m0;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = frag;

	return (0);

fail:
	/* free the bounce copy only; the caller still owns m0 */
	if (m != m0)
		m_freem(m);
	return (ret);
}
988 
989 /*
990  * Established by attachment driver at interrupt priority IPL_NET.
991  */
992 int
993 fec_intr(void *arg)
994 {
995 	struct fec_softc *sc = arg;
996 	struct ifnet *ifp = &sc->sc_ac.ac_if;
997 	u_int32_t status;
998 
999 	/* Find out which interrupts are pending. */
1000 	status = HREAD4(sc, ENET_EIR);
1001 
1002 	/* Acknowledge the interrupts we are about to handle. */
1003 	status &= (ENET_EIR_RXF | ENET_EIR_TXF);
1004 	HWRITE4(sc, ENET_EIR, status);
1005 
1006 	/*
1007 	 * Handle incoming packets.
1008 	 */
1009 	if (ISSET(status, ENET_EIR_RXF))
1010 		fec_rx_proc(sc);
1011 
1012 	/*
1013 	 * Handle transmitted packets.
1014 	 */
1015 	if (ISSET(status, ENET_EIR_TXF))
1016 		fec_tx_proc(sc);
1017 
1018 	/* Try to transmit. */
1019 	if (ifp->if_flags & IFF_RUNNING && !ifq_empty(&ifp->if_snd))
1020 		fec_start(ifp);
1021 
1022 	return 1;
1023 }
1024 
/*
 * Reclaim completed transmit descriptors: starting at the consumer
 * index, free the mbufs and unload the DMA maps of frames the
 * hardware has finished with, stopping at the first descriptor the
 * chip still owns.
 */
void
fec_tx_proc(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct fec_desc *txd;
	struct fec_buf *txb;
	int idx;

	/* Pull the hardware's view of the TX ring into memory. */
	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_txring), 0,
	    ENET_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->sc_tx_cnt > 0) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < ENET_NTXDESC);

		txd = &sc->sc_txdesc[idx];
		/* READY set means the chip still owns this descriptor. */
		if (txd->fd_status & ENET_TXD_READY)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->fb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->fb_map, 0,
			    txb->fb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->fb_map);

			/* fb_m0 is the original chain when a copy was made. */
			m_freem(txb->fb_m);
			m_freem(txb->fb_m0);
			txb->fb_m = txb->fb_m0 = NULL;
		}

		/* A slot was freed, so transmission may resume. */
		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_tx_cnt--;

		/* Advance the consumer index, wrapping at ring end. */
		if (sc->sc_tx_cons == (ENET_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		/* Clear all status bits except the ring-wrap marker. */
		txd->fd_status &= ENET_TXD_WRAP;
	}

	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;
	else /* ERR006358: re-prime TDAR while work remains queued */
		HWRITE4(sc, ENET_TDAR, ENET_TDAR_TDAR);
}
1073 
/*
 * Harvest received frames: walk the RX ring from the consumer index,
 * hand completed mbufs to the network stack, then refill the ring
 * and tell the hardware fresh descriptors are available.
 */
void
fec_rx_proc(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct fec_desc *rxd;
	struct fec_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/* Pull the hardware's view of the RX ring into memory. */
	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_rxring), 0,
	    ENET_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < ENET_NRXDESC);

		rxd = &sc->sc_rxdesc[idx];
		/* EMPTY set means the chip still owns this descriptor. */
		if (rxd->fd_status & ENET_RXD_EMPTY)
			break;

		len = rxd->fd_len;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->fb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->fb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->fb_map);

		/* Strip off CRC. */
		len -= ETHER_CRC_LEN;
		KASSERT(len > 0);

		/* Take the mbuf; fec_fill_rx_ring() replaces it below. */
		m = rxb->fb_m;
		rxb->fb_m = NULL;

		m_adj(m, ETHER_ALIGN);
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		/* Release the slot and advance the consumer index. */
		if_rxr_put(&sc->sc_rx_ring, 1);
		if (sc->sc_rx_cons == (ENET_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	/* Feed the stack; back off refilling if input is livelocked. */
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	fec_fill_rx_ring(sc);

	/* Hand the refreshed descriptors back to the hardware. */
	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_rxring), 0,
	    ENET_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* rx descriptors are ready */
	HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);
}
1138 
1139 void
1140 fec_tick(void *arg)
1141 {
1142 	struct fec_softc *sc = arg;
1143 	int s;
1144 
1145 	s = splnet();
1146 	mii_tick(&sc->sc_mii);
1147 	splx(s);
1148 
1149 	timeout_add_sec(&sc->sc_tick, 1);
1150 }
1151 
1152 /*
1153  * MII
1154  * Interrupts need ENET_ECR_ETHEREN to be set,
1155  * so we just read the interrupt status registers.
1156  */
1157 int
1158 fec_miibus_readreg(struct device *dev, int phy, int reg)
1159 {
1160 	int r = 0;
1161 	struct fec_softc *sc = (struct fec_softc *)dev;
1162 
1163 	HWRITE4(sc, ENET_EIR, ENET_EIR_MII);
1164 
1165 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR,
1166 	    ENET_MMFR_ST | ENET_MMFR_OP_RD | ENET_MMFR_TA |
1167 	    phy << ENET_MMFR_PA_SHIFT | reg << ENET_MMFR_RA_SHIFT);
1168 
1169 	while(!(HREAD4(sc, ENET_EIR) & ENET_EIR_MII));
1170 
1171 	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR);
1172 
1173 	return (r & 0xffff);
1174 }
1175 
1176 void
1177 fec_miibus_writereg(struct device *dev, int phy, int reg, int val)
1178 {
1179 	struct fec_softc *sc = (struct fec_softc *)dev;
1180 
1181 	HWRITE4(sc, ENET_EIR, ENET_EIR_MII);
1182 
1183 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR,
1184 	    ENET_MMFR_ST | ENET_MMFR_OP_WR | ENET_MMFR_TA |
1185 	    phy << ENET_MMFR_PA_SHIFT | reg << ENET_MMFR_RA_SHIFT |
1186 	    (val & 0xffff));
1187 
1188 	while(!(HREAD4(sc, ENET_EIR) & ENET_EIR_MII));
1189 
1190 	return;
1191 }
1192 
1193 void
1194 fec_miibus_statchg(struct device *dev)
1195 {
1196 	struct fec_softc *sc = (struct fec_softc *)dev;
1197 	uint32_t ecr, rcr;
1198 
1199 	ecr = HREAD4(sc, ENET_ECR) & ~ENET_ECR_SPEED;
1200 	rcr = HREAD4(sc, ENET_RCR) & ~ENET_RCR_RMII_10T;
1201 	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
1202 	case IFM_1000_T:  /* Gigabit */
1203 		ecr |= ENET_ECR_SPEED;
1204 		break;
1205 	case IFM_100_TX:
1206 		break;
1207 	case IFM_10_T:
1208 		rcr |= ENET_RCR_RMII_10T;
1209 		break;
1210 	}
1211 	HWRITE4(sc, ENET_ECR, ecr);
1212 	HWRITE4(sc, ENET_RCR, rcr);
1213 
1214 	return;
1215 }
1216 
1217 int
1218 fec_ifmedia_upd(struct ifnet *ifp)
1219 {
1220 	struct fec_softc *sc = ifp->if_softc;
1221 	struct mii_data *mii = &sc->sc_mii;
1222 	int err;
1223 	if (mii->mii_instance) {
1224 		struct mii_softc *miisc;
1225 
1226 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1227 			mii_phy_reset(miisc);
1228 	}
1229 	err = mii_mediachg(mii);
1230 	return (err);
1231 }
1232 
1233 void
1234 fec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1235 {
1236 	struct fec_softc *sc = ifp->if_softc;
1237 	struct mii_data *mii = &sc->sc_mii;
1238 
1239 	mii_pollstat(mii);
1240 
1241 	ifmr->ifm_active = mii->mii_media_active;
1242 	ifmr->ifm_status = mii->mii_media_status;
1243 }
1244 
1245 /*
1246  * Manage DMA'able memory.
1247  */
/*
 * Allocate "size" bytes of DMA-able memory in a single segment,
 * mapped into kernel virtual space and loaded into a DMA map.
 * Returns NULL on failure; on success the caller owns the handle
 * and must release it with fec_dmamem_free().
 */
struct fec_dmamem *
fec_dmamem_alloc(struct fec_softc *sc, bus_size_t size, bus_size_t align)
{
	struct fec_dmamem *fdm;
	int nsegs;

	fdm = malloc(sizeof(*fdm), M_DEVBUF, M_WAITOK | M_ZERO);
	fdm->fdm_size = size;

	/* One map covering the whole region as a single segment. */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &fdm->fdm_map) != 0)
		goto fdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &fdm->fdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	/* Request a coherent (uncached/snooped) kernel mapping. */
	if (bus_dmamem_map(sc->sc_dmat, &fdm->fdm_seg, nsegs, size,
	    &fdm->fdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, fdm->fdm_map, fdm->fdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (fdm);

	/* Error unwind: release resources in reverse acquisition order. */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, fdm->fdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &fdm->fdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, fdm->fdm_map);
fdmfree:
	free(fdm, M_DEVBUF, sizeof(*fdm));

	return (NULL);
}
1286 
/*
 * Release memory obtained from fec_dmamem_alloc(), undoing each
 * allocation step in reverse order.
 */
void
fec_dmamem_free(struct fec_softc *sc, struct fec_dmamem *fdm)
{
	bus_dmamem_unmap(sc->sc_dmat, fdm->fdm_kva, fdm->fdm_size);
	bus_dmamem_free(sc->sc_dmat, &fdm->fdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, fdm->fdm_map);
	free(fdm, M_DEVBUF, sizeof(*fdm));
}
1295 
1296 struct mbuf *
1297 fec_alloc_mbuf(struct fec_softc *sc, bus_dmamap_t map)
1298 {
1299 	struct mbuf *m = NULL;
1300 
1301 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1302 	if (!m)
1303 		return (NULL);
1304 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1305 
1306 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1307 		printf("%s: could not load mbuf DMA map",
1308 		    sc->sc_dev.dv_xname);
1309 		m_freem(m);
1310 		return (NULL);
1311 	}
1312 
1313 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1314 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1315 
1316 	return (m);
1317 }
1318 
1319 void
1320 fec_fill_rx_ring(struct fec_softc *sc)
1321 {
1322 	struct fec_desc *rxd;
1323 	struct fec_buf *rxb;
1324 	u_int slots;
1325 
1326 	for (slots = if_rxr_get(&sc->sc_rx_ring, ENET_NRXDESC);
1327 	    slots > 0; slots--) {
1328 		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
1329 		rxb->fb_m = fec_alloc_mbuf(sc, rxb->fb_map);
1330 		if (rxb->fb_m == NULL)
1331 			break;
1332 		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
1333 		rxd->fd_len = rxb->fb_map->dm_segs[0].ds_len - 1;
1334 		rxd->fd_addr = rxb->fb_map->dm_segs[0].ds_addr;
1335 		rxd->fd_status &= ENET_RXD_WRAP;
1336 		rxd->fd_status |= ENET_RXD_EMPTY;
1337 
1338 		if (sc->sc_rx_prod == (ENET_NRXDESC - 1))
1339 			sc->sc_rx_prod = 0;
1340 		else
1341 			sc->sc_rx_prod++;
1342 	}
1343 	if_rxr_put(&sc->sc_rx_ring, slots);
1344 }
1345