xref: /freebsd/sys/dev/dwc/if_dwc.c (revision e17f5b1d)
1 /*-
2  * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Ethernet media access controller (EMAC)
33  * Chapter 17, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  *
35  * EMAC is an instance of the Synopsys DesignWare 3504-0
36  * Universal 10/100/1000 Ethernet MAC (DWC_gmac).
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/bus.h>
45 #include <sys/gpio.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/mbuf.h>
50 #include <sys/module.h>
51 #include <sys/mutex.h>
52 #include <sys/rman.h>
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 
56 #include <net/bpf.h>
57 #include <net/if.h>
58 #include <net/ethernet.h>
59 #include <net/if_dl.h>
60 #include <net/if_media.h>
61 #include <net/if_types.h>
62 #include <net/if_var.h>
63 
64 #include <machine/bus.h>
65 
66 #include <dev/dwc/if_dwc.h>
67 #include <dev/dwc/if_dwcvar.h>
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 #include <dev/ofw/ofw_bus.h>
71 #include <dev/ofw/ofw_bus_subr.h>
72 
73 #ifdef EXT_RESOURCES
74 #include <dev/extres/clk/clk.h>
75 #include <dev/extres/hwreset/hwreset.h>
76 #endif
77 
78 #include "if_dwc_if.h"
79 #include "gpio_if.h"
80 #include "miibus_if.h"
81 
82 #define	READ4(_sc, _reg) \
83 	bus_read_4((_sc)->res[0], _reg)
84 #define	WRITE4(_sc, _reg, _val) \
85 	bus_write_4((_sc)->res[0], _reg, _val)
86 
87 #define	MAC_RESET_TIMEOUT	100
88 #define	WATCHDOG_TIMEOUT_SECS	5
89 #define	STATS_HARVEST_INTERVAL	2
90 
91 #define	DWC_LOCK(sc)			mtx_lock(&(sc)->mtx)
92 #define	DWC_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
93 #define	DWC_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED)
94 #define	DWC_ASSERT_UNLOCKED(sc)		mtx_assert(&(sc)->mtx, MA_NOTOWNED)
95 
96 /* TX descriptors - TDESC0 is almost unified */
97 #define	TDESC0_OWN		(1U << 31)
98 #define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
99 #define	TDESC0_ES		(1U << 15)	/* Error Summary */
100 #define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
101 #define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
102 #define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
103 #define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
104 #define	TDESC0_NC		(1U << 10)	/* No Carrier */
105 #define	TDESC0_LC		(1U <<  9)	/* Late Collision */
106 #define	TDESC0_EC		(1U <<  8)	/* Excessive Collision */
107 #define	TDESC0_VF		(1U <<  7)	/* VLAN Frame */
108 #define	TDESC0_CC_MASK		0xf
109 #define	TDESC0_CC_SHIFT		3		/* Collision Count */
110 #define	TDESC0_ED		(1U <<  2)	/* Excessive Deferral */
111 #define	TDESC0_UF		(1U <<  1)	/* Underflow Error */
112 #define	TDESC0_DB		(1U <<  0)	/* Deferred Bit */
113 /* TX descriptors - TDESC0 extended format only */
114 #define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
115 #define	ETDESC0_LS		(1U << 29)	/* Last Segment */
116 #define	ETDESC0_FS		(1U << 28)	/* First Segment */
117 #define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
118 #define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
119 #define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
120 #define	ETDESC0_CIC_HDR		(1U << 22)
121 #define	ETDESC0_CIC_SEG 	(2U << 22)
122 #define	ETDESC0_CIC_FULL	(3U << 22)
123 #define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
124 #define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */
125 
126 /* TX descriptors - TDESC1 normal format */
127 #define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
128 #define	NTDESC1_LS		(1U << 30)	/* Last Segment */
129 #define	NTDESC1_FS		(1U << 29)	/* First Segment */
130 #define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
131 #define	NTDESC1_CIC_HDR		(1U << 27)
132 #define	NTDESC1_CIC_SEG 	(2U << 27)
133 #define	NTDESC1_CIC_FULL	(3U << 27)
134 #define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
135 #define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
136 #define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */
137 /* TX descriptors - TDESC1 extended format */
138 #define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
139 #define	ETDESC1_TBS2_MASK	0x7ff
140 #define	ETDESC1_TBS2_SHIFT	11		/* Receive Buffer 2 Size */
141 #define	ETDESC1_TBS1_MASK	0x7ff
142 #define	ETDESC1_TBS1_SHIFT	0		/* Receive Buffer 1 Size */
143 
144 /* RX descriptor - RDESC0 is unified */
145 #define	RDESC0_OWN		(1U << 31)
146 #define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
147 #define	RDESC0_FL_MASK		0x3fff
148 #define	RDESC0_FL_SHIFT		16		/* Frame Length */
149 #define	RDESC0_ES		(1U << 15)	/* Error Summary */
150 #define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
151 #define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
152 #define	RDESC0_LE		(1U << 12)	/* Length Error */
153 #define	RDESC0_OE		(1U << 11)	/* Overflow Error */
154 #define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
155 #define	RDESC0_FS		(1U <<  9)	/* First Descriptor */
156 #define	RDESC0_LS		(1U <<  8)	/* Last Descriptor */
157 #define	RDESC0_ICE		(1U <<  7)	/* IPC Checksum Error */
158 #define	RDESC0_GF		(1U <<  7)	/* Giant Frame */
159 #define	RDESC0_LC		(1U <<  6)	/* Late Collision */
160 #define	RDESC0_FT		(1U <<  5)	/* Frame Type */
161 #define	RDESC0_RWT		(1U <<  4)	/* Receive Watchdog Timeout */
162 #define	RDESC0_RE		(1U <<  3)	/* Receive Error */
163 #define	RDESC0_DBE		(1U <<  2)	/* Dribble Bit Error */
164 #define	RDESC0_CE		(1U <<  1)	/* CRC Error */
165 #define	RDESC0_PCE		(1U <<  0)	/* Payload Checksum Error */
166 #define	RDESC0_RXMA		(1U <<  0)	/* Rx MAC Address */
167 
168 /* RX descriptors - RDESC1 normal format */
169 #define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
170 #define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
171 #define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
172 #define	NRDESC1_RBS2_MASK	0x7ff
173 #define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
174 #define	NRDESC1_RBS1_MASK	0x7ff
175 #define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */
176 
177 /* RX descriptors - RDESC1 enhanced format */
178 #define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
179 #define	ERDESC1_RBS2_MASK	0x7ffff
180 #define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
181 #define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
182 #define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
183 #define	ERDESC1_RBS1_MASK	0x7ffff
184 #define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */
185 
186 /*
187  * A hardware buffer descriptor.  Rx and Tx buffers have the same descriptor
188  * layout, but the bits in the fields have different meanings.
189  */
190 struct dwc_hwdesc
191 {
192 	uint32_t desc0;
193 	uint32_t desc1;
194 	uint32_t addr1;		/* ptr to first buffer data */
195 	uint32_t addr2;		/* ptr to next descriptor / second buffer data*/
196 };
197 
198 /*
199  * The hardware imposes alignment restrictions on various objects involved in
200  * DMA transfers.  These values are expressed in bytes (not bits).
201  */
202 #define	DWC_DESC_RING_ALIGN	2048
203 
/* Bus resources: one register window and one interrupt line. */
static struct resource_spec dwc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }	/* terminator */
};
209 
210 static void dwc_txfinish_locked(struct dwc_softc *sc);
211 static void dwc_rxfinish_locked(struct dwc_softc *sc);
212 static void dwc_stop_locked(struct dwc_softc *sc);
213 static void dwc_setup_rxfilter(struct dwc_softc *sc);
214 
215 static inline uint32_t
216 next_rxidx(struct dwc_softc *sc, uint32_t curidx)
217 {
218 
219 	return ((curidx + 1) % RX_DESC_COUNT);
220 }
221 
222 static inline uint32_t
223 next_txidx(struct dwc_softc *sc, uint32_t curidx)
224 {
225 
226 	return ((curidx + 1) % TX_DESC_COUNT);
227 }
228 
229 static void
230 dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
231 {
232 
233 	if (error != 0)
234 		return;
235 	*(bus_addr_t *)arg = segs[0].ds_addr;
236 }
237 
/*
 * Fill in TX descriptor 'idx'.  A paddr/len of 0/0 clears the slot after a
 * completed transmit; otherwise the single-segment buffer is installed and
 * the descriptor is handed to the hardware.  sc->txcount tracks the number
 * of in-use descriptors.
 */
inline static void
dwc_setup_txdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr,
    uint32_t len)
{
	uint32_t desc0, desc1;

	/* Addr/len 0 means we're clearing the descriptor after xmit done. */
	if (paddr == 0 || len == 0) {
		desc0 = 0;
		desc1 = 0;
		--sc->txcount;
	} else {
		if (sc->mactype != DWC_GMAC_EXT_DESC) {
			/* Normal descriptors keep the control bits in desc1. */
			desc0 = 0;
			desc1 = NTDESC1_TCH | NTDESC1_FS | NTDESC1_LS |
			    NTDESC1_IC | len;
		} else {
			/* Extended descriptors keep the control bits in desc0. */
			desc0 = ETDESC0_TCH | ETDESC0_FS | ETDESC0_LS |
			    ETDESC0_IC;
			desc1 = len;
		}
		++sc->txcount;
	}

	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
	sc->txdesc_ring[idx].desc0 = desc0;
	sc->txdesc_ring[idx].desc1 = desc1;

	if (paddr && len) {
		/*
		 * Hand the descriptor to the hardware only after every other
		 * field is visible; the barriers enforce that ordering.
		 */
		wmb();
		sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
		wmb();
	}
}
272 
273 static int
274 dwc_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
275 {
276 	struct bus_dma_segment seg;
277 	int error, nsegs;
278 	struct mbuf * m;
279 
280 	if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
281 		return (ENOMEM);
282 	*mp = m;
283 
284 	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
285 	    m, &seg, &nsegs, 0);
286 	if (error != 0) {
287 		return (ENOMEM);
288 	}
289 
290 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
291 
292 	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
293 	    BUS_DMASYNC_PREWRITE);
294 
295 	sc->txbuf_map[idx].mbuf = m;
296 
297 	dwc_setup_txdesc(sc, idx, seg.ds_addr, seg.ds_len);
298 
299 	return (0);
300 }
301 
/*
 * Move packets from the interface send queue onto the TX descriptor ring.
 * Stops when the ring is full (one slot is kept free so full and empty are
 * distinguishable) or the queue drains, then pokes the DMA engine if
 * anything was enqueued.
 */
static void
dwc_txstart_locked(struct dwc_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m;
	int enqueued;

	DWC_ASSERT_LOCKED(sc);

	/* Nothing to do until the PHY reports an active link. */
	if (!sc->link_is_up)
		return;

	ifp = sc->ifp;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	enqueued = 0;

	for (;;) {
		if (sc->txcount == (TX_DESC_COUNT - 1)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (dwc_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) {
			/* Mapping failed; put the packet back for later. */
			 IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		BPF_MTAP(ifp, m);
		sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head);
		++enqueued;
	}

	if (enqueued != 0) {
		/* Ask the DMA engine to rescan the ring; arm the watchdog. */
		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}
344 
345 static void
346 dwc_txstart(struct ifnet *ifp)
347 {
348 	struct dwc_softc *sc = ifp->if_softc;
349 
350 	DWC_LOCK(sc);
351 	dwc_txstart_locked(sc);
352 	DWC_UNLOCK(sc);
353 }
354 
/*
 * Stop the controller.  The ordering lets queued TX data drain: stop TX
 * DMA, flush the TX FIFO, disable the MAC transmitter and receiver, and
 * finally stop RX DMA.  Also cancels the tick callout and watchdog.
 */
static void
dwc_stop_locked(struct dwc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->tx_watchdog_count = 0;
	sc->stats_harvest_count = 0;

	callout_stop(&sc->dwc_callout);

	/* Stop DMA TX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Flush TX */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_FTF);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Stop transmitters */
	reg = READ4(sc, MAC_CONFIGURATION);
	reg &= ~(CONF_TE | CONF_RE);
	WRITE4(sc, MAC_CONFIGURATION, reg);

	/* Stop DMA RX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}
390 
391 static void dwc_clear_stats(struct dwc_softc *sc)
392 {
393 	uint32_t reg;
394 
395 	reg = READ4(sc, MMC_CONTROL);
396 	reg |= (MMC_CONTROL_CNTRST);
397 	WRITE4(sc, MMC_CONTROL, reg);
398 }
399 
/*
 * Fold the hardware MMC counters into the ifnet statistics and reset them.
 * Called once per tick, but only does real work every
 * STATS_HARVEST_INTERVAL calls.
 */
static void
dwc_harvest_stats(struct dwc_softc *sc)
{
	struct ifnet *ifp;

	/* We don't need to harvest too often. */
	if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL)
		return;

	sc->stats_harvest_count = 0;
	ifp = sc->ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, READ4(sc, RXFRAMECOUNT_GB));
	if_inc_counter(ifp, IFCOUNTER_IMCASTS, READ4(sc, RXMULTICASTFRAMES_G));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    READ4(sc, RXOVERSIZE_G) + READ4(sc, RXUNDERSIZE_G) +
	    READ4(sc, RXCRCERROR) + READ4(sc, RXALIGNMENTERROR) +
	    READ4(sc, RXRUNTERROR) + READ4(sc, RXJABBERERROR) +
	    READ4(sc, RXLENGTHERROR));

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, READ4(sc, TXFRAMECOUNT_G));
	if_inc_counter(ifp, IFCOUNTER_OMCASTS, READ4(sc, TXMULTICASTFRAMES_G));
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    READ4(sc, TXOVERSIZE_G) + READ4(sc, TXEXCESSDEF) +
	    READ4(sc, TXCARRIERERR) + READ4(sc, TXUNDERFLOWERROR));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    READ4(sc, TXEXESSCOL) + READ4(sc, TXLATECOL));

	/* Zero the hardware counters so the next harvest reads fresh deltas. */
	dwc_clear_stats(sc);
}
431 
/*
 * Once-per-second housekeeping callout: run the TX watchdog, harvest the
 * hardware statistics, and poll the PHY for link-state changes.  Runs with
 * the softc mutex held (asserted below) and reschedules itself while the
 * interface is running.
 */
static void
dwc_tick(void *arg)
{
	struct dwc_softc *sc;
	struct ifnet *ifp;
	int link_was_up;

	sc = arg;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
	    return;

	/*
	 * Typical tx watchdog.  If this fires it indicates that we enqueued
	 * packets for output and never got a txdone interrupt for them.  Maybe
	 * it's a missed interrupt somehow, just pretend we got one.
	 */
	if (sc->tx_watchdog_count > 0) {
		if (--sc->tx_watchdog_count == 0) {
			dwc_txfinish_locked(sc);
		}
	}

	/* Gather stats from hardware counters. */
	dwc_harvest_stats(sc);

	/* Check the media status. */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	/* If the link just came up, restart any transmits that were waiting. */
	if (sc->link_is_up && !link_was_up)
		dwc_txstart_locked(sc);

	/* Schedule another check one second from now. */
	callout_reset(&sc->dwc_callout, hz, dwc_tick, sc);
}
471 
/*
 * Bring the interface up: program the RX filter, configure and start both
 * DMA engines, enable the MAC transmitter/receiver, then kick off media
 * selection and the periodic tick.  No-op if already running.
 */
static void
dwc_init_locked(struct dwc_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	dwc_setup_rxfilter(sc);

	/* Initialize DMA and enable transmitters */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
	reg &= ~(MODE_RSF);
	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
	WRITE4(sc, OPERATION_MODE, reg);

	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

	/* Start DMA */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Enable transmitters */
	reg = READ4(sc, MAC_CONFIGURATION);
	reg |= (CONF_JD | CONF_ACS | CONF_BE);
	reg |= (CONF_TE | CONF_RE);
	WRITE4(sc, MAC_CONFIGURATION, reg);

	/*
	 * Call mii_mediachg() which will call back into dwc_miibus_statchg()
	 * to set up the remaining config registers based on current media.
	 */
	mii_mediachg(sc->mii_softc);
	callout_reset(&sc->dwc_callout, hz, dwc_tick, sc);
}
514 
static void
dwc_init(void *if_softc)
{
	struct dwc_softc *sc;

	/* if_init hook: take the lock and do the real work. */
	sc = if_softc;
	DWC_LOCK(sc);
	dwc_init_locked(sc);
	DWC_UNLOCK(sc);
}
524 
525 
/*
 * Fill in RX descriptor 'idx': install the buffer's physical address,
 * chain addr2 to the next descriptor, set the buffer size, and hand the
 * descriptor to the hardware.  Returns the index of the next descriptor.
 */
inline static uint32_t
dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr)
{
	uint32_t nidx;

	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
	nidx = next_rxidx(sc, idx);
	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
	    (nidx * sizeof(struct dwc_hwdesc));
	/* Buffer size field is narrower than MCLBYTES on some layouts. */
	if (sc->mactype != DWC_GMAC_EXT_DESC)
		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
	else
		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);

	/* Give the descriptor to the hardware only after it is fully set up. */
	wmb();
	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
	wmb();
	return (nidx);
}
547 
/*
 * Map mbuf 'm' for DMA and install it in RX ring slot 'idx'.  The data
 * start is adjusted by ETHER_ALIGN so the IP header lands on a 32-bit
 * boundary.  Returns 0 on success or the busdma error.
 */
static int
dwc_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
{
	struct bus_dma_segment seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	dwc_setup_rxdesc(sc, idx, seg.ds_addr);

	return (0);
}
571 
572 static struct mbuf *
573 dwc_alloc_mbufcl(struct dwc_softc *sc)
574 {
575 	struct mbuf *m;
576 
577 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
578 	if (m != NULL)
579 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
580 
581 	return (m);
582 }
583 
584 static struct mbuf *
585 dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
586     struct dwc_bufmap *map)
587 {
588 	struct ifnet *ifp;
589 	struct mbuf *m, *m0;
590 	int len;
591 	uint32_t rdesc0;
592 
593 	m = map->mbuf;
594 	ifp = sc->ifp;
595 	rdesc0 = desc ->desc0;
596 	/* Validate descriptor. */
597 	if (rdesc0 & RDESC0_ES) {
598 		/*
599 		 * Errored packet. Statistic counters are updated
600 		 * globally, so do nothing
601 		 */
602 		return (NULL);
603 	}
604 
605 	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
606 		    (RDESC0_FS | RDESC0_LS)) {
607 		/*
608 		 * Something very wrong happens. The whole packet should be
609 		 * recevied in one descriptr. Report problem.
610 		 */
611 		device_printf(sc->dev,
612 		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
613 		    __func__, rdesc0);
614 		return (NULL);
615 	}
616 
617 	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
618 	if (len < 64) {
619 		/*
620 		 * Lenght is invalid, recycle old mbuf
621 		 * Probably impossible case
622 		 */
623 		return (NULL);
624 	}
625 
626 	/* Allocate new buffer */
627 	m0 = dwc_alloc_mbufcl(sc);
628 	if (m0 == NULL) {
629 		/* no new mbuf available, recycle old */
630 		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
631 		return (NULL);
632 	}
633 	/* Do dmasync for newly received packet */
634 	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
635 	bus_dmamap_unload(sc->rxbuf_tag, map->map);
636 
637 	/* Received packet is valid, process it */
638 	m->m_pkthdr.rcvif = ifp;
639 	m->m_pkthdr.len = len;
640 	m->m_len = len;
641 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
642 
643 	/* Remove trailing FCS */
644 	m_adj(m, -ETHER_CRC_LEN);
645 
646 	DWC_UNLOCK(sc);
647 	(*ifp->if_input)(ifp, m);
648 	DWC_LOCK(sc);
649 	return (m0);
650 }
651 
652 static void
653 dwc_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
654 {
655 	struct dwc_softc *sc;
656 	struct mii_data *mii;
657 
658 	sc = ifp->if_softc;
659 	mii = sc->mii_softc;
660 	DWC_LOCK(sc);
661 	mii_pollstat(mii);
662 	ifmr->ifm_active = mii->mii_media_active;
663 	ifmr->ifm_status = mii->mii_media_status;
664 	DWC_UNLOCK(sc);
665 }
666 
667 static int
668 dwc_media_change_locked(struct dwc_softc *sc)
669 {
670 
671 	return (mii_mediachg(sc->mii_softc));
672 }
673 
674 static int
675 dwc_media_change(struct ifnet * ifp)
676 {
677 	struct dwc_softc *sc;
678 	int error;
679 
680 	sc = ifp->if_softc;
681 
682 	DWC_LOCK(sc);
683 	error = dwc_media_change_locked(sc);
684 	DWC_UNLOCK(sc);
685 	return (error);
686 }
687 
/* Per-nibble bit-reversal lookup table used by bitreverse(). */
static const uint8_t nibbletab[] = {
	/* 0x0 0000 -> 0000 */  0x0,
	/* 0x1 0001 -> 1000 */  0x8,
	/* 0x2 0010 -> 0100 */  0x4,
	/* 0x3 0011 -> 1100 */  0xc,
	/* 0x4 0100 -> 0010 */  0x2,
	/* 0x5 0101 -> 1010 */  0xa,
	/* 0x6 0110 -> 0110 */  0x6,
	/* 0x7 0111 -> 1110 */  0xe,
	/* 0x8 1000 -> 0001 */  0x1,
	/* 0x9 1001 -> 1001 */  0x9,
	/* 0xa 1010 -> 0101 */  0x5,
	/* 0xb 1011 -> 1101 */  0xd,
	/* 0xc 1100 -> 0011 */  0x3,
	/* 0xd 1101 -> 1011 */  0xb,
	/* 0xe 1110 -> 0111 */  0x7,
	/* 0xf 1111 -> 1111 */  0xf, };
705 
static uint8_t
bitreverse(uint8_t x)
{

	/* Reverse the bit order of a byte: swap nibbles, then pairs, then bits. */
	x = (uint8_t)(((x & 0xf0) >> 4) | ((x & 0x0f) << 4));
	x = (uint8_t)(((x & 0xcc) >> 2) | ((x & 0x33) << 2));
	x = (uint8_t)(((x & 0xaa) >> 1) | ((x & 0x55) << 1));
	return (x);
}
712 
/* Accumulator passed to dwc_hash_maddr() via if_foreach_llmaddr(). */
struct dwc_hash_maddr_ctx {
	struct dwc_softc *sc;	/* for the mactype check */
	uint32_t hash[8];	/* hash-table bitmap; only 2 words used on
				   non-extended-descriptor MACs */
};
717 
/*
 * if_foreach_llmaddr() callback: fold one multicast link-layer address
 * into the hash bitmap in ctx->hash.  The index is derived from the
 * bit-reversed low byte of the complemented CRC32 of the address.
 * Returns 1 so the caller can count processed addresses.
 */
static u_int
dwc_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct dwc_hash_maddr_ctx *ctx = arg;
	uint32_t crc, hashbit, hashreg;
	uint8_t val;

	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
	/* Take lower 8 bits and reverse it */
	val = bitreverse(~crc & 0xff);
	if (ctx->sc->mactype != DWC_GMAC_EXT_DESC)
		val >>= 2; /* Only need lower 6 bits */
	/* Split into a 32-bit word index and a bit number within that word. */
	hashreg = (val >> 5);
	hashbit = (val & 31);
	ctx->hash[hashreg] |= (1 << hashbit);

	return (1);
}
736 
/*
 * Program the MAC receive filter: the multicast hash bitmap (or pass-all
 * for ALLMULTI), the promiscuous bit, and the primary unicast address.
 */
static void
dwc_setup_rxfilter(struct dwc_softc *sc)
{
	struct dwc_hash_maddr_ctx ctx;
	struct ifnet *ifp;
	uint8_t *eaddr;
	uint32_t ffval, hi, lo;
	int nhash, i;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* Hash table is 2 words (64 bits) or 8 words (256 bits) wide. */
	nhash = sc->mactype != DWC_GMAC_EXT_DESC ? 2 : 8;

	/*
	 * Set the multicast (group) filter hash.
	 */
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		ffval = (FRAME_FILTER_PM);
		for (i = 0; i < nhash; i++)
			ctx.hash[i] = ~0;
	} else {
		ffval = (FRAME_FILTER_HMC);
		for (i = 0; i < nhash; i++)
			ctx.hash[i] = 0;
		ctx.sc = sc;
		if_foreach_llmaddr(ifp, dwc_hash_maddr, &ctx);
	}

	/*
	 * Set the individual address filter hash.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		ffval |= (FRAME_FILTER_PR);

	/*
	 * Set the primary address.
	 */
	eaddr = IF_LLADDR(ifp);
	lo = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
	    (eaddr[3] << 24);
	hi = eaddr[4] | (eaddr[5] << 8);
	WRITE4(sc, MAC_ADDRESS_LOW(0), lo);
	WRITE4(sc, MAC_ADDRESS_HIGH(0), hi);
	WRITE4(sc, MAC_FRAME_FILTER, ffval);
	if (sc->mactype != DWC_GMAC_EXT_DESC) {
		WRITE4(sc, GMAC_MAC_HTLOW, ctx.hash[0]);
		WRITE4(sc, GMAC_MAC_HTHIGH, ctx.hash[1]);
	} else {
		for (i = 0; i < nhash; i++)
			WRITE4(sc, HASH_TABLE_REG(i), ctx.hash[i]);
	}
}
790 
/*
 * Interface ioctl handler.  Handles up/down transitions and filter
 * changes directly; media and everything else are delegated to the MII
 * layer and ether_ioctl() respectively.
 */
static int
dwc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct dwc_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		DWC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Only reprogram the filter if those flags changed. */
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					dwc_setup_rxfilter(sc);
			} else {
				/* Don't start a device that is going away. */
				if (!sc->is_detaching)
					dwc_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				dwc_stop_locked(sc);
		}
		/* Remember the flags to detect future changes. */
		sc->if_flags = ifp->if_flags;
		DWC_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			DWC_LOCK(sc);
			dwc_setup_rxfilter(sc);
			DWC_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = sc->mii_softc;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change took */
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
850 
/*
 * Reclaim completed TX descriptors: for each slot the hardware has
 * released (OWN bit clear), sync and unload the DMA map, free the mbuf,
 * clear the descriptor, and advance the tail index.  Clears OACTIVE so
 * the start routine can queue more packets.
 */
static void
dwc_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	struct ifnet *ifp;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	while (sc->tx_idx_tail != sc->tx_idx_head) {
		desc = &sc->txdesc_ring[sc->tx_idx_tail];
		/* Stop at the first descriptor still owned by the hardware. */
		if ((desc->desc0 & TDESC0_OWN) != 0)
			break;
		bmap = &sc->txbuf_map[sc->tx_idx_tail];
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		dwc_setup_txdesc(sc, sc->tx_idx_tail, 0, 0);
		sc->tx_idx_tail = next_txidx(sc, sc->tx_idx_tail);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_idx_tail == sc->tx_idx_head) {
		sc->tx_watchdog_count = 0;
	}
}
882 
/*
 * Drain completed RX descriptors.  Each received frame is handed to
 * dwc_rxfinish_one(); on success the slot gets the returned replacement
 * buffer, otherwise the old buffer is handed straight back to the
 * hardware so the ring never has a hole.
 */
static void
dwc_rxfinish_locked(struct dwc_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m;
	int error, idx;
	struct dwc_hwdesc *desc;

	DWC_ASSERT_LOCKED(sc);
	ifp = sc->ifp;
	for (;;) {
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		/* Stop at the first descriptor still owned by the hardware. */
		if ((desc->desc0 & RDESC0_OWN) != 0)
			break;

		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			/* Recycle the old buffer: just hand the slot back. */
			wmb();
			desc->desc0 = RDESC0_OWN;
			wmb();
		} else {
			/* We cannot create hole in RX ring */
			error = dwc_setup_rxbuf(sc, idx, m);
			if (error != 0)
				panic("dwc_setup_rxbuf failed:  error %d\n",
				    error);
		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}
}
914 
/*
 * Interrupt handler.  Acknowledges MAC-level interrupts, services RX/TX
 * completion on a normal DMA interrupt, restarts the controller on a
 * fatal bus error, and finally clears the handled DMA status bits.
 */
static void
dwc_intr(void *arg)
{
	struct dwc_softc *sc;
	uint32_t reg;

	sc = arg;

	DWC_LOCK(sc);

	reg = READ4(sc, INTERRUPT_STATUS);
	/* Reading the SGMII/RGMII status register clears the MAC interrupt. */
	if (reg)
		READ4(sc, SGMII_RGMII_SMII_CTRL_STATUS);

	reg = READ4(sc, DMA_STATUS);
	if (reg & DMA_STATUS_NIS) {
		if (reg & DMA_STATUS_RI)
			dwc_rxfinish_locked(sc);

		if (reg & DMA_STATUS_TI) {
			dwc_txfinish_locked(sc);
			/* Freed descriptors may unblock pending transmits. */
			dwc_txstart_locked(sc);
		}
	}

	if (reg & DMA_STATUS_AIS) {
		if (reg & DMA_STATUS_FBI) {
			/* Fatal bus error */
			device_printf(sc->dev,
			    "Ethernet DMA error, restarting controller.\n");
			dwc_stop_locked(sc);
			dwc_init_locked(sc);
		}
	}

	/* Acknowledge the interrupt causes we just handled. */
	WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
	DWC_UNLOCK(sc);
}
953 
/*
 * Create the DMA resources for both rings: a tag, coherent memory, and a
 * loaded map for each descriptor ring, plus per-slot buffer maps.  TX
 * descriptors are chained into a ring and cleared; RX slots are populated
 * with freshly allocated mbuf clusters.  Returns 0 or ENXIO.
 */
static int
setup_dma(struct dwc_softc *sc)
{
	struct mbuf *m;
	int error;
	int nidx;
	int idx;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1, 		/* maxsize, nsegments */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->txdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
	    &sc->txdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		goto out;
	}

	/* Chain each TX descriptor's addr2 to the next one, forming a ring. */
	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
		nidx = next_txidx(sc, idx);
		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
		    (nidx * sizeof(struct dwc_hwdesc));
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1, 		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX ring DMA tag.\n");
		goto out;
	}

	/* One DMA map per TX slot; descriptors start out cleared. */
	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
		    &sc->txbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create TX buffer DMA map.\n");
			goto out;
		}
		dwc_setup_txdesc(sc, idx, 0, 0);
	}

	/*
	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1, 		/* maxsize, nsegments */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->rxdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
	    &sc->rxdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1, 		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX buf DMA tag.\n");
		goto out;
	}

	/* One DMA map and one mbuf cluster per RX slot. */
	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
		    &sc->rxbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create RX buffer DMA map.\n");
			goto out;
		}
		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->dev, "Could not alloc mbuf\n");
			error = ENOMEM;
			goto out;
		}
		if ((error = dwc_setup_rxbuf(sc, idx, m)) != 0) {
			device_printf(sc->dev,
			    "could not create new RX buffer.\n");
			goto out;
		}
	}

out:
	/*
	 * NOTE(review): resources created before a failure are not released
	 * here — presumably the detach path tears them down; verify.
	 */
	if (error != 0)
		return (ENXIO);

	return (0);
}
1115 
1116 static int
1117 dwc_get_hwaddr(struct dwc_softc *sc, uint8_t *hwaddr)
1118 {
1119 	uint32_t hi, lo, rnd;
1120 
1121 	/*
1122 	 * Try to recover a MAC address from the running hardware. If there's
1123 	 * something non-zero there, assume the bootloader did the right thing
1124 	 * and just use it.
1125 	 *
1126 	 * Otherwise, set the address to a convenient locally assigned address,
1127 	 * 'bsd' + random 24 low-order bits.  'b' is 0x62, which has the locally
1128 	 * assigned bit set, and the broadcast/multicast bit clear.
1129 	 */
1130 	lo = READ4(sc, MAC_ADDRESS_LOW(0));
1131 	hi = READ4(sc, MAC_ADDRESS_HIGH(0)) & 0xffff;
1132 	if ((lo != 0xffffffff) || (hi != 0xffff)) {
1133 		hwaddr[0] = (lo >>  0) & 0xff;
1134 		hwaddr[1] = (lo >>  8) & 0xff;
1135 		hwaddr[2] = (lo >> 16) & 0xff;
1136 		hwaddr[3] = (lo >> 24) & 0xff;
1137 		hwaddr[4] = (hi >>  0) & 0xff;
1138 		hwaddr[5] = (hi >>  8) & 0xff;
1139 	} else {
1140 		rnd = arc4random() & 0x00ffffff;
1141 		hwaddr[0] = 'b';
1142 		hwaddr[1] = 's';
1143 		hwaddr[2] = 'd';
1144 		hwaddr[3] = rnd >> 16;
1145 		hwaddr[4] = rnd >>  8;
1146 		hwaddr[5] = rnd >>  0;
1147 	}
1148 
1149 	return (0);
1150 }
1151 
1152 #define	GPIO_ACTIVE_LOW 1
1153 
1154 static int
1155 dwc_reset(device_t dev)
1156 {
1157 	pcell_t gpio_prop[4];
1158 	pcell_t delay_prop[3];
1159 	phandle_t node, gpio_node;
1160 	device_t gpio;
1161 	uint32_t pin, flags;
1162 	uint32_t pin_value;
1163 
1164 	node = ofw_bus_get_node(dev);
1165 	if (OF_getencprop(node, "snps,reset-gpio",
1166 	    gpio_prop, sizeof(gpio_prop)) <= 0)
1167 		return (0);
1168 
1169 	if (OF_getencprop(node, "snps,reset-delays-us",
1170 	    delay_prop, sizeof(delay_prop)) <= 0) {
1171 		device_printf(dev,
1172 		    "Wrong property for snps,reset-delays-us");
1173 		return (ENXIO);
1174 	}
1175 
1176 	gpio_node = OF_node_from_xref(gpio_prop[0]);
1177 	if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) {
1178 		device_printf(dev,
1179 		    "Can't find gpio controller for phy reset\n");
1180 		return (ENXIO);
1181 	}
1182 
1183 	if (GPIO_MAP_GPIOS(gpio, node, gpio_node,
1184 	    nitems(gpio_prop) - 1,
1185 	    gpio_prop + 1, &pin, &flags) != 0) {
1186 		device_printf(dev, "Can't map gpio for phy reset\n");
1187 		return (ENXIO);
1188 	}
1189 
1190 	pin_value = GPIO_PIN_LOW;
1191 	if (OF_hasprop(node, "snps,reset-active-low"))
1192 		pin_value = GPIO_PIN_HIGH;
1193 
1194 	GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
1195 	GPIO_PIN_SET(gpio, pin, pin_value);
1196 	DELAY(delay_prop[0] * 5);
1197 	GPIO_PIN_SET(gpio, pin, !pin_value);
1198 	DELAY(delay_prop[1] * 5);
1199 	GPIO_PIN_SET(gpio, pin, pin_value);
1200 	DELAY(delay_prop[2] * 5);
1201 
1202 	return (0);
1203 }
1204 
1205 #ifdef EXT_RESOURCES
1206 static int
1207 dwc_clock_init(device_t dev)
1208 {
1209 	hwreset_t rst;
1210 	clk_t clk;
1211 	int error;
1212 
1213 	/* Enable clock */
1214 	if (clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk) == 0) {
1215 		error = clk_enable(clk);
1216 		if (error != 0) {
1217 			device_printf(dev, "could not enable main clock\n");
1218 			return (error);
1219 		}
1220 	}
1221 
1222 	/* De-assert reset */
1223 	if (hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst) == 0) {
1224 		error = hwreset_deassert(rst);
1225 		if (error != 0) {
1226 			device_printf(dev, "could not de-assert reset\n");
1227 			return (error);
1228 		}
1229 	}
1230 
1231 	return (0);
1232 }
1233 #endif
1234 
1235 static int
1236 dwc_probe(device_t dev)
1237 {
1238 
1239 	if (!ofw_bus_status_okay(dev))
1240 		return (ENXIO);
1241 
1242 	if (!ofw_bus_is_compatible(dev, "snps,dwmac"))
1243 		return (ENXIO);
1244 
1245 	device_set_desc(dev, "Gigabit Ethernet Controller");
1246 	return (BUS_PROBE_DEFAULT);
1247 }
1248 
/*
 * Newbus attach: bring up clocks/resets, allocate bus resources, recover
 * the MAC address, soft-reset the controller, set up DMA rings, and attach
 * the ifnet and MII layers.  The ordering below matters: the MAC address
 * must be read before the soft reset clears it, and DMA must be stopped
 * before the descriptor list base addresses are programmed.
 *
 * NOTE(review): the error paths after bus_alloc_resources() return without
 * releasing resources, DMA allocations, the mutex, or the ifnet -- a leak
 * on failed attach.  Left as-is pending a proper detach/teardown path.
 */
static int
dwc_attach(device_t dev)
{
	uint8_t macaddr[ETHER_ADDR_LEN];
	struct dwc_softc *sc;
	struct ifnet *ifp;
	int error, i;
	uint32_t reg;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->rx_idx = 0;
	sc->txcount = TX_DESC_COUNT;
	/* Platform glue supplies the MDIO clock divider and descriptor format. */
	sc->mii_clk = IF_DWC_MII_CLK(dev);
	sc->mactype = IF_DWC_MAC_TYPE(dev);

	/* Platform-specific early init (pin muxing, etc.). */
	if (IF_DWC_INIT(dev) != 0)
		return (ENXIO);

#ifdef EXT_RESOURCES
	if (dwc_clock_init(dev) != 0)
		return (ENXIO);
#endif

	if (bus_alloc_resources(dev, dwc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Read MAC before reset */
	if (dwc_get_hwaddr(sc, macaddr)) {
		device_printf(sc->dev, "can't get mac\n");
		return (ENXIO);
	}

	/* Reset the PHY if needed */
	if (dwc_reset(dev) != 0) {
		device_printf(dev, "Can't reset the PHY\n");
		return (ENXIO);
	}

	/* Reset */
	reg = READ4(sc, BUS_MODE);
	reg |= (BUS_MODE_SWR);
	WRITE4(sc, BUS_MODE, reg);

	/* SWR self-clears when the controller finishes resetting. */
	for (i = 0; i < MAC_RESET_TIMEOUT; i++) {
		if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
			break;
		DELAY(10);
	}
	if (i >= MAC_RESET_TIMEOUT) {
		device_printf(sc->dev, "Can't reset DWC.\n");
		return (ENXIO);
	}

	/* Program DMA burst/arbitration according to the descriptor format. */
	if (sc->mactype != DWC_GMAC_EXT_DESC) {
		reg = BUS_MODE_FIXEDBURST;
		reg |= (BUS_MODE_PRIORXTX_41 << BUS_MODE_PRIORXTX_SHIFT);
	} else
		reg = (BUS_MODE_EIGHTXPBL);
	reg |= (BUS_MODE_PBL_BEATS_8 << BUS_MODE_PBL_SHIFT);
	WRITE4(sc, BUS_MODE, reg);

	/*
	 * DMA must be stopped while changing descriptor list addresses.
	 */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);

	if (setup_dma(sc))
	        return (ENXIO);

	/* Setup addresses */
	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);

	callout_init_mtx(&sc->dwc_callout, &sc->mtx, 0);

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Set up the ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_start = dwc_txstart;
	ifp->if_ioctl = dwc_ioctl;
	ifp->if_init = dwc_init;
	/* Cap the send queue to the TX descriptor ring (one slot reserved). */
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_DESC_COUNT - 1);
	ifp->if_snd.ifq_drv_maxlen = TX_DESC_COUNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the mii driver. */
	error = mii_attach(dev, &sc->miibus, ifp, dwc_media_change,
	    dwc_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (error != 0) {
		device_printf(dev, "PHY attach failed\n");
		return (ENXIO);
	}
	sc->mii_softc = device_get_softc(sc->miibus);

	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, macaddr);
	sc->is_attached = true;

	return (0);
}
1372 
1373 static int
1374 dwc_miibus_read_reg(device_t dev, int phy, int reg)
1375 {
1376 	struct dwc_softc *sc;
1377 	uint16_t mii;
1378 	size_t cnt;
1379 	int rv = 0;
1380 
1381 	sc = device_get_softc(dev);
1382 
1383 	mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT)
1384 	    | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT)
1385 	    | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT)
1386 	    | GMII_ADDRESS_GB; /* Busy flag */
1387 
1388 	WRITE4(sc, GMII_ADDRESS, mii);
1389 
1390 	for (cnt = 0; cnt < 1000; cnt++) {
1391 		if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) {
1392 			rv = READ4(sc, GMII_DATA);
1393 			break;
1394 		}
1395 		DELAY(10);
1396 	}
1397 
1398 	return rv;
1399 }
1400 
1401 static int
1402 dwc_miibus_write_reg(device_t dev, int phy, int reg, int val)
1403 {
1404 	struct dwc_softc *sc;
1405 	uint16_t mii;
1406 	size_t cnt;
1407 
1408 	sc = device_get_softc(dev);
1409 
1410 	mii = ((phy & GMII_ADDRESS_PA_MASK) << GMII_ADDRESS_PA_SHIFT)
1411 	    | ((reg & GMII_ADDRESS_GR_MASK) << GMII_ADDRESS_GR_SHIFT)
1412 	    | (sc->mii_clk << GMII_ADDRESS_CR_SHIFT)
1413 	    | GMII_ADDRESS_GB | GMII_ADDRESS_GW;
1414 
1415 	WRITE4(sc, GMII_DATA, val);
1416 	WRITE4(sc, GMII_ADDRESS, mii);
1417 
1418 	for (cnt = 0; cnt < 1000; cnt++) {
1419 		if (!(READ4(sc, GMII_ADDRESS) & GMII_ADDRESS_GB)) {
1420 			break;
1421                 }
1422 		DELAY(10);
1423 	}
1424 
1425 	return (0);
1426 }
1427 
1428 static void
1429 dwc_miibus_statchg(device_t dev)
1430 {
1431 	struct dwc_softc *sc;
1432 	struct mii_data *mii;
1433 	uint32_t reg;
1434 
1435 	/*
1436 	 * Called by the MII bus driver when the PHY establishes
1437 	 * link to set the MAC interface registers.
1438 	 */
1439 
1440 	sc = device_get_softc(dev);
1441 
1442 	DWC_ASSERT_LOCKED(sc);
1443 
1444 	mii = sc->mii_softc;
1445 
1446 	if (mii->mii_media_status & IFM_ACTIVE)
1447 		sc->link_is_up = true;
1448 	else
1449 		sc->link_is_up = false;
1450 
1451 	reg = READ4(sc, MAC_CONFIGURATION);
1452 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1453 	case IFM_1000_T:
1454 	case IFM_1000_SX:
1455 		reg &= ~(CONF_FES | CONF_PS);
1456 		break;
1457 	case IFM_100_TX:
1458 		reg |= (CONF_FES | CONF_PS);
1459 		break;
1460 	case IFM_10_T:
1461 		reg &= ~(CONF_FES);
1462 		reg |= (CONF_PS);
1463 		break;
1464 	case IFM_NONE:
1465 		sc->link_is_up = false;
1466 		return;
1467 	default:
1468 		sc->link_is_up = false;
1469 		device_printf(dev, "Unsupported media %u\n",
1470 		    IFM_SUBTYPE(mii->mii_media_active));
1471 		return;
1472 	}
1473 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
1474 		reg |= (CONF_DM);
1475 	else
1476 		reg &= ~(CONF_DM);
1477 	WRITE4(sc, MAC_CONFIGURATION, reg);
1478 }
1479 
/* Newbus device interface plus MII bus callbacks for the DWC EMAC. */
static device_method_t dwc_methods[] = {
	DEVMETHOD(device_probe,		dwc_probe),
	DEVMETHOD(device_attach,	dwc_attach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	dwc_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	dwc_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	dwc_miibus_statchg),

	{ 0, 0 }
};

/* Non-static so platform glue drivers can subclass this driver. */
driver_t dwc_driver = {
	"dwc",
	dwc_methods,
	sizeof(struct dwc_softc),
};

static devclass_t dwc_devclass;

/* Attach under simplebus (FDT); each dwc instance hosts a miibus. */
DRIVER_MODULE(dwc, simplebus, dwc_driver, dwc_devclass, 0, 0);
DRIVER_MODULE(miibus, dwc, miibus_driver, miibus_devclass, 0, 0);

MODULE_DEPEND(dwc, ether, 1, 1, 1);
MODULE_DEPEND(dwc, miibus, 1, 1, 1);
1505