/*	$OpenBSD: dwqe.c,v 1.18 2024/03/29 08:19:40 stsp Exp $	*/
/*
 * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
 * Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Synopsys DesignWare Ethernet Quality-of-Service
 * controller.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/ic/dwqevar.h>
#include <dev/ic/dwqereg.h>

struct cfdriver dwqe_cd = {
	NULL, "dwqe", DV_IFNET
};

uint32_t dwqe_read(struct dwqe_softc *, bus_addr_t);
void	dwqe_write(struct dwqe_softc *, bus_addr_t, uint32_t);

int	dwqe_ioctl(struct ifnet *, u_long, caddr_t);
void	dwqe_start(struct ifqueue *);
void	dwqe_watchdog(struct ifnet *);

int	dwqe_media_change(struct ifnet *);
void	dwqe_media_status(struct ifnet *, struct ifmediareq *);

void	dwqe_mii_attach(struct dwqe_softc *);
int	dwqe_mii_readreg(struct device *, int, int);
void	dwqe_mii_writereg(struct device *, int, int, int);
void	dwqe_mii_statchg(struct device *);

void	dwqe_lladdr_read(struct dwqe_softc *, uint8_t *);
void	dwqe_lladdr_write(struct dwqe_softc *);

void	dwqe_tick(void *);
void	dwqe_rxtick(void *);

int	dwqe_intr(void *);
void	dwqe_tx_proc(struct dwqe_softc *);
void	dwqe_rx_proc(struct dwqe_softc *);

void	dwqe_up(struct dwqe_softc *);
void	dwqe_down(struct dwqe_softc *);
void	dwqe_iff(struct dwqe_softc *);
int	dwqe_encap(struct dwqe_softc *, struct mbuf *, int *, int *);

void	dwqe_reset(struct dwqe_softc *);

struct dwqe_dmamem *
	dwqe_dmamem_alloc(struct dwqe_softc *, bus_size_t, bus_size_t);
void	dwqe_dmamem_free(struct dwqe_softc *, struct dwqe_dmamem *);
struct mbuf *dwqe_alloc_mbuf(struct dwqe_softc *, bus_dmamap_t);
void	dwqe_fill_rx_ring(struct dwqe_softc *);

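/*
 * Bus glue (e.g. an fdt front-end) is expected to map the device
 * registers and initialize the shared softc fields before calling
 * dwqe_attach().  A minimal sketch of that assumed contract, using
 * the sc_* names referenced throughout this file:
 *
 *	sc->sc_iot = iot;		-- bus space tag from the parent
 *	bus_space_map(sc->sc_iot, addr, size, 0, &sc->sc_ioh);
 *	sc->sc_dmat = dmat;		-- DMA tag for rings and buffers
 *	dwqe_lladdr_read(sc, sc->sc_lladdr);
 *	dwqe_attach(sc);
 */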
int
dwqe_attach(struct dwqe_softc *sc)
{
	struct ifnet *ifp;
	uint32_t version, mode;
	int i;

	version = dwqe_read(sc, GMAC_VERSION);
	printf(": rev 0x%02x, address %s\n", version & GMAC_VERSION_SNPS_MASK,
	    ether_sprintf(sc->sc_lladdr));

	for (i = 0; i < 4; i++)
		sc->sc_hw_feature[i] = dwqe_read(sc, GMAC_MAC_HW_FEATURE(i));

	timeout_set(&sc->sc_phy_tick, dwqe_tick, sc);
	timeout_set(&sc->sc_rxto, dwqe_rxtick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = dwqe_ioctl;
	ifp->if_qstart = dwqe_start;
	ifp->if_watchdog = dwqe_watchdog;
	ifq_init_maxlen(&ifp->if_snd, DWQE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwqe_mii_readreg;
	sc->sc_mii.mii_writereg = dwqe_mii_writereg;
	sc->sc_mii.mii_statchg = dwqe_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwqe_media_change, dwqe_media_status);

	dwqe_reset(sc);

	/* Configure DMA engine. */
	mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);
	if (sc->sc_fixed_burst)
		mode |= GMAC_SYS_BUS_MODE_FB;
	if (sc->sc_mixed_burst)
		mode |= GMAC_SYS_BUS_MODE_MB;
	if (sc->sc_aal)
		mode |= GMAC_SYS_BUS_MODE_AAL;
	dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);

	/* Configure channel 0. */
	mode = dwqe_read(sc, GMAC_CHAN_CONTROL(0));
	if (sc->sc_8xpbl)
		mode |= GMAC_CHAN_CONTROL_8XPBL;
	dwqe_write(sc, GMAC_CHAN_CONTROL(0), mode);

	mode = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	mode &= ~GMAC_CHAN_TX_CONTROL_PBL_MASK;
	mode |= sc->sc_txpbl << GMAC_CHAN_TX_CONTROL_PBL_SHIFT;
	mode |= GMAC_CHAN_TX_CONTROL_OSP;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), mode);
	mode = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	mode &= ~GMAC_CHAN_RX_CONTROL_RPBL_MASK;
	mode |= sc->sc_rxpbl << GMAC_CHAN_RX_CONTROL_RPBL_SHIFT;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), mode);

	/* Configure AXI master. */
	if (sc->sc_axi_config) {
		int i;

		mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);

		mode &= ~GMAC_SYS_BUS_MODE_EN_LPI;
		if (sc->sc_lpi_en)
			mode |= GMAC_SYS_BUS_MODE_EN_LPI;
		mode &= ~GMAC_SYS_BUS_MODE_LPI_XIT_FRM;
		if (sc->sc_xit_frm)
			mode |= GMAC_SYS_BUS_MODE_LPI_XIT_FRM;

		mode &= ~GMAC_SYS_BUS_MODE_WR_OSR_LMT_MASK;
		mode |= (sc->sc_wr_osr_lmt << GMAC_SYS_BUS_MODE_WR_OSR_LMT_SHIFT);
		mode &= ~GMAC_SYS_BUS_MODE_RD_OSR_LMT_MASK;
		mode |= (sc->sc_rd_osr_lmt << GMAC_SYS_BUS_MODE_RD_OSR_LMT_SHIFT);

		for (i = 0; i < nitems(sc->sc_blen); i++) {
			switch (sc->sc_blen[i]) {
			case 256:
				mode |= GMAC_SYS_BUS_MODE_BLEN_256;
				break;
			case 128:
				mode |= GMAC_SYS_BUS_MODE_BLEN_128;
				break;
			case 64:
				mode |= GMAC_SYS_BUS_MODE_BLEN_64;
				break;
			case 32:
				mode |= GMAC_SYS_BUS_MODE_BLEN_32;
				break;
			case 16:
				mode |= GMAC_SYS_BUS_MODE_BLEN_16;
				break;
			case 8:
				mode |= GMAC_SYS_BUS_MODE_BLEN_8;
				break;
			case 4:
				mode |= GMAC_SYS_BUS_MODE_BLEN_4;
				break;
			}
		}

		dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);
	}

	if (!sc->sc_fixed_link)
		dwqe_mii_attach(sc);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Disable interrupts. */
	dwqe_write(sc, GMAC_INT_EN, 0);
	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);

	return 0;
}

void
dwqe_mii_attach(struct dwqe_softc *sc)
{
	int mii_flags = 0;

	switch (sc->sc_phy_mode) {
	case DWQE_PHY_MODE_RGMII:
		mii_flags |= MIIF_SETDELAY;
		break;
	case DWQE_PHY_MODE_RGMII_ID:
		mii_flags |= MIIF_SETDELAY | MIIF_RXID | MIIF_TXID;
		break;
	case DWQE_PHY_MODE_RGMII_RXID:
		mii_flags |= MIIF_SETDELAY | MIIF_RXID;
		break;
	case DWQE_PHY_MODE_RGMII_TXID:
		mii_flags |= MIIF_SETDELAY | MIIF_TXID;
		break;
	default:
		break;
	}

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
	    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, mii_flags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
}

uint32_t
dwqe_read(struct dwqe_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
}

void
dwqe_write(struct dwqe_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
}

void
dwqe_lladdr_read(struct dwqe_softc *sc, uint8_t *lladdr)
{
	uint32_t machi, maclo;

	machi = dwqe_read(sc, GMAC_MAC_ADDR0_HI);
	maclo = dwqe_read(sc, GMAC_MAC_ADDR0_LO);

	if (machi || maclo) {
		lladdr[0] = (maclo >> 0) & 0xff;
		lladdr[1] = (maclo >> 8) & 0xff;
		lladdr[2] = (maclo >> 16) & 0xff;
		lladdr[3] = (maclo >> 24) & 0xff;
		lladdr[4] = (machi >> 0) & 0xff;
		lladdr[5] = (machi >> 8) & 0xff;
	} else {
		ether_fakeaddr(&sc->sc_ac.ac_if);
	}
}

void
dwqe_lladdr_write(struct dwqe_softc *sc)
{
	dwqe_write(sc, GMAC_MAC_ADDR0_HI,
	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
	dwqe_write(sc, GMAC_MAC_ADDR0_LO,
	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
}

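/*
 * The Tx ring is a single producer/consumer ring: sc_tx_prod is the
 * slot the next packet will use, sc_tx_cons the oldest in-flight
 * slot.  dwqe_start() computes the free space with plain modular
 * arithmetic, e.g. prod = 5 and cons = 2 leaves DWQE_NTXDESC - 3
 * free slots; the "+ 1" in the ring-space check keeps the producer
 * from ever catching up with the consumer.
 */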
void
dwqe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct dwqe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx, left, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	idx = sc->sc_tx_prod;
	left = sc->sc_tx_cons;
	if (left <= idx)
		left += DWQE_NTXDESC;
	left -= idx;
	used = 0;

	for (;;) {
		if (used + DWQE_NTXSEGS + 1 > left) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		error = dwqe_encap(sc, m, &idx, &used);
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (used > 0) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/*
		 * Start the transmit process after the last in-use Tx
		 * descriptor's OWN bit has been updated.
		 */
		dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring) +
		    idx * sizeof(struct dwqe_desc));
	}
}

int
dwqe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct dwqe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				dwqe_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dwqe_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->sc_fixed_link)
			error = ENOTTY;
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dwqe_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
dwqe_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}

int
dwqe_media_change(struct ifnet *ifp)
{
	struct dwqe_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
dwqe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dwqe_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

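/*
 * MDIO access: a command word (MDC clock range, PHY address,
 * register number, opcode) is written to GMAC_MAC_MDIO_ADDR along
 * with the GB (busy) bit, which the controller clears once the
 * transaction on the MDIO bus has finished.  Both accessors below
 * poll for completion and give up after 2000 * 10us = 20ms.
 */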
int
dwqe_mii_readreg(struct device *self, int phy, int reg)
{
	struct dwqe_softc *sc = (void *)self;
	int n;

	dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
	    (sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT) |
	    (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDR_GOC_READ |
	    GMAC_MAC_MDIO_ADDR_GB);

	for (n = 0; n < 2000; n++) {
		delay(10);
		if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) & GMAC_MAC_MDIO_ADDR_GB) == 0)
			return dwqe_read(sc, GMAC_MAC_MDIO_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
dwqe_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct dwqe_softc *sc = (void *)self;
	int n;

	dwqe_write(sc, GMAC_MAC_MDIO_DATA, val);
	dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
	    (sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT) |
	    (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDR_GOC_WRITE |
	    GMAC_MAC_MDIO_ADDR_GB);

	for (n = 0; n < 2000; n++) {
		delay(10);
		if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) & GMAC_MAC_MDIO_ADDR_GB) == 0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
dwqe_mii_statchg(struct device *self)
{
	struct dwqe_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t conf;

	conf = dwqe_read(sc, GMAC_MAC_CONF);
	conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES);

	switch (ifp->if_baudrate) {
	case IF_Mbps(1000):
		sc->sc_link = 1;
		break;
	case IF_Mbps(100):
		conf |= GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES;
		sc->sc_link = 1;
		break;
	case IF_Mbps(10):
		conf |= GMAC_MAC_CONF_PS;
		sc->sc_link = 1;
		break;
	default:
		sc->sc_link = 0;
		return;
	}

	if (sc->sc_link == 0)
		return;

	conf &= ~GMAC_MAC_CONF_DM;
	if (ifp->if_link_state == LINK_STATE_FULL_DUPLEX)
		conf |= GMAC_MAC_CONF_DM;

	dwqe_write(sc, GMAC_MAC_CONF, conf);
}

void
dwqe_tick(void *arg)
{
	struct dwqe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_phy_tick, 1);
}

void
dwqe_rxtick(void *arg)
{
	struct dwqe_softc *sc = arg;
	int s;

	s = splnet();

	/* TODO: disable RXQ? */
	printf("%s:%d\n", __func__, __LINE__);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), 0);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), 0);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_rxring) >> 32);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));

	/* TODO: re-enable RXQ? */

	splx(s);
}

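/*
 * The status registers are assumed to be write-one-to-clear here:
 * dwqe_intr() writes back each status word it just read, which
 * acknowledges all pending causes at once before dispatching to the
 * Rx and Tx handlers.
 */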
int
dwqe_intr(void *arg)
{
	struct dwqe_softc *sc = arg;
	uint32_t reg;

	reg = dwqe_read(sc, GMAC_INT_STATUS);
	dwqe_write(sc, GMAC_INT_STATUS, reg);

	reg = dwqe_read(sc, GMAC_CHAN_STATUS(0));
	dwqe_write(sc, GMAC_CHAN_STATUS(0), reg);

	if (reg & GMAC_CHAN_STATUS_RI)
		dwqe_rx_proc(sc);

	if (reg & GMAC_CHAN_STATUS_TI)
		dwqe_tx_proc(sc);

	return (1);
}

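/*
 * Tx completion: walk the ring from sc_tx_cons until we hit a
 * descriptor the DMA engine still owns (TDES3_OWN), unloading and
 * freeing the mbuf attached to each completed slot.  Restarting the
 * ifqueue at the end is what unblocks dwqe_start() again after it
 * marked the queue oactive.
 */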
void
dwqe_tx_proc(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_desc *txd;
	struct dwqe_buf *txb;
	int idx, txfree;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring), 0,
	    DWQE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cons != sc->sc_tx_prod) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWQE_NTXDESC);

		txd = &sc->sc_txdesc[idx];
		if (txd->sd_tdes3 & TDES3_OWN)
			break;

		if (txd->sd_tdes3 & TDES3_ES)
			ifp->if_oerrors++;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;

		if (sc->sc_tx_cons == (DWQE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_tdes3 = 0;
	}

	if (sc->sc_tx_cons == sc->sc_tx_prod)
		ifp->if_timer = 0;

	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}

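/*
 * Rx completion mirrors Tx: finished descriptors are collected into
 * an mbuf_list and handed to the stack in a single ifiq_input()
 * call.  The if_rxr accounting provides back-pressure: the ring is
 * only refilled up to the number of slots if_rxr_get() grants, and
 * reporting a livelock makes it grant fewer slots next time.
 */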
void
dwqe_rx_proc(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_desc *rxd;
	struct dwqe_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len, cnt, put;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
	    DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cnt = if_rxr_inuse(&sc->sc_rx_ring);
	put = 0;
	while (put < cnt) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWQE_NRXDESC);

		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_tdes3 & RDES3_OWN)
			break;

		len = rxd->sd_tdes3 & RDES3_LENGTH;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		m = rxb->tb_m;
		rxb->tb_m = NULL;

		if (rxd->sd_tdes3 & RDES3_ES) {
			ifp->if_ierrors++;
			m_freem(m);
		} else {
			/* Strip off CRC. */
			len -= ETHER_CRC_LEN;
			KASSERT(len > 0);

			m->m_pkthdr.len = m->m_len = len;

			ml_enqueue(&ml, m);
		}

		put++;
		if (sc->sc_rx_cons == (DWQE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	if_rxr_put(&sc->sc_rx_ring, put);
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
	    DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

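/*
 * A note on the MTL queue sizing done below: the FIFO sizes are
 * taken from the glue (sc_rxfifo_size/sc_txfifo_size) if set,
 * otherwise decoded from HW_FEATURE1, where the field encodes
 * 128 << n bytes.  RQS/TQS count 256-byte blocks minus one, so a
 * 4096-byte FIFO yields 4096 / 256 - 1 = 15.
 */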
void
dwqe_up(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_buf *txb, *rxb;
	uint32_t mode, reg, fifosz, tqs, rqs;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwqe_dmamem_alloc(sc,
	    DWQE_NTXDESC * sizeof(struct dwqe_desc), 8);
	sc->sc_txdesc = DWQE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWQE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWQE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
	    0, DWQE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_txring) >> 32);
	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));
	dwqe_write(sc, GMAC_CHAN_TX_RING_LEN(0), DWQE_NTXDESC - 1);
	dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = dwqe_dmamem_alloc(sc,
	    DWQE_NRXDESC * sizeof(struct dwqe_desc), 8);
	sc->sc_rxdesc = DWQE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWQE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	if_rxr_init(&sc->sc_rx_ring, 2, DWQE_NRXDESC - 1);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_rxring) >> 32);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));
	dwqe_write(sc, GMAC_CHAN_RX_RING_LEN(0), DWQE_NRXDESC - 1);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwqe_lladdr_write(sc);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwqe_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	dwqe_write(sc, GMAC_MAC_1US_TIC_CTR, (sc->sc_clkrate / 1000000) - 1);

	/* Start receive DMA */
	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	reg |= GMAC_CHAN_RX_CONTROL_SR;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);

	/* Start transmit DMA */
	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	reg |= GMAC_CHAN_TX_CONTROL_ST;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);

	mode = dwqe_read(sc, GMAC_MTL_CHAN_RX_OP_MODE(0));
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RSF;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RTC_MASK;
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RTC_128;
	} else {
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RSF;
	}
	mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RQS_MASK;
	if (sc->sc_rxfifo_size)
		fifosz = sc->sc_rxfifo_size;
	else
		fifosz = (128 <<
		    GMAC_MAC_HW_FEATURE1_RXFIFOSIZE(sc->sc_hw_feature[1]));
	rqs = fifosz / 256 - 1;
	mode |= (rqs << GMAC_MTL_CHAN_RX_OP_MODE_RQS_SHIFT) &
	    GMAC_MTL_CHAN_RX_OP_MODE_RQS_MASK;
	if (fifosz >= 4096) {
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_EHFC;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RFD_MASK;
		mode |= 0x3 << GMAC_MTL_CHAN_RX_OP_MODE_RFD_SHIFT;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RFA_MASK;
		mode |= 0x1 << GMAC_MTL_CHAN_RX_OP_MODE_RFA_SHIFT;
	}
	dwqe_write(sc, GMAC_MTL_CHAN_RX_OP_MODE(0), mode);

	mode = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TSF;
		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TTC_MASK;
		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TTC_512;
	} else {
		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TSF;
	}
	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TXQEN_MASK;
	mode |= GMAC_MTL_CHAN_TX_OP_MODE_TXQEN;
	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TQS_MASK;
	if (sc->sc_txfifo_size)
		fifosz = sc->sc_txfifo_size;
	else
		fifosz = (128 <<
		    GMAC_MAC_HW_FEATURE1_TXFIFOSIZE(sc->sc_hw_feature[1]));
	tqs = (fifosz / 256) - 1;
	mode |= (tqs << GMAC_MTL_CHAN_TX_OP_MODE_TQS_SHIFT) &
	    GMAC_MTL_CHAN_TX_OP_MODE_TQS_MASK;
	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), mode);

	reg = dwqe_read(sc, GMAC_QX_TX_FLOW_CTRL(0));
	reg |= 0xffffU << GMAC_QX_TX_FLOW_CTRL_PT_SHIFT;
	reg |= GMAC_QX_TX_FLOW_CTRL_TFE;
	dwqe_write(sc, GMAC_QX_TX_FLOW_CTRL(0), reg);
	reg = dwqe_read(sc, GMAC_RX_FLOW_CTRL);
	reg |= GMAC_RX_FLOW_CTRL_RFE;
	dwqe_write(sc, GMAC_RX_FLOW_CTRL, reg);

	dwqe_write(sc, GMAC_RXQ_CTRL0, GMAC_RXQ_CTRL0_DCB_QUEUE_EN(0));

	dwqe_write(sc, GMAC_MAC_CONF, dwqe_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_BE | GMAC_MAC_CONF_JD | GMAC_MAC_CONF_JE |
	    GMAC_MAC_CONF_DCRS | GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);

	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0),
	    GMAC_CHAN_INTR_ENA_NIE |
	    GMAC_CHAN_INTR_ENA_AIE |
	    GMAC_CHAN_INTR_ENA_FBE |
	    GMAC_CHAN_INTR_ENA_RIE |
	    GMAC_CHAN_INTR_ENA_TIE);

	if (!sc->sc_fixed_link)
		timeout_add_sec(&sc->sc_phy_tick, 1);
}

void
dwqe_down(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_buf *txb, *rxb;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_rxto);
	if (!sc->sc_fixed_link)
		timeout_del(&sc->sc_phy_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Disable receiver */
	reg = dwqe_read(sc, GMAC_MAC_CONF);
	reg &= ~GMAC_MAC_CONF_RE;
	dwqe_write(sc, GMAC_MAC_CONF, reg);

	/* Stop receive DMA */
	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	reg &= ~GMAC_CHAN_RX_CONTROL_SR;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);

	/* Stop transmit DMA */
	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	reg &= ~GMAC_CHAN_TX_CONTROL_ST;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);

	/* Flush data in the TX FIFO */
	reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
	reg |= GMAC_MTL_CHAN_TX_OP_MODE_FTQ;
	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), reg);
	/* Wait for flush to complete */
	for (i = 10000; i > 0; i--) {
		reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
		if ((reg & GMAC_MTL_CHAN_TX_OP_MODE_FTQ) == 0)
			break;
		delay(1);
	}
	if (i == 0) {
		printf("%s: timeout flushing TX queue\n",
		    sc->sc_dev.dv_xname);
	}

	/* Disable transmitter */
	reg = dwqe_read(sc, GMAC_MAC_CONF);
	reg &= ~GMAC_MAC_CONF_TE;
	dwqe_write(sc, GMAC_MAC_CONF, reg);

	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	for (i = 0; i < DWQE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwqe_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < DWQE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwqe_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

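/*
 * Multicast filtering uses a 64-bin hash table: the little-endian
 * CRC32 of the address is complemented and bit-reversed, and the
 * resulting 6-bit index selects a bin; bit 5 of the index picks one
 * of the two 32-bit hash registers and bits 4-0 the bit within it.
 */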
void
dwqe_iff(struct dwqe_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg;

	reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= GMAC_MAC_PACKET_FILTER_PM;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= GMAC_MAC_PACKET_FILTER_PR |
			    GMAC_MAC_PACKET_FILTER_PCF_ALL;
	} else {
		reg |= GMAC_MAC_PACKET_FILTER_HMC;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwqe_lladdr_write(sc);

	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG0, hash[0]);
	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG1, hash[1]);

	dwqe_write(sc, GMAC_MAC_PACKET_FILTER, reg);
}

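/*
 * dwqe_encap() builds the descriptor chain for one packet.  OWN is
 * set immediately on every descriptor except the first; only after
 * the rest of the chain has been written (and synced) does the
 * first descriptor's OWN bit flip, so the DMA engine can never see
 * a half-built chain.
 */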
int
dwqe_encap(struct dwqe_softc *sc, struct mbuf *m, int *idx, int *used)
{
	struct dwqe_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		/* TODO: check for 32-bit vs 64-bit support */
		KASSERT((map->dm_segs[i].ds_addr >> 32) == 0);

		txd->sd_tdes0 = (uint32_t)map->dm_segs[i].ds_addr;
		txd->sd_tdes1 = (uint32_t)(map->dm_segs[i].ds_addr >> 32);
		txd->sd_tdes2 = map->dm_segs[i].ds_len;
		txd->sd_tdes3 = m->m_pkthdr.len;
		if (i == 0)
			txd->sd_tdes3 |= TDES3_FS;
		if (i == (map->dm_nsegs - 1)) {
			txd->sd_tdes2 |= TDES2_IC;
			txd->sd_tdes3 |= TDES3_LS;
		}
		if (i != 0)
			txd->sd_tdes3 |= TDES3_OWN;

		bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (DWQE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

	txd_start->sd_tdes3 |= TDES3_OWN;
	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	*idx = frag;
	*used += map->dm_nsegs;

	return (0);
}

void
dwqe_reset(struct dwqe_softc *sc)
{
	int n;

	dwqe_write(sc, GMAC_BUS_MODE, dwqe_read(sc, GMAC_BUS_MODE) |
	    GMAC_BUS_MODE_SWR);

	for (n = 0; n < 30000; n++) {
		if ((dwqe_read(sc, GMAC_BUS_MODE) &
		    GMAC_BUS_MODE_SWR) == 0)
			return;
		delay(10);
	}

	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
}

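/*
 * dwqe_dmamem_alloc() is the usual four-step bus_dma(9) dance:
 * create a map, allocate the physical segment, map it into kernel
 * virtual address space, then load the map.  Each failure path
 * unwinds exactly the steps taken so far.
 */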
struct dwqe_dmamem *
dwqe_dmamem_alloc(struct dwqe_softc *sc, bus_size_t size, bus_size_t align)
{
	struct dwqe_dmamem *tdm;
	int nsegs;

	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(tdm->tdm_kva, size);

	return (tdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF, 0);

	return (NULL);
}

void
dwqe_dmamem_free(struct dwqe_softc *sc, struct dwqe_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}

struct mbuf *
dwqe_alloc_mbuf(struct dwqe_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}

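/*
 * Refill the Rx ring from sc_rx_prod.  If mbuf allocation fails and
 * the ring drains completely, the sc_rxto timeout (dwqe_rxtick) is
 * scheduled as a last resort to reset and repopulate the ring.
 */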
void
dwqe_fill_rx_ring(struct dwqe_softc *sc)
{
	struct dwqe_desc *rxd;
	struct dwqe_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, DWQE_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = dwqe_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		/* TODO: check for 32-bit vs 64-bit support */
		KASSERT((rxb->tb_map->dm_segs[0].ds_addr >> 32) == 0);

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		rxd->sd_tdes0 = (uint32_t)rxb->tb_map->dm_segs[0].ds_addr;
		rxd->sd_tdes1 = (uint32_t)(rxb->tb_map->dm_segs[0].ds_addr >> 32);
		rxd->sd_tdes2 = 0;
		rxd->sd_tdes3 = RDES3_OWN | RDES3_IC | RDES3_BUF1V;

		if (sc->sc_rx_prod == (DWQE_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	dwqe_write(sc, GMAC_CHAN_RX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring) +
	    sc->sc_rx_prod * sizeof(*rxd));

	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}
1206