/*	$OpenBSD: dwqe.c,v 1.22 2024/06/05 10:19:55 stsp Exp $	*/
/*
 * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
 * Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Synopsys Designware ethernet controller.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/ic/dwqevar.h>
#include <dev/ic/dwqereg.h>

struct cfdriver dwqe_cd = {
	NULL, "dwqe", DV_IFNET
};

uint32_t dwqe_read(struct dwqe_softc *, bus_addr_t);
void	dwqe_write(struct dwqe_softc *, bus_addr_t, uint32_t);

int	dwqe_ioctl(struct ifnet *, u_long, caddr_t);
void	dwqe_start(struct ifqueue *);
void	dwqe_watchdog(struct ifnet *);

int	dwqe_media_change(struct ifnet *);
void	dwqe_media_status(struct ifnet *, struct ifmediareq *);

void	dwqe_mii_attach(struct dwqe_softc *);
int	dwqe_mii_readreg(struct device *, int, int);
void	dwqe_mii_writereg(struct device *, int, int, int);
void	dwqe_mii_statchg(struct device *);

void	dwqe_lladdr_read(struct dwqe_softc *, uint8_t *);
void	dwqe_lladdr_write(struct dwqe_softc *);

void	dwqe_tick(void *);
void	dwqe_rxtick(void *);

int	dwqe_intr(void *);
void	dwqe_tx_proc(struct dwqe_softc *);
void	dwqe_rx_proc(struct dwqe_softc *);

void	dwqe_up(struct dwqe_softc *);
void	dwqe_down(struct dwqe_softc *);
void	dwqe_iff(struct dwqe_softc *);
int	dwqe_encap(struct dwqe_softc *, struct mbuf *, int *, int *);

void	dwqe_reset(struct dwqe_softc *);

struct dwqe_dmamem *
	dwqe_dmamem_alloc(struct dwqe_softc *, bus_size_t, bus_size_t);
void	dwqe_dmamem_free(struct dwqe_softc *, struct dwqe_dmamem *);
struct mbuf *dwqe_alloc_mbuf(struct dwqe_softc *, bus_dmamap_t);
void	dwqe_fill_rx_ring(struct dwqe_softc *);

int
dwqe_have_tx_csum_offload(struct dwqe_softc *sc)
{
	return (sc->sc_hw_feature[0] & GMAC_MAC_HW_FEATURE0_TXCOESEL);
}

int
dwqe_have_tx_vlan_offload(struct dwqe_softc *sc)
{
#if NVLAN > 0
	return (sc->sc_hw_feature[0] & GMAC_MAC_HW_FEATURE0_SAVLANINS);
#else
	return 0;
#endif
}

void
dwqe_set_vlan_rx_mode(struct dwqe_softc *sc)
{
#if NVLAN > 0
	uint32_t reg;

	/* Enable outer VLAN tag stripping on Rx. */
	reg = dwqe_read(sc, GMAC_VLAN_TAG_CTRL);
	reg |= GMAC_VLAN_TAG_CTRL_EVLRXS | GMAC_VLAN_TAG_CTRL_STRIP_ALWAYS;
	dwqe_write(sc, GMAC_VLAN_TAG_CTRL, reg);
#endif
}

void
dwqe_set_vlan_tx_mode(struct dwqe_softc *sc)
{
#if NVLAN > 0
	uint32_t reg;

	reg = dwqe_read(sc, GMAC_VLAN_TAG_INCL);

	/* Enable insertion of outer VLAN tag. */
	reg |= GMAC_VLAN_TAG_INCL_INSERT;

	/*
	 * Generate C-VLAN tags (type 0x8100, 802.1Q). Setting this
	 * bit would result in S-VLAN tags (type 0x88A8, 802.1ad).
	 */
	reg &= ~GMAC_VLAN_TAG_INCL_CSVL;

	/* Use VLAN tags provided in Tx context descriptors. */
	reg |= GMAC_VLAN_TAG_INCL_VLTI;

	dwqe_write(sc, GMAC_VLAN_TAG_INCL, reg);
#endif
}

int
dwqe_attach(struct dwqe_softc *sc)
{
	struct ifnet *ifp;
	uint32_t version, mode;
	int i;

	version = dwqe_read(sc, GMAC_VERSION);
	printf(": rev 0x%02x, address %s\n", version & GMAC_VERSION_SNPS_MASK,
	    ether_sprintf(sc->sc_lladdr));

	for (i = 0; i < 4; i++)
		sc->sc_hw_feature[i] = dwqe_read(sc, GMAC_MAC_HW_FEATURE(i));

	timeout_set(&sc->sc_phy_tick, dwqe_tick, sc);
	timeout_set(&sc->sc_rxto, dwqe_rxtick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = dwqe_ioctl;
	ifp->if_qstart = dwqe_start;
	ifp->if_watchdog = dwqe_watchdog;
	ifq_init_maxlen(&ifp->if_snd, DWQE_NTXDESC - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (dwqe_have_tx_vlan_offload(sc))
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	if (dwqe_have_tx_csum_offload(sc)) {
		ifp->if_capabilities |= (IFCAP_CSUM_IPv4 |
		    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
		    IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6);
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = dwqe_mii_readreg;
	sc->sc_mii.mii_writereg = dwqe_mii_writereg;
	sc->sc_mii.mii_statchg = dwqe_mii_statchg;

	ifmedia_init(&sc->sc_media, 0, dwqe_media_change, dwqe_media_status);

	dwqe_reset(sc);

	/* Configure DMA engine. */
	mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);
	if (sc->sc_fixed_burst)
		mode |= GMAC_SYS_BUS_MODE_FB;
	if (sc->sc_mixed_burst)
		mode |= GMAC_SYS_BUS_MODE_MB;
	if (sc->sc_aal)
		mode |= GMAC_SYS_BUS_MODE_AAL;
	dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);

	/* Configure channel 0. */
	mode = dwqe_read(sc, GMAC_CHAN_CONTROL(0));
	if (sc->sc_8xpbl)
		mode |= GMAC_CHAN_CONTROL_8XPBL;
	dwqe_write(sc, GMAC_CHAN_CONTROL(0), mode);

	mode = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	mode &= ~GMAC_CHAN_TX_CONTROL_PBL_MASK;
	mode |= sc->sc_txpbl << GMAC_CHAN_TX_CONTROL_PBL_SHIFT;
	mode |= GMAC_CHAN_TX_CONTROL_OSP;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), mode);
	mode = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	mode &= ~GMAC_CHAN_RX_CONTROL_RPBL_MASK;
	mode |= sc->sc_rxpbl << GMAC_CHAN_RX_CONTROL_RPBL_SHIFT;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), mode);

	/* Configure AXI master. */
	if (sc->sc_axi_config) {
		int i;

		mode = dwqe_read(sc, GMAC_SYS_BUS_MODE);

		mode &= ~GMAC_SYS_BUS_MODE_EN_LPI;
		if (sc->sc_lpi_en)
			mode |= GMAC_SYS_BUS_MODE_EN_LPI;
		mode &= ~GMAC_SYS_BUS_MODE_LPI_XIT_FRM;
		if (sc->sc_xit_frm)
			mode |= GMAC_SYS_BUS_MODE_LPI_XIT_FRM;

		mode &= ~GMAC_SYS_BUS_MODE_WR_OSR_LMT_MASK;
		mode |= (sc->sc_wr_osr_lmt << GMAC_SYS_BUS_MODE_WR_OSR_LMT_SHIFT);
		mode &= ~GMAC_SYS_BUS_MODE_RD_OSR_LMT_MASK;
		mode |= (sc->sc_rd_osr_lmt << GMAC_SYS_BUS_MODE_RD_OSR_LMT_SHIFT);

		for (i = 0; i < nitems(sc->sc_blen); i++) {
			switch (sc->sc_blen[i]) {
			case 256:
				mode |= GMAC_SYS_BUS_MODE_BLEN_256;
				break;
			case 128:
				mode |= GMAC_SYS_BUS_MODE_BLEN_128;
				break;
			case 64:
				mode |= GMAC_SYS_BUS_MODE_BLEN_64;
				break;
			case 32:
				mode |= GMAC_SYS_BUS_MODE_BLEN_32;
				break;
			case 16:
				mode |= GMAC_SYS_BUS_MODE_BLEN_16;
				break;
			case 8:
				mode |= GMAC_SYS_BUS_MODE_BLEN_8;
				break;
			case 4:
				mode |= GMAC_SYS_BUS_MODE_BLEN_4;
				break;
			}
		}

		dwqe_write(sc, GMAC_SYS_BUS_MODE, mode);
	}

	if (!sc->sc_fixed_link)
		dwqe_mii_attach(sc);

	/*
	 * All devices support VLAN tag stripping on Rx but inserting
	 * VLAN tags during Tx is an optional feature.
	 */
	dwqe_set_vlan_rx_mode(sc);
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		dwqe_set_vlan_tx_mode(sc);

	if_attach(ifp);
	ether_ifattach(ifp);

	/* Disable interrupts. */
	dwqe_write(sc, GMAC_INT_EN, 0);
	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);
	dwqe_write(sc, GMAC_MMC_RX_INT_MASK, 0xffffffff);
	dwqe_write(sc, GMAC_MMC_TX_INT_MASK, 0xffffffff);

	return 0;
}

void
dwqe_mii_attach(struct dwqe_softc *sc)
{
	int mii_flags = 0;

	switch (sc->sc_phy_mode) {
	case DWQE_PHY_MODE_RGMII:
		mii_flags |= MIIF_SETDELAY;
		break;
	case DWQE_PHY_MODE_RGMII_ID:
		mii_flags |= MIIF_SETDELAY | MIIF_RXID | MIIF_TXID;
		break;
	case DWQE_PHY_MODE_RGMII_RXID:
		mii_flags |= MIIF_SETDELAY | MIIF_RXID;
		break;
	case DWQE_PHY_MODE_RGMII_TXID:
		mii_flags |= MIIF_SETDELAY | MIIF_TXID;
		break;
	default:
		break;
	}

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
	    (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, mii_flags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
}

uint32_t
dwqe_read(struct dwqe_softc *sc, bus_addr_t addr)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
}

void
dwqe_write(struct dwqe_softc *sc, bus_addr_t addr, uint32_t data)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
}

void
dwqe_lladdr_read(struct dwqe_softc *sc, uint8_t *lladdr)
{
	uint32_t machi, maclo;

	machi = dwqe_read(sc, GMAC_MAC_ADDR0_HI);
	maclo = dwqe_read(sc, GMAC_MAC_ADDR0_LO);

	if (machi || maclo) {
		lladdr[0] = (maclo >> 0) & 0xff;
		lladdr[1] = (maclo >> 8) & 0xff;
		lladdr[2] = (maclo >> 16) & 0xff;
		lladdr[3] = (maclo >> 24) & 0xff;
		lladdr[4] = (machi >> 0) & 0xff;
		lladdr[5] = (machi >> 8) & 0xff;
	} else {
		ether_fakeaddr(&sc->sc_ac.ac_if);
	}
}

void
dwqe_lladdr_write(struct dwqe_softc *sc)
{
	dwqe_write(sc, GMAC_MAC_ADDR0_HI,
	    sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
	dwqe_write(sc, GMAC_MAC_ADDR0_LO,
	    sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
	    sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
}

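/*
 * Queue start routine: map mbufs from the send queue onto free Tx
 * descriptors and kick the DMA engine by updating the ring tail
 * pointer. Stops early and marks the queue oactive when there is not
 * enough ring space for another packet (including a possible VLAN
 * context descriptor).
 */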
void
dwqe_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct dwqe_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int error, idx, left, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	idx = sc->sc_tx_prod;
	left = sc->sc_tx_cons;
	if (left <= idx)
		left += DWQE_NTXDESC;
	left -= idx;
	used = 0;

	for (;;) {
		/* VLAN tags require an extra Tx context descriptor. */
		if (used + DWQE_NTXSEGS + 2 > left) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		error = dwqe_encap(sc, m, &idx, &used);
		if (error == EFBIG) {
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (used > 0) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/*
		 * Start the transmit process after the last in-use Tx
		 * descriptor's OWN bit has been updated.
		 */
		dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring) +
		    idx * sizeof(struct dwqe_desc));
	}
}

int
dwqe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct dwqe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				dwqe_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dwqe_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->sc_fixed_link)
			error = ENOTTY;
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			dwqe_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
dwqe_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}

int
dwqe_media_change(struct ifnet *ifp)
{
	struct dwqe_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return (0);
}

void
dwqe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dwqe_softc *sc = ifp->if_softc;

	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		ifmr->ifm_active = sc->sc_mii.mii_media_active;
		ifmr->ifm_status = sc->sc_mii.mii_media_status;
	}
}

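/*
 * MDIO access: program the MDIO address register with the clock range,
 * PHY address, register address and a read/write command, then poll
 * the busy (GB) bit until the transaction completes or times out.
 */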
int
dwqe_mii_readreg(struct device *self, int phy, int reg)
{
	struct dwqe_softc *sc = (void *)self;
	int n;

	dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
	    (sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT) |
	    (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDR_GOC_READ |
	    GMAC_MAC_MDIO_ADDR_GB);

	for (n = 0; n < 2000; n++) {
		delay(10);
		if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) & GMAC_MAC_MDIO_ADDR_GB) == 0)
			return dwqe_read(sc, GMAC_MAC_MDIO_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

void
dwqe_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct dwqe_softc *sc = (void *)self;
	int n;

	dwqe_write(sc, GMAC_MAC_MDIO_DATA, val);
	dwqe_write(sc, GMAC_MAC_MDIO_ADDR,
	    (sc->sc_clk << GMAC_MAC_MDIO_ADDR_CR_SHIFT) |
	    (phy << GMAC_MAC_MDIO_ADDR_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDR_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDR_GOC_WRITE |
	    GMAC_MAC_MDIO_ADDR_GB);

	for (n = 0; n < 2000; n++) {
		delay(10);
		if ((dwqe_read(sc, GMAC_MAC_MDIO_ADDR) & GMAC_MAC_MDIO_ADDR_GB) == 0)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

void
dwqe_mii_statchg(struct device *self)
{
	struct dwqe_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t conf;

	conf = dwqe_read(sc, GMAC_MAC_CONF);
	conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES);

	switch (ifp->if_baudrate) {
	case IF_Mbps(1000):
		sc->sc_link = 1;
		break;
	case IF_Mbps(100):
		conf |= GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES;
		sc->sc_link = 1;
		break;
	case IF_Mbps(10):
		conf |= GMAC_MAC_CONF_PS;
		sc->sc_link = 1;
		break;
	default:
		sc->sc_link = 0;
		return;
	}

	if (sc->sc_link == 0)
		return;

	conf &= ~GMAC_MAC_CONF_DM;
	if (ifp->if_link_state == LINK_STATE_FULL_DUPLEX)
		conf |= GMAC_MAC_CONF_DM;

	dwqe_write(sc, GMAC_MAC_CONF, conf);
}

void
dwqe_tick(void *arg)
{
	struct dwqe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_phy_tick, 1);
}

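/*
 * Rx recovery timeout: runs only after the Rx ring has completely run
 * out of buffers (see dwqe_fill_rx_ring). Re-programs the ring base
 * address and refills the ring so the DMA engine can make progress
 * again.
 */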
void
dwqe_rxtick(void *arg)
{
	struct dwqe_softc *sc = arg;
	int s;

	s = splnet();

	/* TODO: disable RXQ? */
	printf("%s:%d\n", __func__, __LINE__);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), 0);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), 0);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_rxring) >> 32);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));

	/* TODO: re-enable RXQ? */

	splx(s);
}

int
dwqe_intr(void *arg)
{
	struct dwqe_softc *sc = arg;
	uint32_t reg;

	reg = dwqe_read(sc, GMAC_INT_STATUS);
	dwqe_write(sc, GMAC_INT_STATUS, reg);

	reg = dwqe_read(sc, GMAC_CHAN_STATUS(0));
	dwqe_write(sc, GMAC_CHAN_STATUS(0), reg);

	if (reg & GMAC_CHAN_STATUS_RI)
		dwqe_rx_proc(sc);

	if (reg & GMAC_CHAN_STATUS_TI)
		dwqe_tx_proc(sc);

	return (1);
}

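/*
 * Tx completion: walk the ring from the consumer index, reclaim
 * descriptors the hardware has handed back (OWN bit clear), unload and
 * free the associated mbufs, and restart the send queue if it had been
 * marked oactive.
 */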
void
dwqe_tx_proc(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_desc *txd;
	struct dwqe_buf *txb;
	int idx, txfree;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring), 0,
	    DWQE_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	txfree = 0;
	while (sc->sc_tx_cons != sc->sc_tx_prod) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < DWQE_NTXDESC);

		txd = &sc->sc_txdesc[idx];
		if (txd->sd_tdes3 & TDES3_OWN)
			break;

		if (txd->sd_tdes3 & TDES3_ES)
			ifp->if_oerrors++;

		txb = &sc->sc_txbuf[idx];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		txfree++;

		if (sc->sc_tx_cons == (DWQE_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->sd_tdes3 = 0;
	}

	if (sc->sc_tx_cons == sc->sc_tx_prod)
		ifp->if_timer = 0;

	if (txfree) {
		if (ifq_is_oactive(&ifp->if_snd))
			ifq_restart(&ifp->if_snd);
	}
}

int
dwqe_have_rx_csum_offload(struct dwqe_softc *sc)
{
	return (sc->sc_hw_feature[0] & GMAC_MAC_HW_FEATURE0_RXCOESEL);
}

void
dwqe_rx_csum(struct dwqe_softc *sc, struct mbuf *m, struct dwqe_desc *rxd)
{
	uint16_t csum_flags = 0;

	/*
	 * Checksum offload must be supported, the Last-Descriptor bit
	 * must be set, RDES1 must be valid, checksumming must not
	 * have been bypassed (happens for unknown packet types), and
	 * an IP header must have been detected.
	 */
	if (!dwqe_have_rx_csum_offload(sc) ||
	    (rxd->sd_tdes3 & RDES3_LD) == 0 ||
	    (rxd->sd_tdes3 & RDES3_RDES1_VALID) == 0 ||
	    (rxd->sd_tdes1 & RDES1_IP_CSUM_BYPASS) ||
	    (rxd->sd_tdes1 & (RDES1_IPV4_HDR | RDES1_IPV6_HDR)) == 0)
		return;

	/* If the IP header checksum is invalid then the payload is ignored. */
	if (rxd->sd_tdes1 & RDES1_IP_HDR_ERROR) {
		if (rxd->sd_tdes1 & RDES1_IPV4_HDR)
			csum_flags |= M_IPV4_CSUM_IN_BAD;
	} else {
		if (rxd->sd_tdes1 & RDES1_IPV4_HDR)
			csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Detect payload type and corresponding checksum errors. */
		switch (rxd->sd_tdes1 & RDES1_IP_PAYLOAD_TYPE) {
		case RDES1_IP_PAYLOAD_UDP:
			if (rxd->sd_tdes1 & RDES1_IP_PAYLOAD_ERROR)
				csum_flags |= M_UDP_CSUM_IN_BAD;
			else
				csum_flags |= M_UDP_CSUM_IN_OK;
			break;
		case RDES1_IP_PAYLOAD_TCP:
			if (rxd->sd_tdes1 & RDES1_IP_PAYLOAD_ERROR)
				csum_flags |= M_TCP_CSUM_IN_BAD;
			else
				csum_flags |= M_TCP_CSUM_IN_OK;
			break;
		case RDES1_IP_PAYLOAD_ICMP:
			if (rxd->sd_tdes1 & RDES1_IP_PAYLOAD_ERROR)
				csum_flags |= M_ICMP_CSUM_IN_BAD;
			else
				csum_flags |= M_ICMP_CSUM_IN_OK;
			break;
		default:
			break;
		}
	}

	m->m_pkthdr.csum_flags |= csum_flags;
}

void
dwqe_vlan_strip(struct dwqe_softc *sc, struct mbuf *m, struct dwqe_desc *rxd)
{
#if NVLAN > 0
	uint16_t tag;

	if ((rxd->sd_tdes3 & RDES3_RDES0_VALID) &&
	    (rxd->sd_tdes3 & RDES3_LD)) {
		tag = rxd->sd_tdes0 & RDES0_OVT;
		m->m_pkthdr.ether_vtag = le16toh(tag);
		m->m_flags |= M_VLANTAG;
	}
#endif
}

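/*
 * Rx completion: harvest finished descriptors, hand the mbufs to the
 * network stack after checksum and VLAN post-processing, then refill
 * the ring with fresh buffers.
 */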
void
dwqe_rx_proc(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_desc *rxd;
	struct dwqe_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len, cnt, put;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
	    DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cnt = if_rxr_inuse(&sc->sc_rx_ring);
	put = 0;
	while (put < cnt) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < DWQE_NRXDESC);

		rxd = &sc->sc_rxdesc[idx];
		if (rxd->sd_tdes3 & RDES3_OWN)
			break;

		len = rxd->sd_tdes3 & RDES3_LENGTH;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->tb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		m = rxb->tb_m;
		rxb->tb_m = NULL;

		if (rxd->sd_tdes3 & RDES3_ES) {
			ifp->if_ierrors++;
			m_freem(m);
		} else {
			/* Strip off CRC. */
			len -= ETHER_CRC_LEN;
			KASSERT(len > 0);

			m->m_pkthdr.len = m->m_len = len;

			dwqe_rx_csum(sc, m, rxd);
			dwqe_vlan_strip(sc, m, rxd);
			ml_enqueue(&ml, m);
		}

		put++;
		if (sc->sc_rx_cons == (DWQE_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	if_rxr_put(&sc->sc_rx_ring, put);
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring), 0,
	    DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

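/*
 * Bring the interface up: allocate and program the Tx/Rx descriptor
 * rings, set the MAC address and Rx filter, start the DMA channels and
 * MTL queues, and enable flow control and interrupts.
 */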
void
dwqe_up(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_buf *txb, *rxb;
	uint32_t mode, reg, fifosz, tqs, rqs;
	int i;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = dwqe_dmamem_alloc(sc,
	    DWQE_NTXDESC * sizeof(struct dwqe_desc), 8);
	sc->sc_txdesc = DWQE_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < DWQE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWQE_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
	    0, DWQE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_txring) >> 32);
	dwqe_write(sc, GMAC_CHAN_TX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));
	dwqe_write(sc, GMAC_CHAN_TX_RING_LEN(0), DWQE_NTXDESC - 1);
	dwqe_write(sc, GMAC_CHAN_TX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_txring));

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = dwqe_dmamem_alloc(sc,
	    DWQE_NRXDESC * sizeof(struct dwqe_desc), 8);
	sc->sc_rxdesc = DWQE_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct dwqe_buf) * DWQE_NRXDESC,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < DWQE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	if_rxr_init(&sc->sc_rx_ring, 2, DWQE_NRXDESC - 1);

	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR_HI(0), DWQE_DMA_DVA(sc->sc_rxring) >> 32);
	dwqe_write(sc, GMAC_CHAN_RX_BASE_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring));
	dwqe_write(sc, GMAC_CHAN_RX_RING_LEN(0), DWQE_NRXDESC - 1);

	sc->sc_rx_prod = sc->sc_rx_cons = 0;
	dwqe_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_rxring),
	    0, DWQE_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	dwqe_lladdr_write(sc);

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	dwqe_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	dwqe_write(sc, GMAC_MAC_1US_TIC_CTR, (sc->sc_clkrate / 1000000) - 1);

	/* Start receive DMA */
	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	reg |= GMAC_CHAN_RX_CONTROL_SR;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);

	/* Start transmit DMA */
	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	reg |= GMAC_CHAN_TX_CONTROL_ST;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);

	mode = dwqe_read(sc, GMAC_MTL_CHAN_RX_OP_MODE(0));
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RSF;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RTC_MASK;
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RTC_128;
	} else {
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_RSF;
	}
	mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RQS_MASK;
	if (sc->sc_rxfifo_size)
		fifosz = sc->sc_rxfifo_size;
	else
		fifosz = (128 <<
		    GMAC_MAC_HW_FEATURE1_RXFIFOSIZE(sc->sc_hw_feature[1]));
	rqs = fifosz / 256 - 1;
	mode |= (rqs << GMAC_MTL_CHAN_RX_OP_MODE_RQS_SHIFT) &
	   GMAC_MTL_CHAN_RX_OP_MODE_RQS_MASK;
	if (fifosz >= 4096) {
		mode |= GMAC_MTL_CHAN_RX_OP_MODE_EHFC;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RFD_MASK;
		mode |= 0x3 << GMAC_MTL_CHAN_RX_OP_MODE_RFD_SHIFT;
		mode &= ~GMAC_MTL_CHAN_RX_OP_MODE_RFA_MASK;
		mode |= 0x1 << GMAC_MTL_CHAN_RX_OP_MODE_RFA_SHIFT;
	}
	dwqe_write(sc, GMAC_MTL_CHAN_RX_OP_MODE(0), mode);

	mode = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
	if (sc->sc_force_thresh_dma_mode) {
		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TSF;
		mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TTC_MASK;
		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TTC_512;
	} else {
		mode |= GMAC_MTL_CHAN_TX_OP_MODE_TSF;
	}
	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TXQEN_MASK;
	mode |= GMAC_MTL_CHAN_TX_OP_MODE_TXQEN;
	mode &= ~GMAC_MTL_CHAN_TX_OP_MODE_TQS_MASK;
	if (sc->sc_txfifo_size)
		fifosz = sc->sc_txfifo_size;
	else
		fifosz = (128 <<
		    GMAC_MAC_HW_FEATURE1_TXFIFOSIZE(sc->sc_hw_feature[1]));
	tqs = (fifosz / 256) - 1;
	mode |= (tqs << GMAC_MTL_CHAN_TX_OP_MODE_TQS_SHIFT) &
	    GMAC_MTL_CHAN_TX_OP_MODE_TQS_MASK;
	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), mode);

	reg = dwqe_read(sc, GMAC_QX_TX_FLOW_CTRL(0));
	reg |= 0xffffU << GMAC_QX_TX_FLOW_CTRL_PT_SHIFT;
	reg |= GMAC_QX_TX_FLOW_CTRL_TFE;
	dwqe_write(sc, GMAC_QX_TX_FLOW_CTRL(0), reg);
	reg = dwqe_read(sc, GMAC_RX_FLOW_CTRL);
	reg |= GMAC_RX_FLOW_CTRL_RFE;
	dwqe_write(sc, GMAC_RX_FLOW_CTRL, reg);

	dwqe_write(sc, GMAC_RXQ_CTRL0, GMAC_RXQ_CTRL0_DCB_QUEUE_EN(0));

	dwqe_write(sc, GMAC_MAC_CONF, dwqe_read(sc, GMAC_MAC_CONF) |
	    GMAC_MAC_CONF_BE | GMAC_MAC_CONF_JD | GMAC_MAC_CONF_JE |
	    GMAC_MAC_CONF_DCRS | GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);

	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0),
	    GMAC_CHAN_INTR_ENA_NIE |
	    GMAC_CHAN_INTR_ENA_AIE |
	    GMAC_CHAN_INTR_ENA_FBE |
	    GMAC_CHAN_INTR_ENA_RIE |
	    GMAC_CHAN_INTR_ENA_TIE);

	if (!sc->sc_fixed_link)
		timeout_add_sec(&sc->sc_phy_tick, 1);

	if (dwqe_have_rx_csum_offload(sc)) {
		reg = dwqe_read(sc, GMAC_MAC_CONF);
		reg |= GMAC_MAC_CONF_IPC;
		dwqe_write(sc, GMAC_MAC_CONF, reg);
	}
}

void
dwqe_down(struct dwqe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct dwqe_buf *txb, *rxb;
	uint32_t reg;
	int i;

	timeout_del(&sc->sc_rxto);
	if (!sc->sc_fixed_link)
		timeout_del(&sc->sc_phy_tick);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Disable receiver */
	reg = dwqe_read(sc, GMAC_MAC_CONF);
	reg &= ~GMAC_MAC_CONF_RE;
	dwqe_write(sc, GMAC_MAC_CONF, reg);

	/* Stop receive DMA */
	reg = dwqe_read(sc, GMAC_CHAN_RX_CONTROL(0));
	reg &= ~GMAC_CHAN_RX_CONTROL_SR;
	dwqe_write(sc, GMAC_CHAN_RX_CONTROL(0), reg);

	/* Stop transmit DMA */
	reg = dwqe_read(sc, GMAC_CHAN_TX_CONTROL(0));
	reg &= ~GMAC_CHAN_TX_CONTROL_ST;
	dwqe_write(sc, GMAC_CHAN_TX_CONTROL(0), reg);

	/* Flush data in the TX FIFO */
	reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
	reg |= GMAC_MTL_CHAN_TX_OP_MODE_FTQ;
	dwqe_write(sc, GMAC_MTL_CHAN_TX_OP_MODE(0), reg);
	/* Wait for flush to complete */
	for (i = 10000; i > 0; i--) {
		reg = dwqe_read(sc, GMAC_MTL_CHAN_TX_OP_MODE(0));
		if ((reg & GMAC_MTL_CHAN_TX_OP_MODE_FTQ) == 0)
			break;
		delay(1);
	}
	if (i == 0) {
		printf("%s: timeout flushing TX queue\n",
		    sc->sc_dev.dv_xname);
	}

	/* Disable transmitter */
	reg = dwqe_read(sc, GMAC_MAC_CONF);
	reg &= ~GMAC_MAC_CONF_TE;
	dwqe_write(sc, GMAC_MAC_CONF, reg);

	dwqe_write(sc, GMAC_CHAN_INTR_ENA(0), 0);

	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	for (i = 0; i < DWQE_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	dwqe_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < DWQE_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	dwqe_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

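/*
 * Program the Rx packet filter: promiscuous/allmulti as requested,
 * otherwise a 64-bit multicast hash filter derived from the CRC32 of
 * each multicast address.
 */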
void
dwqe_iff(struct dwqe_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hash[2], hashbit, hashreg;
	uint32_t reg;

	reg = 0;

	ifp->if_flags &= ~IFF_ALLMULTI;
	bzero(hash, sizeof(hash));
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		reg |= GMAC_MAC_PACKET_FILTER_PM;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= GMAC_MAC_PACKET_FILTER_PR |
			    GMAC_MAC_PACKET_FILTER_PCF_ALL;
	} else {
		reg |= GMAC_MAC_PACKET_FILTER_HMC;
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & 0x7f;

			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	dwqe_lladdr_write(sc);

	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG0, hash[0]);
	dwqe_write(sc, GMAC_MAC_HASH_TAB_REG1, hash[1]);

	dwqe_write(sc, GMAC_MAC_PACKET_FILTER, reg);
}

void
dwqe_tx_csum(struct dwqe_softc *sc, struct mbuf *m, struct dwqe_desc *txd)
{
	if (!dwqe_have_tx_csum_offload(sc))
		return;

	/* Checksum flags are valid only on first descriptor. */
	if ((txd->sd_tdes3 & TDES3_FS) == 0)
		return;

	/* TSO and Tx checksum offloading are incompatible. */
	if (txd->sd_tdes3 & TDES3_TSO_EN)
		return;

	if (m->m_pkthdr.csum_flags & (M_IPV4_CSUM_OUT |
	    M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		txd->sd_tdes3 |= TDES3_CSUM_IPHDR_PAYLOAD_PSEUDOHDR;
}

uint16_t
dwqe_set_tx_context_desc(struct dwqe_softc *sc, struct mbuf *m, int idx)
{
	uint16_t tag = 0;
#if NVLAN > 0
	struct dwqe_desc *ctxt_txd;

	if ((m->m_flags & M_VLANTAG) == 0)
		return 0;

	tag = m->m_pkthdr.ether_vtag;
	if (tag) {
		ctxt_txd = &sc->sc_txdesc[idx];
		ctxt_txd->sd_tdes3 |= (htole16(tag) & TDES3_VLAN_TAG);
		ctxt_txd->sd_tdes3 |= TDES3_VLAN_TAG_VALID;
		ctxt_txd->sd_tdes3 |= (TDES3_CTXT | TDES3_OWN);
	}
#endif
	return tag;
}

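/*
 * Map an outgoing mbuf onto Tx descriptors starting at *idx. An
 * optional VLAN context descriptor is emitted first; the OWN bit of
 * the first data descriptor is set last so the hardware only sees a
 * fully constructed chain. Returns EFBIG if the mbuf cannot be loaded
 * even after defragmentation.
 */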
int
dwqe_encap(struct dwqe_softc *sc, struct mbuf *m, int *idx, int *used)
{
	struct dwqe_desc *txd, *txd_start;
	bus_dmamap_t map;
	int cur, frag, i;
	uint16_t vlan_tag = 0;

	cur = frag = *idx;
	map = sc->sc_txbuf[cur].tb_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT))
			return (EFBIG);
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
			return (EFBIG);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (dwqe_have_tx_vlan_offload(sc)) {
		vlan_tag = dwqe_set_tx_context_desc(sc, m, frag);
		if (vlan_tag) {
			(*used)++;
			if (frag == (DWQE_NTXDESC - 1))
				frag = 0;
			else
				frag++;
		}
	}

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		/* TODO: check for 32-bit vs 64-bit support */
		KASSERT((map->dm_segs[i].ds_addr >> 32) == 0);

		txd->sd_tdes0 = (uint32_t)map->dm_segs[i].ds_addr;
		txd->sd_tdes1 = (uint32_t)(map->dm_segs[i].ds_addr >> 32);
		txd->sd_tdes2 = map->dm_segs[i].ds_len;
		txd->sd_tdes3 = m->m_pkthdr.len;
		if (i == 0) {
			txd->sd_tdes3 |= TDES3_FS;
			dwqe_tx_csum(sc, m, txd);
			if (vlan_tag)
				txd->sd_tdes2 |= TDES2_VLAN_TAG_INSERT;
		}
		if (i == (map->dm_nsegs - 1)) {
			txd->sd_tdes2 |= TDES2_IC;
			txd->sd_tdes3 |= TDES3_LS;
		}
		if (i != 0)
			txd->sd_tdes3 |= TDES3_OWN;

		bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (DWQE_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

	txd_start->sd_tdes3 |= TDES3_OWN;
	bus_dmamap_sync(sc->sc_dmat, DWQE_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
	sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
	sc->sc_txbuf[cur].tb_map = map;
	sc->sc_txbuf[cur].tb_m = m;

	*idx = frag;
	*used += map->dm_nsegs;

	return (0);
}

void
dwqe_reset(struct dwqe_softc *sc)
{
	int n;

	dwqe_write(sc, GMAC_BUS_MODE, dwqe_read(sc, GMAC_BUS_MODE) |
	    GMAC_BUS_MODE_SWR);

	for (n = 0; n < 30000; n++) {
		if ((dwqe_read(sc, GMAC_BUS_MODE) &
		    GMAC_BUS_MODE_SWR) == 0)
			return;
		delay(10);
	}

	printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
}

struct dwqe_dmamem *
dwqe_dmamem_alloc(struct dwqe_softc *sc, bus_size_t size, bus_size_t align)
{
	struct dwqe_dmamem *tdm;
	int nsegs;

	tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
	    &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(tdm->tdm_kva, size);

	return (tdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF, 0);

	return (NULL);
}

void
dwqe_dmamem_free(struct dwqe_softc *sc, struct dwqe_dmamem *tdm)
{
	bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}

struct mbuf *
dwqe_alloc_mbuf(struct dwqe_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n", DEVNAME(sc));
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}

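/*
 * Refill the Rx ring with mbuf clusters up to the if_rxr limit and
 * advance the ring tail pointer. If no buffers could be posted at all,
 * schedule dwqe_rxtick() to retry later.
 */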
void
dwqe_fill_rx_ring(struct dwqe_softc *sc)
{
	struct dwqe_desc *rxd;
	struct dwqe_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, DWQE_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->tb_m = dwqe_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		/* TODO: check for 32-bit vs 64-bit support */
		KASSERT((rxb->tb_map->dm_segs[0].ds_addr >> 32) == 0);

		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		rxd->sd_tdes0 = (uint32_t)rxb->tb_map->dm_segs[0].ds_addr;
		rxd->sd_tdes1 = (uint32_t)(rxb->tb_map->dm_segs[0].ds_addr >> 32);
		rxd->sd_tdes2 = 0;
		rxd->sd_tdes3 = RDES3_OWN | RDES3_IC | RDES3_BUF1V;

		if (sc->sc_rx_prod == (DWQE_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);

	dwqe_write(sc, GMAC_CHAN_RX_END_ADDR(0), DWQE_DMA_DVA(sc->sc_rxring) +
	    sc->sc_rx_prod * sizeof(*rxd));

	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		timeout_add(&sc->sc_rxto, 1);
}
1417