xref: /freebsd/sys/dev/xilinx/if_xae.c (revision 61e21613)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
5  *
6  * This software was developed by SRI International and the University of
7  * Cambridge Computer Laboratory (Department of Computer Science and
8  * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
9  * DARPA SSITH research programme.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/rman.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 
46 #include <net/bpf.h>
47 #include <net/if.h>
48 #include <net/ethernet.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51 #include <net/if_types.h>
52 #include <net/if_var.h>
53 
54 #include <machine/bus.h>
55 
56 #include <dev/mii/mii.h>
57 #include <dev/mii/miivar.h>
58 #include <dev/mii/tiphy.h>
59 #include <dev/ofw/ofw_bus.h>
60 #include <dev/ofw/ofw_bus_subr.h>
61 #include <dev/xilinx/if_xaereg.h>
62 #include <dev/xilinx/if_xaevar.h>
63 
64 #include <dev/xilinx/axidma.h>
65 
66 #include "miibus_if.h"
67 
68 #define	READ4(_sc, _reg) \
69 	bus_read_4((_sc)->res[0], _reg)
70 #define	WRITE4(_sc, _reg, _val) \
71 	bus_write_4((_sc)->res[0], _reg, _val)
72 
73 #define	READ8(_sc, _reg) \
74 	bus_read_8((_sc)->res[0], _reg)
75 #define	WRITE8(_sc, _reg, _val) \
76 	bus_write_8((_sc)->res[0], _reg, _val)
77 
78 #define	XAE_LOCK(sc)			mtx_lock(&(sc)->mtx)
79 #define	XAE_UNLOCK(sc)			mtx_unlock(&(sc)->mtx)
80 #define	XAE_ASSERT_LOCKED(sc)		mtx_assert(&(sc)->mtx, MA_OWNED)
81 #define	XAE_ASSERT_UNLOCKED(sc)		mtx_assert(&(sc)->mtx, MA_NOTOWNED)
82 
83 #define XAE_DEBUG
84 #undef XAE_DEBUG
85 
86 #ifdef XAE_DEBUG
87 #define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
88 #else
89 #define dprintf(fmt, ...)
90 #endif
91 
92 #define	RX_QUEUE_SIZE		64
93 #define	TX_QUEUE_SIZE		64
94 #define	NUM_RX_MBUF		16
95 #define	BUFRING_SIZE		8192
96 #define	MDIO_CLK_DIV_DEFAULT	29
97 
98 #define	PHY1_RD(sc, _r)		\
99 	xae_miibus_read_reg(sc->dev, 1, _r)
100 #define	PHY1_WR(sc, _r, _v)	\
101 	xae_miibus_write_reg(sc->dev, 1, _r, _v)
102 
103 #define	PHY_RD(sc, _r)		\
104 	xae_miibus_read_reg(sc->dev, sc->phy_addr, _r)
105 #define	PHY_WR(sc, _r, _v)	\
106 	xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v)
107 
108 /* Use this macro to access extended regs (> 0x1f) via the MMD indirect registers */
109 #define WRITE_TI_EREG(sc, reg, data) {					\
110 	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK);			\
111 	PHY_WR(sc, MII_MMDAADR, reg);					\
112 	PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI);	\
113 	PHY_WR(sc, MII_MMDAADR, data);					\
114 }
115 
116 /* Undocumented registers; Xilinx VCU118 workaround */
117 #define	 CFG4_SGMII_TMR			0x160 /* bits 8:7 MUST be '10' */
118 #define	DP83867_SGMIICTL1		0xD3 /* undocumented register */
119 #define	 SGMIICTL1_SGMII_6W		(1 << 14) /* purpose unknown */
120 
121 static struct resource_spec xae_spec[] = {
122 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
123 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
124 	{ -1, 0 }
125 };
126 
127 static void xae_stop_locked(struct xae_softc *sc);
128 static void xae_setup_rxfilter(struct xae_softc *sc);
129 
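/*
 * Pre-fill the receive path: allocate up to "n" mbuf clusters and hand them
 * to the RX xDMA channel so the DMA engine has buffers to receive into.
 */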
130 static int
131 xae_rx_enqueue(struct xae_softc *sc, uint32_t n)
132 {
133 	struct mbuf *m;
134 	int i;
135 
136 	for (i = 0; i < n; i++) {
137 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
138 		if (m == NULL) {
139 			device_printf(sc->dev,
140 			    "%s: Can't alloc rx mbuf\n", __func__);
141 			return (-1);
142 		}
143 
144 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
145 		xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
146 	}
147 
148 	return (0);
149 }
150 
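/*
 * Resolve the PHY address from the FDT: follow the "phy-handle" reference
 * and read the "reg" property of the PHY node it points to.
 */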
151 static int
152 xae_get_phyaddr(phandle_t node, int *phy_addr)
153 {
154 	phandle_t phy_node;
155 	pcell_t phy_handle, phy_reg;
156 
157 	if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
158 	    sizeof(phy_handle)) <= 0)
159 		return (ENXIO);
160 
161 	phy_node = OF_node_from_xref(phy_handle);
162 
163 	if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
164 	    sizeof(phy_reg)) <= 0)
165 		return (ENXIO);
166 
167 	*phy_addr = phy_reg;
168 
169 	return (0);
170 }
171 
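/*
 * TX completion callback: free mbufs the xDMA engine has finished with and
 * clear IFF_DRV_OACTIVE so transmission can continue.
 */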
172 static int
173 xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
174 {
175 	xdma_transfer_status_t st;
176 	struct xae_softc *sc;
177 	if_t ifp;
178 	struct mbuf *m;
179 	int err;
180 
181 	sc = arg;
182 
183 	XAE_LOCK(sc);
184 
185 	ifp = sc->ifp;
186 
187 	for (;;) {
188 		err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
189 		if (err != 0) {
190 			break;
191 		}
192 
193 		if (st.error != 0) {
194 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
195 		}
196 
197 		m_freem(m);
198 	}
199 
200 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
201 
202 	XAE_UNLOCK(sc);
203 
204 	return (0);
205 }
206 
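/*
 * RX completion callback: hand received frames to the network stack and
 * allocate a replacement buffer for every one consumed.
 */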
207 static int
208 xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
209 {
210 	xdma_transfer_status_t st;
211 	struct xae_softc *sc;
212 	if_t ifp;
213 	struct mbuf *m;
214 	int err;
215 	uint32_t cnt_processed;
216 
217 	sc = arg;
218 
219 	dprintf("%s\n", __func__);
220 
221 	XAE_LOCK(sc);
222 
223 	ifp = sc->ifp;
224 
225 	cnt_processed = 0;
226 	for (;;) {
227 		err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
228 		if (err != 0) {
229 			break;
230 		}
231 		cnt_processed++;
232 
233 		if (st.error != 0) {
234 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
235 			m_freem(m);
236 			continue;
237 		}
238 
239 		m->m_pkthdr.len = m->m_len = st.transferred;
240 		m->m_pkthdr.rcvif = ifp;
241 		XAE_UNLOCK(sc);
242 		if_input(ifp, m);
243 		XAE_LOCK(sc);
244 	}
245 
246 	xae_rx_enqueue(sc, cnt_processed);
247 
248 	XAE_UNLOCK(sc);
249 
250 	return (0);
251 }
252 
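/* if_qflush method; nothing to do here. */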
253 static void
254 xae_qflush(if_t ifp)
255 {
256 }
257 
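/*
 * Drain the buf_ring: pass each pending mbuf to the TX xDMA channel and
 * submit the batch to hardware.  Called with the driver lock held.
 */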
258 static int
259 xae_transmit_locked(if_t ifp)
260 {
261 	struct xae_softc *sc;
262 	struct mbuf *m;
263 	struct buf_ring *br;
264 	int error;
265 	int enq;
266 
267 	dprintf("%s\n", __func__);
268 
269 	sc = if_getsoftc(ifp);
270 	br = sc->br;
271 
272 	enq = 0;
273 
274 	while ((m = drbr_peek(ifp, br)) != NULL) {
275 		error = xdma_enqueue_mbuf(sc->xchan_tx,
276 		    &m, 0, 4, 4, XDMA_MEM_TO_DEV);
277 		if (error != 0) {
278 			/* No space in request queue available yet. */
279 			drbr_putback(ifp, br, m);
280 			break;
281 		}
282 
283 		drbr_advance(ifp, br);
284 
285 		enq++;
286 
287 		/* If anyone is interested, give them a copy. */
288 		ETHER_BPF_MTAP(ifp, m);
289 	}
290 
291 	if (enq > 0)
292 		xdma_queue_submit(sc->xchan_tx);
293 
294 	return (0);
295 }
296 
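/*
 * if_transmit method: queue the mbuf on the buf_ring and start transmission
 * if the interface is running and the link is up.
 */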
297 static int
298 xae_transmit(if_t ifp, struct mbuf *m)
299 {
300 	struct xae_softc *sc;
301 	int error;
302 
303 	dprintf("%s\n", __func__);
304 
305 	sc = if_getsoftc(ifp);
306 
307 	XAE_LOCK(sc);
308 
309 	error = drbr_enqueue(ifp, sc->br, m);
310 	if (error) {
311 		XAE_UNLOCK(sc);
312 		return (error);
313 	}
314 
315 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
316 	    IFF_DRV_RUNNING) {
317 		XAE_UNLOCK(sc);
318 		return (0);
319 	}
320 
321 	if (!sc->link_is_up) {
322 		XAE_UNLOCK(sc);
323 		return (0);
324 	}
325 
326 	error = xae_transmit_locked(ifp);
327 
328 	XAE_UNLOCK(sc);
329 
330 	return (error);
331 }
332 
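/*
 * Stop the interface: cancel the tick callout and disable the MAC
 * transmitter and receiver.
 */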
333 static void
334 xae_stop_locked(struct xae_softc *sc)
335 {
336 	if_t ifp;
337 	uint32_t reg;
338 
339 	XAE_ASSERT_LOCKED(sc);
340 
341 	ifp = sc->ifp;
342 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
343 
344 	callout_stop(&sc->xae_callout);
345 
346 	/* Stop the transmitter */
347 	reg = READ4(sc, XAE_TC);
348 	reg &= ~TC_TX;
349 	WRITE4(sc, XAE_TC, reg);
350 
351 	/* Stop the receiver. */
352 	reg = READ4(sc, XAE_RCW1);
353 	reg &= ~RCW1_RX;
354 	WRITE4(sc, XAE_RCW1, reg);
355 }
356 
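/*
 * Read a hardware statistics counter and return its increment since the
 * previous read.
 */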
357 static uint64_t
358 xae_stat(struct xae_softc *sc, int counter_id)
359 {
360 	uint64_t new, old;
361 	uint64_t delta;
362 
363 	KASSERT(counter_id < XAE_MAX_COUNTERS,
364 		("counter %d is out of range", counter_id));
365 
366 	new = READ8(sc, XAE_STATCNT(counter_id));
367 	old = sc->counters[counter_id];
368 
369 	if (new >= old)
370 		delta = new - old;
371 	else
372 		delta = UINT64_MAX - old + new + 1;
373 	sc->counters[counter_id] = new;
374 
375 	return (delta);
376 }
377 
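/* Fold the MAC's hardware statistics counters into the ifnet counters. */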
378 static void
379 xae_harvest_stats(struct xae_softc *sc)
380 {
381 	if_t ifp;
382 
383 	ifp = sc->ifp;
384 
385 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES));
386 	if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS));
387 	if_inc_counter(ifp, IFCOUNTER_IERRORS,
388 	    xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) +
389 	    xae_stat(sc, RX_LEN_OUT_OF_RANGE) +
390 	    xae_stat(sc, RX_ALIGNMENT_ERRORS));
391 
392 	if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES));
393 	if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES));
394 	if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS));
395 	if_inc_counter(ifp, IFCOUNTER_OERRORS,
396 	    xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS));
397 
398 	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
399 	    xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) +
400 	    xae_stat(sc, TX_MULTI_COLLISION_FRAMES) +
401 	    xae_stat(sc, TX_LATE_COLLISIONS) +
402 	    xae_stat(sc, TX_EXCESS_COLLISIONS));
403 }
404 
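/*
 * Per-second callout: harvest statistics, let the MII layer update link
 * state and restart transmission when the link comes back up.
 */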
405 static void
406 xae_tick(void *arg)
407 {
408 	struct xae_softc *sc;
409 	if_t ifp;
410 	int link_was_up;
411 
412 	sc = arg;
413 
414 	XAE_ASSERT_LOCKED(sc);
415 
416 	ifp = sc->ifp;
417 
418 	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
419 		return;
420 
421 	/* Gather stats from hardware counters. */
422 	xae_harvest_stats(sc);
423 
424 	/* Check the media status. */
425 	link_was_up = sc->link_is_up;
426 	mii_tick(sc->mii_softc);
427 	if (sc->link_is_up && !link_was_up)
428 		xae_transmit_locked(sc->ifp);
429 
430 	/* Schedule another check one second from now. */
431 	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
432 }
433 
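/*
 * Bring the interface up: program the RX filter, enable the transmitter
 * and receiver, and start the tick callout.
 */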
434 static void
435 xae_init_locked(struct xae_softc *sc)
436 {
437 	if_t ifp;
438 
439 	XAE_ASSERT_LOCKED(sc);
440 
441 	ifp = sc->ifp;
442 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
443 		return;
444 
445 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
446 
447 	xae_setup_rxfilter(sc);
448 
449 	/* Enable the transmitter */
450 	WRITE4(sc, XAE_TC, TC_TX);
451 
452 	/* Enable the receiver. */
453 	WRITE4(sc, XAE_RCW1, RCW1_RX);
454 
455 	/*
456 	 * Call mii_mediachg() which will call back into xae_miibus_statchg()
457 	 * to set up the remaining config registers based on current media.
458 	 */
459 	mii_mediachg(sc->mii_softc);
460 	callout_reset(&sc->xae_callout, hz, xae_tick, sc);
461 }
462 
463 static void
464 xae_init(void *arg)
465 {
466 	struct xae_softc *sc;
467 
468 	sc = arg;
469 
470 	XAE_LOCK(sc);
471 	xae_init_locked(sc);
472 	XAE_UNLOCK(sc);
473 }
474 
475 static void
476 xae_media_status(if_t  ifp, struct ifmediareq *ifmr)
477 {
478 	struct xae_softc *sc;
479 	struct mii_data *mii;
480 
481 	sc = if_getsoftc(ifp);
482 	mii = sc->mii_softc;
483 
484 	XAE_LOCK(sc);
485 	mii_pollstat(mii);
486 	ifmr->ifm_active = mii->mii_media_active;
487 	ifmr->ifm_status = mii->mii_media_status;
488 	XAE_UNLOCK(sc);
489 }
490 
491 static int
492 xae_media_change_locked(struct xae_softc *sc)
493 {
494 
495 	return (mii_mediachg(sc->mii_softc));
496 }
497 
498 static int
499 xae_media_change(if_t  ifp)
500 {
501 	struct xae_softc *sc;
502 	int error;
503 
504 	sc = if_getsoftc(ifp);
505 
506 	XAE_LOCK(sc);
507 	error = xae_media_change_locked(sc);
508 	XAE_UNLOCK(sc);
509 
510 	return (error);
511 }
512 
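/*
 * if_foreach_llmaddr() callback: program one multicast address into the
 * frame filter entry selected by "cnt".
 */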
513 static u_int
514 xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
515 {
516 	struct xae_softc *sc = arg;
517 	uint32_t reg;
518 	uint8_t *ma;
519 
520 	if (cnt >= XAE_MULTICAST_TABLE_SIZE)
521 		return (1);
522 
523 	ma = LLADDR(sdl);
524 
525 	reg = READ4(sc, XAE_FFC) & 0xffffff00;
526 	reg |= cnt;
527 	WRITE4(sc, XAE_FFC, reg);
528 
529 	reg = (ma[0]);
530 	reg |= (ma[1] << 8);
531 	reg |= (ma[2] << 16);
532 	reg |= (ma[3] << 24);
533 	WRITE4(sc, XAE_FFV(0), reg);
534 
535 	reg = ma[4];
536 	reg |= ma[5] << 8;
537 	WRITE4(sc, XAE_FFV(1), reg);
538 
539 	return (1);
540 }
541 
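/*
 * Program the receive address filter: promiscuous/allmulti mode, the
 * multicast filter entries and the primary unicast address.
 */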
542 static void
543 xae_setup_rxfilter(struct xae_softc *sc)
544 {
545 	if_t ifp;
546 	uint32_t reg;
547 
548 	XAE_ASSERT_LOCKED(sc);
549 
550 	ifp = sc->ifp;
551 
552 	/*
553 	 * Set the multicast (group) filter hash.
554 	 */
555 	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
556 		reg = READ4(sc, XAE_FFC);
557 		reg |= FFC_PM;
558 		WRITE4(sc, XAE_FFC, reg);
559 	} else {
560 		reg = READ4(sc, XAE_FFC);
561 		reg &= ~FFC_PM;
562 		WRITE4(sc, XAE_FFC, reg);
563 
564 		if_foreach_llmaddr(ifp, xae_write_maddr, sc);
565 	}
566 
567 	/*
568 	 * Set the primary address.
569 	 */
570 	reg = sc->macaddr[0];
571 	reg |= (sc->macaddr[1] << 8);
572 	reg |= (sc->macaddr[2] << 16);
573 	reg |= (sc->macaddr[3] << 24);
574 	WRITE4(sc, XAE_UAW0, reg);
575 
576 	reg = sc->macaddr[4];
577 	reg |= (sc->macaddr[5] << 8);
578 	WRITE4(sc, XAE_UAW1, reg);
579 }
580 
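/* if_ioctl method: handle interface flag, multicast, media and capability requests. */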
581 static int
582 xae_ioctl(if_t ifp, u_long cmd, caddr_t data)
583 {
584 	struct xae_softc *sc;
585 	struct mii_data *mii;
586 	struct ifreq *ifr;
587 	int mask, error;
588 
589 	sc = if_getsoftc(ifp);
590 	ifr = (struct ifreq *)data;
591 
592 	error = 0;
593 	switch (cmd) {
594 	case SIOCSIFFLAGS:
595 		XAE_LOCK(sc);
596 		if (if_getflags(ifp) & IFF_UP) {
597 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
598 				if ((if_getflags(ifp) ^ sc->if_flags) &
599 				    (IFF_PROMISC | IFF_ALLMULTI))
600 					xae_setup_rxfilter(sc);
601 			} else {
602 				if (!sc->is_detaching)
603 					xae_init_locked(sc);
604 			}
605 		} else {
606 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
607 				xae_stop_locked(sc);
608 		}
609 		sc->if_flags = if_getflags(ifp);
610 		XAE_UNLOCK(sc);
611 		break;
612 	case SIOCADDMULTI:
613 	case SIOCDELMULTI:
614 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
615 			XAE_LOCK(sc);
616 			xae_setup_rxfilter(sc);
617 			XAE_UNLOCK(sc);
618 		}
619 		break;
620 	case SIOCSIFMEDIA:
621 	case SIOCGIFMEDIA:
622 		mii = sc->mii_softc;
623 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
624 		break;
625 	case SIOCSIFCAP:
626 		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
627 		if (mask & IFCAP_VLAN_MTU) {
628 			/* No work to do except acknowledge the change took effect. */
629 			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
630 		}
631 		break;
632 
633 	default:
634 		error = ether_ioctl(ifp, cmd, data);
635 		break;
636 	}
637 
638 	return (error);
639 }
640 
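/*
 * MAC interrupt handler; currently a no-op, RX and TX completion is
 * delivered through the xDMA channel callbacks.
 */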
641 static void
642 xae_intr(void *arg)
643 {
644 
645 }
646 
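/* Fetch the MAC address from the "local-mac-address" FDT property. */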
647 static int
648 xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr)
649 {
650 	phandle_t node;
651 	int len;
652 
653 	node = ofw_bus_get_node(sc->dev);
654 
655 	/* Check that the property is present. */
656 	if ((len = OF_getproplen(node, "local-mac-address")) <= 0)
657 		return (EINVAL);
658 
659 	if (len != ETHER_ADDR_LEN)
660 		return (EINVAL);
661 
662 	OF_getprop(node, "local-mac-address", hwaddr,
663 	    ETHER_ADDR_LEN);
664 
665 	return (0);
666 }
667 
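/* Wait for the MDIO interface to report ready, giving up after ~200 polls. */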
668 static int
669 mdio_wait(struct xae_softc *sc)
670 {
671 	uint32_t reg;
672 	int timeout;
673 
674 	timeout = 200;
675 
676 	do {
677 		reg = READ4(sc, XAE_MDIO_CTRL);
678 		if (reg & MDIO_CTRL_READY)
679 			break;
680 		DELAY(1);
681 	} while (timeout--);
682 
683 	if (timeout < 0) {
684 		printf("Failed to get MDIO ready\n");
685 		return (1);
686 	}
687 
688 	return (0);
689 }
690 
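/* miibus read method: run an MDIO read transaction and return the result. */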
691 static int
692 xae_miibus_read_reg(device_t dev, int phy, int reg)
693 {
694 	struct xae_softc *sc;
695 	uint32_t mii;
696 	int rv;
697 
698 	sc = device_get_softc(dev);
699 
700 	if (mdio_wait(sc))
701 		return (0);
702 
703 	mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE;
704 	mii |= (reg << MDIO_TX_REGAD_S);
705 	mii |= (phy << MDIO_TX_PHYAD_S);
706 
707 	WRITE4(sc, XAE_MDIO_CTRL, mii);
708 
709 	if (mdio_wait(sc))
710 		return (0);
711 
712 	rv = READ4(sc, XAE_MDIO_READ);
713 
714 	return (rv);
715 }
716 
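/* miibus write method: run an MDIO write transaction. */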
717 static int
718 xae_miibus_write_reg(device_t dev, int phy, int reg, int val)
719 {
720 	struct xae_softc *sc;
721 	uint32_t mii;
722 
723 	sc = device_get_softc(dev);
724 
725 	if (mdio_wait(sc))
726 		return (1);
727 
728 	mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE;
729 	mii |= (reg << MDIO_TX_REGAD_S);
730 	mii |= (phy << MDIO_TX_PHYAD_S);
731 
732 	WRITE4(sc, XAE_MDIO_WRITE, val);
733 	WRITE4(sc, XAE_MDIO_CTRL, mii);
734 
735 	if (mdio_wait(sc))
736 		return (1);
737 
738 	return (0);
739 }
740 
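/*
 * TI DP83867 fixup for the Xilinx VCU118 board: enable SGMII mode, adjust
 * the speed-optimization settings and restart autonegotiation until it
 * completes.
 */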
741 static void
742 xae_phy_fixup(struct xae_softc *sc)
743 {
744 	uint32_t reg;
745 
746 	do {
747 		WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W);
748 		PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN);
749 
750 		reg = PHY_RD(sc, DP83867_CFG2);
751 		reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M;
752 		reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4);
753 		reg |= CFG2_INTERRUPT_POLARITY;
754 		reg |= CFG2_SPEED_OPT_ENHANCED_EN;
755 		reg |= CFG2_SPEED_OPT_10M_EN;
756 		PHY_WR(sc, DP83867_CFG2, reg);
757 
758 		WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR);
759 		PHY_WR(sc, MII_BMCR,
760 		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET);
761 	} while (PHY1_RD(sc, MII_BMCR) == 0x0ffff);
762 
763 	do {
764 		PHY1_WR(sc, MII_BMCR,
765 		    BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG);
766 		DELAY(40000);
767 	} while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0);
768 }
769 
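/* Look up the TX and RX xDMA controllers by name from the device FDT node. */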
770 static int
771 get_xdma_std(struct xae_softc *sc)
772 {
773 
774 	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
775 	if (sc->xdma_tx == NULL)
776 		return (ENXIO);
777 
778 	sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
779 	if (sc->xdma_rx == NULL) {
780 		xdma_put(sc->xdma_tx);
781 		return (ENXIO);
782 	}
783 
784 	return (0);
785 }
786 
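/*
 * Fallback lookup: locate the AXI DMA engine through the
 * "axistream-connected" property and attach TX/RX channel data to it.
 */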
787 static int
788 get_xdma_axistream(struct xae_softc *sc)
789 {
790 	struct axidma_fdt_data *data;
791 	device_t dma_dev;
792 	phandle_t node;
793 	pcell_t prop;
794 	size_t len;
795 
796 	node = ofw_bus_get_node(sc->dev);
797 	len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
798 	if (len != sizeof(prop)) {
799 		device_printf(sc->dev,
800 		    "%s: Couldn't get axistream-connected prop.\n", __func__);
801 		return (ENXIO);
802 	}
803 	dma_dev = OF_device_from_xref(prop);
804 	if (dma_dev == NULL) {
805 		device_printf(sc->dev, "Could not get DMA device by xref.\n");
806 		return (ENXIO);
807 	}
808 
809 	sc->xdma_tx = xdma_get(sc->dev, dma_dev);
810 	if (sc->xdma_tx == NULL) {
811 		device_printf(sc->dev, "Could not find DMA controller.\n");
812 		return (ENXIO);
813 	}
814 	data = malloc(sizeof(struct axidma_fdt_data),
815 	    M_DEVBUF, (M_WAITOK | M_ZERO));
816 	data->id = AXIDMA_TX_CHAN;
817 	sc->xdma_tx->data = data;
818 
819 	sc->xdma_rx = xdma_get(sc->dev, dma_dev);
820 	if (sc->xdma_rx == NULL) {
821 		device_printf(sc->dev, "Could not find DMA controller.\n");
822 		return (ENXIO);
823 	}
824 	data = malloc(sizeof(struct axidma_fdt_data),
825 	    M_DEVBUF, (M_WAITOK | M_ZERO));
826 	data->id = AXIDMA_RX_CHAN;
827 	sc->xdma_rx->data = data;
828 
829 	return (0);
830 }
831 
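/*
 * DMA setup: find the xDMA controllers, allocate virtual TX/RX channels,
 * install their completion handlers and prepare scatter-gather transfers.
 */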
832 static int
833 setup_xdma(struct xae_softc *sc)
834 {
835 	device_t dev;
836 	vmem_t *vmem;
837 	int error;
838 
839 	dev = sc->dev;
840 
841 	/* Get xDMA controller */
842 	error = get_xdma_std(sc);
843 
844 	if (error) {
845 		device_printf(sc->dev,
846 		    "Fallback to axistream-connected property\n");
847 		error = get_xdma_axistream(sc);
848 	}
849 
850 	if (error) {
851 		device_printf(dev, "Could not find xDMA controllers.\n");
852 		return (ENXIO);
853 	}
854 
855 	/* Alloc xDMA TX virtual channel. */
856 	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
857 	if (sc->xchan_tx == NULL) {
858 		device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
859 		return (ENXIO);
860 	}
861 
862 	/* Setup interrupt handler. */
863 	error = xdma_setup_intr(sc->xchan_tx, 0,
864 	    xae_xdma_tx_intr, sc, &sc->ih_tx);
865 	if (error) {
866 		device_printf(sc->dev,
867 		    "Can't setup xDMA TX interrupt handler.\n");
868 		return (ENXIO);
869 	}
870 
871 	/* Alloc xDMA RX virtual channel. */
872 	sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
873 	if (sc->xchan_rx == NULL) {
874 		device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
875 		return (ENXIO);
876 	}
877 
878 	/* Setup interrupt handler. */
879 	error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
880 	    xae_xdma_rx_intr, sc, &sc->ih_rx);
881 	if (error) {
882 		device_printf(sc->dev,
883 		    "Can't setup xDMA RX interrupt handler.\n");
884 		return (ENXIO);
885 	}
886 
887 	/* Setup bounce buffer */
888 	vmem = xdma_get_memory(dev);
889 	if (vmem) {
890 		xchan_set_memory(sc->xchan_tx, vmem);
891 		xchan_set_memory(sc->xchan_rx, vmem);
892 	}
893 
894 	xdma_prep_sg(sc->xchan_tx,
895 	    TX_QUEUE_SIZE,	/* xchan requests queue size */
896 	    MCLBYTES,	/* maxsegsize */
897 	    8,		/* maxnsegs */
898 	    16,		/* alignment */
899 	    0,		/* boundary */
900 	    BUS_SPACE_MAXADDR_32BIT,
901 	    BUS_SPACE_MAXADDR);
902 
903 	xdma_prep_sg(sc->xchan_rx,
904 	    RX_QUEUE_SIZE,	/* xchan requests queue size */
905 	    MCLBYTES,	/* maxsegsize */
906 	    1,		/* maxnsegs */
907 	    16,		/* alignment */
908 	    0,		/* boundary */
909 	    BUS_SPACE_MAXADDR_32BIT,
910 	    BUS_SPACE_MAXADDR);
911 
912 	return (0);
913 }
914 
915 static int
916 xae_probe(device_t dev)
917 {
918 
919 	if (!ofw_bus_status_okay(dev))
920 		return (ENXIO);
921 
922 	if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
923 		return (ENXIO);
924 
925 	device_set_desc(dev, "Xilinx AXI Ethernet");
926 
927 	return (BUS_PROBE_DEFAULT);
928 }
929 
930 static int
931 xae_attach(device_t dev)
932 {
933 	struct xae_softc *sc;
934 	if_t ifp;
935 	phandle_t node;
936 	uint32_t reg;
937 	int error;
938 
939 	sc = device_get_softc(dev);
940 	sc->dev = dev;
941 	node = ofw_bus_get_node(dev);
942 
943 	if (setup_xdma(sc) != 0) {
944 		device_printf(dev, "Could not setup xDMA.\n");
945 		return (ENXIO);
946 	}
947 
948 	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
949 	    MTX_NETWORK_LOCK, MTX_DEF);
950 
951 	sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
952 	    M_NOWAIT, &sc->mtx);
953 	if (sc->br == NULL)
954 		return (ENOMEM);
955 
956 	if (bus_alloc_resources(dev, xae_spec, sc->res)) {
957 		device_printf(dev, "could not allocate resources\n");
958 		return (ENXIO);
959 	}
960 
961 	/* Memory interface */
962 	sc->bst = rman_get_bustag(sc->res[0]);
963 	sc->bsh = rman_get_bushandle(sc->res[0]);
964 
965 	device_printf(sc->dev, "Identification: %x\n",
966 	    READ4(sc, XAE_IDENT));
967 
968 	/* Get MAC addr */
969 	if (xae_get_hwaddr(sc, sc->macaddr)) {
970 		device_printf(sc->dev, "can't get mac\n");
971 		return (ENXIO);
972 	}
973 
974 	/* Enable MII clock */
975 	reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
976 	reg |= MDIO_SETUP_ENABLE;
977 	WRITE4(sc, XAE_MDIO_SETUP, reg);
978 	if (mdio_wait(sc))
979 		return (ENXIO);
980 
981 	callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);
982 
983 	/* Setup interrupt handler. */
984 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
985 	    NULL, xae_intr, sc, &sc->intr_cookie);
986 	if (error != 0) {
987 		device_printf(dev, "could not setup interrupt handler.\n");
988 		return (ENXIO);
989 	}
990 
991 	/* Set up the ethernet interface. */
992 	sc->ifp = ifp = if_alloc(IFT_ETHER);
993 	if (ifp == NULL) {
994 		device_printf(dev, "could not allocate ifp.\n");
995 		return (ENXIO);
996 	}
997 
998 	if_setsoftc(ifp, sc);
999 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1000 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1001 	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
1002 	if_setcapenable(ifp, if_getcapabilities(ifp));
1003 	if_settransmitfn(ifp, xae_transmit);
1004 	if_setqflushfn(ifp, xae_qflush);
1005 	if_setioctlfn(ifp, xae_ioctl);
1006 	if_setinitfn(ifp, xae_init);
1007 	if_setsendqlen(ifp, TX_DESC_COUNT - 1);
1008 	if_setsendqready(ifp);
1009 
1010 	if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
1011 		return (ENXIO);
1012 
1013 	/* Attach the mii driver. */
1014 	error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
1015 	    xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
1016 	    MII_OFFSET_ANY, 0);
1017 
1018 	if (error != 0) {
1019 		device_printf(dev, "PHY attach failed\n");
1020 		return (ENXIO);
1021 	}
1022 	sc->mii_softc = device_get_softc(sc->miibus);
1023 
1024 	/* Apply vcu118 workaround. */
1025 	if (OF_getproplen(node, "xlnx,vcu118") >= 0)
1026 		xae_phy_fixup(sc);
1027 
1028 	/* All ready to run, attach the ethernet interface. */
1029 	ether_ifattach(ifp, sc->macaddr);
1030 	sc->is_attached = true;
1031 
1032 	xae_rx_enqueue(sc, NUM_RX_MBUF);
1033 	xdma_queue_submit(sc->xchan_rx);
1034 
1035 	return (0);
1036 }
1037 
1038 static int
1039 xae_detach(device_t dev)
1040 {
1041 	struct xae_softc *sc;
1042 	if_t ifp;
1043 
1044 	sc = device_get_softc(dev);
1045 
1046 	KASSERT(mtx_initialized(&sc->mtx), ("%s: mutex not initialized",
1047 	    device_get_nameunit(dev)));
1048 
1049 	ifp = sc->ifp;
1050 
1051 	/* Only cleanup if attach succeeded. */
1052 	if (device_is_attached(dev)) {
1053 		XAE_LOCK(sc);
1054 		xae_stop_locked(sc);
1055 		XAE_UNLOCK(sc);
1056 		callout_drain(&sc->xae_callout);
1057 		ether_ifdetach(ifp);
1058 	}
1059 
1060 	if (sc->miibus != NULL)
1061 		device_delete_child(dev, sc->miibus);
1062 
1063 	if (ifp != NULL)
1064 		if_free(ifp);
1065 
1066 	mtx_destroy(&sc->mtx);
1067 
1068 	bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
1069 
1070 	bus_release_resources(dev, xae_spec, sc->res);
1071 
1072 	xdma_channel_free(sc->xchan_tx);
1073 	xdma_channel_free(sc->xchan_rx);
1074 	xdma_put(sc->xdma_tx);
1075 	xdma_put(sc->xdma_rx);
1076 
1077 	return (0);
1078 }
1079 
1080 static void
1081 xae_miibus_statchg(device_t dev)
1082 {
1083 	struct xae_softc *sc;
1084 	struct mii_data *mii;
1085 	uint32_t reg;
1086 
1087 	/*
1088 	 * Called by the MII bus driver when the PHY establishes
1089 	 * link to set the MAC interface registers.
1090 	 */
1091 
1092 	sc = device_get_softc(dev);
1093 
1094 	XAE_ASSERT_LOCKED(sc);
1095 
1096 	mii = sc->mii_softc;
1097 
1098 	if (mii->mii_media_status & IFM_ACTIVE)
1099 		sc->link_is_up = true;
1100 	else
1101 		sc->link_is_up = false;
1102 
1103 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1104 	case IFM_1000_T:
1105 	case IFM_1000_SX:
1106 		reg = SPEED_1000;
1107 		break;
1108 	case IFM_100_TX:
1109 		reg = SPEED_100;
1110 		break;
1111 	case IFM_10_T:
1112 		reg = SPEED_10;
1113 		break;
1114 	case IFM_NONE:
1115 		sc->link_is_up = false;
1116 		return;
1117 	default:
1118 		sc->link_is_up = false;
1119 		device_printf(dev, "Unsupported media %u\n",
1120 		    IFM_SUBTYPE(mii->mii_media_active));
1121 		return;
1122 	}
1123 
1124 	WRITE4(sc, XAE_SPEED, reg);
1125 }
1126 
1127 static device_method_t xae_methods[] = {
1128 	DEVMETHOD(device_probe,		xae_probe),
1129 	DEVMETHOD(device_attach,	xae_attach),
1130 	DEVMETHOD(device_detach,	xae_detach),
1131 
1132 	/* MII Interface */
1133 	DEVMETHOD(miibus_readreg,	xae_miibus_read_reg),
1134 	DEVMETHOD(miibus_writereg,	xae_miibus_write_reg),
1135 	DEVMETHOD(miibus_statchg,	xae_miibus_statchg),
1136 	{ 0, 0 }
1137 };
1138 
1139 driver_t xae_driver = {
1140 	"xae",
1141 	xae_methods,
1142 	sizeof(struct xae_softc),
1143 };
1144 
1145 DRIVER_MODULE(xae, simplebus, xae_driver, 0, 0);
1146 DRIVER_MODULE(miibus, xae, miibus_driver, 0, 0);
1147 
1148 MODULE_DEPEND(xae, ether, 1, 1, 1);
1149 MODULE_DEPEND(xae, miibus, 1, 1, 1);
1150