// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 */

#include <clk.h>
#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <console.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <watchdog.h>
#include <asm/system.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK	0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK	23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK	18 /* Shift bits for PHREG */

#define ZYNQ_GEM_RXBUF_EOF_MASK		0x00008000 /* End of frame. */
#define ZYNQ_GEM_RXBUF_SOF_MASK		0x00004000 /* Start of frame. */
#define ZYNQ_GEM_RXBUF_LEN_MASK		0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK	0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK		0x00000001 /* Used bit */
#define ZYNQ_GEM_RXBUF_ADD_MASK		0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK	0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK	0x00008000 /* Last buffer */
#define ZYNQ_GEM_TXBUF_USED_MASK	0x80000000 /* Used by Hw */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK	0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK	0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK	0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK	0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100		0x00000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000	0x00000400 /* 1Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN		0x00000002 /* Full Duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM		0x00020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_SGMII_ENBL	0x08000000 /* SGMII Enable */
#define ZYNQ_GEM_NWCFG_PCS_SEL		0x00000800 /* PCS select */
#ifdef CONFIG_ARM64
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x00100000 /* Div pclk by 64, max 160MHz */
#else
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x000c0000 /* Div pclk by 48, max 120MHz */
#endif

#ifdef CONFIG_ARM64
# define ZYNQ_GEM_DBUS_WIDTH	(1 << 21) /* 64 bit bus */
#else
# define ZYNQ_GEM_DBUS_WIDTH	(0 << 21) /* 32 bit bus */
#endif

#define ZYNQ_GEM_NWCFG_INIT		(ZYNQ_GEM_DBUS_WIDTH | \
					ZYNQ_GEM_NWCFG_FDEN | \
					ZYNQ_GEM_NWCFG_FSREM | \
					ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK	0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH		0x00000004 /* INCR4 AHB bursts */
/* Use the full configured addressable space (8 KB) */
#define ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use the full configured addressable space (4 KB) */
#define ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set with binary 00011000 to use 1536-byte buffers (1 * max length frame) */
#define ZYNQ_GEM_DMACR_RXBUF		0x00180000

#if defined(CONFIG_PHYS_64BIT)
# define ZYNQ_GEM_DMA_BUS_WIDTH		BIT(30) /* 64 bit bus */
#else
# define ZYNQ_GEM_DMA_BUS_WIDTH		(0 << 30) /* 32 bit bus */
#endif

#define ZYNQ_GEM_DMACR_INIT		(ZYNQ_GEM_DMACR_BLENGTH | \
					ZYNQ_GEM_DMACR_RXSIZE | \
					ZYNQ_GEM_DMACR_TXSIZE | \
					ZYNQ_GEM_DMACR_RXBUF | \
					ZYNQ_GEM_DMA_BUS_WIDTH)

#define ZYNQ_GEM_TSR_DONE		0x00000020 /* Tx done mask */

#define ZYNQ_GEM_PCS_CTL_ANEG_ENBL	0x1000

#define ZYNQ_GEM_DCFG_DBG6_DMA_64B	BIT(23)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG  1

/*
 * Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *  0x1000: 10Mbps full duplex support
 *  0x0800: 10Mbps half duplex support
 *  0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK 0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK	0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED	0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN		0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10	2500000UL
#define ZYNQ_GEM_FREQUENCY_100	25000000UL
#define ZYNQ_GEM_FREQUENCY_1000	125000000UL

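/*
 * Driver-data flag: platforms whose compatible entry carries RXCLK_EN
 * (e.g. "cdns,versal-gem") need the rx clock enabled explicitly in
 * addition to the tx clock.
 */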
#define RXCLK_EN		BIT(0)

/* Device registers */
struct zynq_gem_regs {
	u32 nwctrl; /* 0x0 - Network Control reg */
	u32 nwcfg; /* 0x4 - Network Config reg */
	u32 nwsr; /* 0x8 - Network Status reg */
	u32 reserved1;
	u32 dmacr; /* 0x10 - DMA Control reg */
	u32 txsr; /* 0x14 - TX Status reg */
	u32 rxqbase; /* 0x18 - RX Q Base address reg */
	u32 txqbase; /* 0x1c - TX Q Base address reg */
	u32 rxsr; /* 0x20 - RX Status reg */
	u32 reserved2[2];
	u32 idr; /* 0x2c - Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* 0x34 - PHY Maintenance reg */
	u32 reserved4[18];
	u32 hashl; /* 0x80 - Hash Low address reg */
	u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* 0x88 - Specific1 addr low/high reg */
	u32 match[4]; /* 0xa8 - Type ID1 Match reg */
	u32 reserved6[18];
#define STAT_SIZE	44
	u32 stat[STAT_SIZE]; /* 0x100 - Octets transmitted Low reg */
	u32 reserved9[20];
	u32 pcscntrl;
	u32 reserved12[36];
	u32 dcfg6; /* 0x294 Design config reg6 */
	u32 reserved7[106];
	u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
	u32 reserved8[15];
	u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
	u32 reserved10[17];
	u32 upper_txqbase; /* 0x4C8 - Upper tx_q base addr */
	u32 reserved11[2];
	u32 upper_rxqbase; /* 0x4D4 - Upper rx_q base addr */
};

/* BD descriptors */
struct emac_bd {
	u32 addr; /* Buffer address */
	u32 status;
#if defined(CONFIG_PHYS_64BIT)
	u32 addr_hi;
	u32 reserved;
#endif
};

/* Reduce the number of RX buffers if the amount of memory is limited */
#define RX_BUF 32
/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver uses fewer BDs, so use a 1MB BD space.
 */
#define BD_SPACE	0x100000
/* BD separation space */
#define BD_SEPRN_SPACE	(RX_BUF * sizeof(struct emac_bd))

/* Index of the first TX descriptor free after the data and wrap BDs */
#define TX_FREE_DESC	2
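/*
 * Layout within bd_space: the TX descriptors sit at the start and the
 * RX ring begins BD_SEPRN_SPACE bytes later; tx_bd[TX_FREE_DESC] and
 * tx_bd[TX_FREE_DESC + 2] are reused in zynq_gem_init() as dummy
 * descriptors that park the second priority queue.
 */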

/* Initialized, rxbd_current, rx_first_buf must be 0 after init */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;
	struct emac_bd *rx_bd;
	char *rxbuffers;
	u32 rxbd_current;
	u32 rx_first_buf;
	int phyaddr;
	int init;
	struct zynq_gem_regs *iobase;
	struct zynq_gem_regs *mdiobase;
	phy_interface_t interface;
	struct phy_device *phydev;
	ofnode phy_of_node;
	struct mii_dev *bus;
	struct clk rx_clk;
	struct clk tx_clk;
	u32 max_speed;
	bool int_pcs;
	bool dma_64bit;
	u32 clk_en_info;
};

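/*
 * Issue a single PHY maintenance (MDIO) operation: wait for the
 * management interface to go idle, program the frame into the phymntnc
 * register, wait for completion and, for reads, return the result
 * through *data.
 */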
static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
			u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = priv->mdiobase;
	int err;

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	/* Construct mgtcr mask for the operation */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Write mgtcr and wait for completion */
	writel(mgtcr, &regs->phymntnc);

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}

static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
		   u32 regnum, u16 *val)
{
	int ret;

	ret = phy_setup_op(priv, phy_addr, regnum,
			   ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);

	if (!ret)
		debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
		      phy_addr, regnum, *val);

	return ret;
}

static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
		    u32 regnum, u16 data)
{
	debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
	      regnum, data);

	return phy_setup_op(priv, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}

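/*
 * write_hwaddr callback: clear all four specific address registers and
 * the type ID match registers, then program the MAC address into
 * specific address register 1.
 */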
static int zynq_gem_setup_mac(struct udevice *dev)
{
	u32 i, macaddrlow, macaddrhigh;
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	/* Set the MAC bits [31:0] in BOT */
	macaddrlow = pdata->enetaddr[0];
	macaddrlow |= pdata->enetaddr[1] << 8;
	macaddrlow |= pdata->enetaddr[2] << 16;
	macaddrlow |= pdata->enetaddr[3] << 24;

	/* Set MAC bits [47:32] in TOP */
	macaddrhigh = pdata->enetaddr[4];
	macaddrhigh |= pdata->enetaddr[5] << 8;

	for (i = 0; i < 4; i++) {
		writel(0, &regs->laddr[i][LADDR_LOW]);
		writel(0, &regs->laddr[i][LADDR_HIGH]);
		/* Do not use MATCHx register */
		writel(0, &regs->match[i]);
	}

	writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
	writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

	return 0;
}

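/*
 * Bring up the PHY: enable the MDIO port, connect to the PHY described
 * by the device tree, cap the supported/advertised modes (optionally to
 * "max-speed") and run the generic phy_config().
 */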
static int zynq_phy_init(struct udevice *dev)
{
	int ret;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs_mdio = priv->mdiobase;
	const u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* Enable only the MDIO bus */
	writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, &regs_mdio->nwctrl);

	priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
				   priv->interface);
	if (!priv->phydev)
		return -ENODEV;

	if (priv->max_speed) {
		ret = phy_set_supported(priv->phydev, priv->max_speed);
		if (ret)
			return ret;
	}

	priv->phydev->supported &= supported | ADVERTISED_Pause |
				  ADVERTISED_Asym_Pause;

	priv->phydev->advertising = priv->phydev->supported;
	priv->phydev->node = priv->phy_of_node;

	return phy_config(priv->phydev);
}

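/*
 * eth start callback: on the first call set up the RX descriptor ring,
 * the DMA configuration and the dummy priority-queue descriptors; on
 * every call start the PHY, program the negotiated speed into nwcfg,
 * adjust the tx clock accordingly and enable the receiver and
 * transmitter.
 */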
static int zynq_gem_init(struct udevice *dev)
{
	u32 i, nwconfig;
	int ret;
	unsigned long clk_rate = 0;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct zynq_gem_regs *regs_mdio = priv->mdiobase;
	struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
	struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];

	if (readl(&regs->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
		priv->dma_64bit = true;
	else
		priv->dma_64bit = false;

#if defined(CONFIG_PHYS_64BIT)
	if (!priv->dma_64bit) {
		printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
		       __func__);
		return -EINVAL;
	}
#else
	if (priv->dma_64bit)
		debug("WARN: %s: Not using 64-bit DMA even when HW supports it\n",
		      __func__);
#endif

	if (!priv->init) {
		/* Disable all interrupts */
		writel(0xFFFFFFFF, &regs->idr);

		/* Disable the receiver & transmitter */
		writel(0, &regs->nwctrl);
		writel(0, &regs->txsr);
		writel(0, &regs->rxsr);
		writel(0, &regs->phymntnc);

		/* Clear the Hash registers for the mac address
		 * pointed to by AddressPtr
		 */
		writel(0x0, &regs->hashl);
		/* Write bits [63:32] in TOP */
		writel(0x0, &regs->hashh);

		/* Clear all counters */
		for (i = 0; i < STAT_SIZE; i++)
			readl(&regs->stat[i]);

		/* Setup RxBD space */
		memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

		for (i = 0; i < RX_BUF; i++) {
			priv->rx_bd[i].status = 0xF0000000;
			priv->rx_bd[i].addr =
					(lower_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#if defined(CONFIG_PHYS_64BIT)
			priv->rx_bd[i].addr_hi =
					(upper_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#endif
		}
		/* WRAP bit to last BD */
		priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
		/* Write RxBDs to IP */
		writel(lower_32_bits((ulong)priv->rx_bd), &regs->rxqbase);
#if defined(CONFIG_PHYS_64BIT)
		writel(upper_32_bits((ulong)priv->rx_bd), &regs->upper_rxqbase);
#endif

		/* Setup for DMA Configuration register */
		writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

		/* Enable the MDIO port in the Network Control register */
		setbits_le32(&regs_mdio->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

		/* Disable the second priority queue */
		dummy_tx_bd->addr = 0;
#if defined(CONFIG_PHYS_64BIT)
		dummy_tx_bd->addr_hi = 0;
#endif
		dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
				ZYNQ_GEM_TXBUF_LAST_MASK |
				ZYNQ_GEM_TXBUF_USED_MASK;

		dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
				ZYNQ_GEM_RXBUF_NEW_MASK;
#if defined(CONFIG_PHYS_64BIT)
		dummy_rx_bd->addr_hi = 0;
#endif
		dummy_rx_bd->status = 0;

		writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
		writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);

		priv->init++;
	}

	ret = phy_startup(priv->phydev);
	if (ret)
		return ret;

	if (!priv->phydev->link) {
		printf("%s: No link.\n", priv->phydev->dev->name);
		return -1;
	}

	nwconfig = ZYNQ_GEM_NWCFG_INIT;

	/*
	 * Set the SGMII enable and PCS selection bits only if the internal
	 * PCS/PMA core is used and the interface is SGMII.
	 */
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
			    ZYNQ_GEM_NWCFG_PCS_SEL;
	}

	switch (priv->phydev->speed) {
	case SPEED_1000:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

#ifdef CONFIG_ARM64
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		/*
		 * Disable AN for fixed link configuration, enable otherwise.
		 * Must be written after PCS_SEL is set in nwconfig,
		 * otherwise writes will not take effect.
		 */
		if (priv->phydev->phy_id != PHY_FIXED_ID)
			writel(readl(&regs->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
			       &regs->pcscntrl);
		else
			writel(readl(&regs->pcscntrl) & ~ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
			       &regs->pcscntrl);
	}
#endif

	ret = clk_set_rate(&priv->tx_clk, clk_rate);
	if (IS_ERR_VALUE(ret)) {
		dev_err(dev, "failed to set tx clock rate\n");
		return ret;
	}

	ret = clk_enable(&priv->tx_clk);
	if (ret) {
		dev_err(dev, "failed to enable tx clock\n");
		return ret;
	}

	if (priv->clk_en_info & RXCLK_EN) {
		ret = clk_enable(&priv->rx_clk);
		if (ret) {
			dev_err(dev, "failed to enable rx clock\n");
			return ret;
		}
	}

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
				    ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}

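/*
 * Transmit a single packet: build one TX descriptor for the buffer plus
 * a used/wrap dummy descriptor terminating the chain, flush the payload
 * from the data cache, kick tx_go and poll the TX status register for
 * completion.
 */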
static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
{
	dma_addr_t addr;
	u32 size;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *current_bd = &priv->tx_bd[1];

	/* Setup Tx BD */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = lower_32_bits((ulong)ptr);
#if defined(CONFIG_PHYS_64BIT)
	priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
#endif
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			       ZYNQ_GEM_TXBUF_LAST_MASK;
	/* Dummy descriptor to mark it as the last in descriptor chain */
	current_bd->addr = 0x0;
#if defined(CONFIG_PHYS_64BIT)
	current_bd->addr_hi = 0x0;
#endif
	current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			     ZYNQ_GEM_TXBUF_LAST_MASK |
			     ZYNQ_GEM_TXBUF_USED_MASK;

	/* setup BD */
	writel(lower_32_bits((ulong)priv->tx_bd), &regs->txqbase);
#if defined(CONFIG_PHYS_64BIT)
	writel(upper_32_bits((ulong)priv->tx_bd), &regs->upper_txqbase);
#endif

	addr = (ulong)ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmit */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return wait_for_bit_le32(&regs->txsr, ZYNQ_GEM_TSR_DONE,
				 true, 20000, true);
}

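/*
 * The returned packet pointer references the DMA ring buffer directly;
 * the descriptor is handed back to hardware later in zynq_gem_free_pkt().
 */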
/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
{
	int frame_len;
	dma_addr_t addr;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];

	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return -1;

	if (!(current_bd->status &
			(ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return -1;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (!frame_len) {
		printf("%s: Zero size packet?\n", __func__);
		return -1;
	}

#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		      | ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	addr &= ~(ARCH_DMA_MINALIGN - 1);

	*packetp = (uchar *)(uintptr_t)addr;

	invalidate_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	return frame_len;
}

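/*
 * Return the descriptor(s) of the received frame to hardware: clear the
 * used (NEW) bit, reset the status word, flush the buffer from the data
 * cache and advance rxbd_current to the next ring entry.
 */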
static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;
	dma_addr_t addr;

	if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
		priv->rx_first_buf = priv->rxbd_current;
	} else {
		current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		current_bd->status = 0xF0000000; /* FIXME */
	}

	if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
		first_bd = &priv->rx_bd[priv->rx_first_buf];
		first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		first_bd->status = 0xF0000000;
	}

	/* Flush the cache for the packet as well */
#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		| ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	flush_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN,
						ARCH_DMA_MINALIGN));
	barrier();

	if ((++priv->rxbd_current) >= RX_BUF)
		priv->rxbd_current = 0;

	return 0;
}

static void zynq_gem_halt(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
						ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

__weak int zynq_board_read_rom_ethaddr(unsigned char *ethaddr)
{
	return -ENOSYS;
}

static int zynq_gem_read_rom_mac(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

	if (!pdata)
		return -ENOSYS;

	return zynq_board_read_rom_ethaddr(pdata->enetaddr);
}

static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
				int devad, int reg)
{
	struct zynq_gem_priv *priv = bus->priv;
	int ret;
	u16 val = 0;

	ret = phyread(priv, addr, reg, &val);
	debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);

	/* Propagate MDIO errors instead of returning a stale value */
	return ret ? ret : val;
}

static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
				 int reg, u16 value)
{
	struct zynq_gem_priv *priv = bus->priv;

	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);
	return phywrite(priv, addr, reg, value);
}

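/*
 * Probe: allocate the cache-aligned RX buffers, place the buffer
 * descriptors in a 1 MB region that is mapped with the data cache
 * disabled, look up the clocks from the device tree and register the
 * MDIO bus before initializing the PHY.
 */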
static int zynq_gem_probe(struct udevice *dev)
{
	void *bd_space;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	ulong addr;
	int ret;

	/* Align rxbuffers to ARCH_DMA_MINALIGN */
	priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
	if (!priv->rxbuffers)
		return -ENOMEM;

	memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
	addr = (ulong)priv->rxbuffers;
	flush_dcache_range(addr, addr + roundup(RX_BUF * PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	/* Align bd_space to MMU_SECTION_SHIFT */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space) {
		ret = -ENOMEM;
		goto err1;
	}

	mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
					BD_SPACE, DCACHE_OFF);

	/* Initialize the bd spaces for tx and rx bd's */
	priv->tx_bd = (struct emac_bd *)bd_space;
	priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);

	ret = clk_get_by_name(dev, "tx_clk", &priv->tx_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get tx_clock\n");
		goto err2;
	}

	if (priv->clk_en_info & RXCLK_EN) {
		ret = clk_get_by_name(dev, "rx_clk", &priv->rx_clk);
		if (ret < 0) {
			dev_err(dev, "failed to get rx_clock\n");
			goto err2;
		}
	}

	priv->bus = mdio_alloc();
	if (!priv->bus) {
		ret = -ENOMEM;
		goto err2;
	}

	priv->bus->read = zynq_gem_miiphy_read;
	priv->bus->write = zynq_gem_miiphy_write;
	priv->bus->priv = priv;

	ret = mdio_register_seq(priv->bus, dev_seq(dev));
	if (ret)
		goto err2;

	ret = zynq_phy_init(dev);
	if (ret)
		goto err3;

	return ret;

err3:
	mdio_unregister(priv->bus);
err2:
	free(priv->tx_bd);
err1:
	free(priv->rxbuffers);
	return ret;
}

static int zynq_gem_remove(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops zynq_gem_ops = {
	.start			= zynq_gem_init,
	.send			= zynq_gem_send,
	.recv			= zynq_gem_recv,
	.free_pkt		= zynq_gem_free_pkt,
	.stop			= zynq_gem_halt,
	.write_hwaddr		= zynq_gem_setup_mac,
	.read_rom_hwaddr	= zynq_gem_read_rom_mac,
};

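/*
 * Parse the device tree: base (and optional separate MDIO) register
 * addresses, the PHY address and node from "phy-handle", the optional
 * "max-speed" limit, the "phy-mode" interface type and the
 * "is-internal-pcspma" flag for the internal SGMII PCS/PMA.
 */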
static int zynq_gem_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args phandle_args;
	const char *phy_mode;

	pdata->iobase = (phys_addr_t)dev_read_addr(dev);
	priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
	priv->mdiobase = priv->iobase;
	/* Hardcode for now */
	priv->phyaddr = -1;

	if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					&phandle_args)) {
		fdt_addr_t addr;
		ofnode parent;

		debug("phy-handle does exist %s\n", dev->name);
		priv->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
		priv->phy_of_node = phandle_args.node;
		priv->max_speed = ofnode_read_u32_default(phandle_args.node,
							  "max-speed",
							  SPEED_1000);

		parent = ofnode_get_parent(phandle_args.node);
		addr = ofnode_get_addr(parent);
		if (addr != FDT_ADDR_T_NONE) {
			debug("MDIO bus found for %s\n", dev->name);
			priv->mdiobase = (struct zynq_gem_regs *)addr;
		}
	}

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}
	priv->interface = pdata->phy_interface;

	priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");

	printf("\nZYNQ GEM: %lx, mdio bus %lx, phyaddr %d, interface %s\n",
	       (ulong)priv->iobase, (ulong)priv->mdiobase, priv->phyaddr,
	       phy_string_for_interface(priv->interface));

	priv->clk_en_info = dev_get_driver_data(dev);

	return 0;
}

static const struct udevice_id zynq_gem_ids[] = {
	{ .compatible = "cdns,versal-gem", .data = RXCLK_EN },
	{ .compatible = "cdns,zynqmp-gem" },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "cdns,gem" },
	{ }
};

U_BOOT_DRIVER(zynq_gem) = {
	.name	= "zynq_gem",
	.id	= UCLASS_ETH,
	.of_match = zynq_gem_ids,
	.of_to_plat = zynq_gem_of_to_plat,
	.probe	= zynq_gem_probe,
	.remove	= zynq_gem_remove,
	.ops	= &zynq_gem_ops,
	.priv_auto	= sizeof(struct zynq_gem_priv),
	.plat_auto	= sizeof(struct eth_pdata),
};