1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2019 Amit Singh Tomar <amittomer25@gmail.com>
4  *
5  * Driver for Broadcom GENETv5 Ethernet controller (as found on the RPi4)
6  * This driver is based on the Linux driver:
7  *      drivers/net/ethernet/broadcom/genet/bcmgenet.c
8  *      which is: Copyright (c) 2014-2017 Broadcom
9  *
10  * The hardware supports multiple queues (16 priority queues and one
11  * default queue), both for RX and TX. There are 256 DMA descriptors (both
12  * for TX and RX), and they live in MMIO registers. The hardware allows
13  * assigning descriptor ranges to queues, but we choose the most simple setup:
14  * All 256 descriptors are assigned to the default queue (#16).
15  * Also the Linux driver supports multiple generations of the MAC, whereas
16  * we only support v5, as used in the Raspberry Pi 4.
17  */
18 
19 #include <log.h>
20 #include <asm/cache.h>
21 #include <asm/io.h>
22 #include <clk.h>
23 #include <cpu_func.h>
24 #include <dm.h>
25 #include <fdt_support.h>
26 #include <linux/bitops.h>
27 #include <linux/delay.h>
28 #include <linux/err.h>
29 #include <malloc.h>
30 #include <miiphy.h>
31 #include <net.h>
32 #include <dm/of_access.h>
33 #include <dm/ofnode.h>
34 #include <linux/iopoll.h>
35 #include <linux/sizes.h>
36 #include <asm/dma-mapping.h>
37 #include <wait_bit.h>
38 
39 /* Register definitions derived from Linux source */
/* Register definitions derived from Linux source */
#define SYS_REV_CTRL			0x00

#define SYS_PORT_CTRL			0x04
#define PORT_MODE_EXT_GPHY		3

#define GENET_SYS_OFF			0x0000
#define SYS_RBUF_FLUSH_CTRL		(GENET_SYS_OFF  + 0x08)
#define SYS_TBUF_FLUSH_CTRL		(GENET_SYS_OFF  + 0x0c)

#define GENET_EXT_OFF			0x0080
#define EXT_RGMII_OOB_CTRL		(GENET_EXT_OFF + 0x0c)
#define RGMII_LINK			BIT(4)
#define OOB_DISABLE			BIT(5)
#define RGMII_MODE_EN			BIT(6)
#define ID_MODE_DIS			BIT(16)

#define GENET_RBUF_OFF			0x0300
#define RBUF_TBUF_SIZE_CTRL		(GENET_RBUF_OFF + 0xb4)
#define RBUF_CTRL			(GENET_RBUF_OFF + 0x00)
#define RBUF_ALIGN_2B			BIT(1)

#define GENET_UMAC_OFF			0x0800
#define UMAC_MIB_CTRL			(GENET_UMAC_OFF + 0x580)
#define UMAC_MAX_FRAME_LEN		(GENET_UMAC_OFF + 0x014)
#define UMAC_MAC0			(GENET_UMAC_OFF + 0x00c)
#define UMAC_MAC1			(GENET_UMAC_OFF + 0x010)
#define UMAC_CMD			(GENET_UMAC_OFF + 0x008)
#define MDIO_CMD			(GENET_UMAC_OFF + 0x614)
#define UMAC_TX_FLUSH			(GENET_UMAC_OFF + 0x334)
#define MDIO_START_BUSY			BIT(29)
#define MDIO_READ_FAIL			BIT(28)
#define MDIO_RD				(2 << 26)
#define MDIO_WR				BIT(26)
#define MDIO_PMD_SHIFT			21
#define MDIO_PMD_MASK			0x1f
#define MDIO_REG_SHIFT			16
#define MDIO_REG_MASK			0x1f

/* UMAC_CMD bits (CMD_TX_EN/CMD_RX_EN were previously defined twice;
 * the duplicate definitions have been dropped)
 */
#define CMD_TX_EN			BIT(0)
#define CMD_RX_EN			BIT(1)
#define UMAC_SPEED_10			0
#define UMAC_SPEED_100			1
#define UMAC_SPEED_1000			2
#define UMAC_SPEED_2500			3
#define CMD_SPEED_SHIFT			2
#define CMD_SPEED_MASK			3
#define CMD_SW_RESET			BIT(13)
#define CMD_LCL_LOOP_EN			BIT(15)

#define MIB_RESET_RX			BIT(0)
#define MIB_RESET_RUNT			BIT(1)
#define MIB_RESET_TX			BIT(2)

/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESCS			256
#define RX_DESCS			TOTAL_DESCS
#define TX_DESCS			TOTAL_DESCS

#define DEFAULT_Q			0x10
101 
/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528.
 * 1536 is multiple of 256 bytes
 */
#define ENET_BRCM_TAG_LEN		6
#define ENET_PAD			8
#define ENET_MAX_MTU_SIZE		(ETH_DATA_LEN + ETH_HLEN +	 \
					 VLAN_HLEN + ENET_BRCM_TAG_LEN + \
					 ETH_FCS_LEN + ENET_PAD)

/* Tx/Rx Dma Descriptor common bits */
#define DMA_EN				BIT(0)
#define DMA_RING_BUF_EN_SHIFT		0x01
#define DMA_RING_BUF_EN_MASK		0xffff
#define DMA_BUFLENGTH_MASK		0x0fff
#define DMA_BUFLENGTH_SHIFT		16
#define DMA_RING_SIZE_SHIFT		16
#define DMA_OWN				0x8000
#define DMA_EOP				0x4000
#define DMA_SOP				0x2000
#define DMA_WRAP			0x1000
#define DMA_MAX_BURST_LENGTH		0x8
/* Tx specific DMA descriptor bits */
#define DMA_TX_UNDERRUN			0x0200
#define DMA_TX_APPEND_CRC		0x0040
#define DMA_TX_OW_CRC			0x0020
#define DMA_TX_DO_CSUM			0x0010
#define DMA_TX_QTAG_SHIFT		7

/* DMA rings size: one 0x40-byte register window per queue (17 total) */
#define DMA_RING_SIZE			0x40
#define DMA_RINGS_SIZE			(DMA_RING_SIZE * (DEFAULT_Q + 1))

/* DMA descriptor: three 32-bit MMIO words per descriptor */
#define DMA_DESC_LENGTH_STATUS		0x00
#define DMA_DESC_ADDRESS_LO		0x04
#define DMA_DESC_ADDRESS_HI		0x08
#define DMA_DESC_SIZE			12

/* The descriptor arrays sit at the start of each direction's MMIO
 * region; the per-ring registers follow immediately after them.
 */
#define GENET_RX_OFF			0x2000
#define GENET_RDMA_REG_OFF					\
	(GENET_RX_OFF + TOTAL_DESCS * DMA_DESC_SIZE)
#define GENET_TX_OFF			0x4000
#define GENET_TDMA_REG_OFF					\
	(GENET_TX_OFF + TOTAL_DESCS * DMA_DESC_SIZE)

#define DMA_FC_THRESH_HI		(RX_DESCS >> 4)
#define DMA_FC_THRESH_LO		5
#define DMA_FC_THRESH_VALUE		((DMA_FC_THRESH_LO << 16) |	\
					  DMA_FC_THRESH_HI)

#define DMA_XOFF_THRESHOLD_SHIFT	16

/* Ring registers of the default queue (#16), TX direction */
#define TDMA_RING_REG_BASE					\
	(GENET_TDMA_REG_OFF + DEFAULT_Q * DMA_RING_SIZE)
#define TDMA_READ_PTR			(TDMA_RING_REG_BASE + 0x00)
#define TDMA_CONS_INDEX			(TDMA_RING_REG_BASE + 0x08)
#define TDMA_PROD_INDEX			(TDMA_RING_REG_BASE + 0x0c)
#define DMA_RING_BUF_SIZE		0x10
#define DMA_START_ADDR			0x14
#define DMA_END_ADDR			0x1c
#define DMA_MBUF_DONE_THRESH		0x24
#define TDMA_FLOW_PERIOD		(TDMA_RING_REG_BASE + 0x28)
#define TDMA_WRITE_PTR			(TDMA_RING_REG_BASE + 0x2c)

/* Ring registers of the default queue (#16), RX direction */
#define RDMA_RING_REG_BASE					\
	(GENET_RDMA_REG_OFF + DEFAULT_Q * DMA_RING_SIZE)
#define RDMA_WRITE_PTR			(RDMA_RING_REG_BASE + 0x00)
#define RDMA_PROD_INDEX			(RDMA_RING_REG_BASE + 0x08)
#define RDMA_CONS_INDEX			(RDMA_RING_REG_BASE + 0x0c)
#define RDMA_XON_XOFF_THRESH		(RDMA_RING_REG_BASE + 0x28)
#define RDMA_READ_PTR			(RDMA_RING_REG_BASE + 0x2c)

/* Global DMA engine registers, after all per-ring windows */
#define TDMA_REG_BASE			(GENET_TDMA_REG_OFF + DMA_RINGS_SIZE)
#define RDMA_REG_BASE			(GENET_RDMA_REG_OFF + DMA_RINGS_SIZE)
#define DMA_RING_CFG			0x00
#define DMA_CTRL			0x04
#define DMA_SCB_BURST_SIZE		0x0c

#define RX_BUF_LENGTH			2048
#define RX_TOTAL_BUFSIZE		(RX_BUF_LENGTH * RX_DESCS)
#define RX_BUF_OFFSET			2
183 
struct bcmgenet_eth_priv {
	/* RX buffer pool: one RX_BUF_LENGTH slice per descriptor */
	char rxbuffer[RX_TOTAL_BUFSIZE] __aligned(ARCH_DMA_MINALIGN);
	void *mac_reg;		/* base of the memory-mapped MAC registers */
	void *tx_desc_base;	/* first TX DMA descriptor (MMIO) */
	void *rx_desc_base;	/* first RX DMA descriptor (MMIO) */
	int tx_index;		/* next TX descriptor to fill */
	int rx_index;		/* next RX descriptor to reap */
	int c_index;		/* last RX consumer index written back */
	int phyaddr;		/* PHY address from DT phy-handle "reg" */
	u32 interface;		/* phy_interface_t from DT "phy-mode" */
	u32 speed;		/* DT "max-speed" limit, 0 if unset */
	struct phy_device *phydev;
	struct mii_dev *bus;
};
198 
/* Reset the UniMAC and restore a sane default register state.
 * The sequence and delays mirror the Linux bcmgenet driver; the order
 * of the register writes is significant.
 */
static void bcmgenet_umac_reset(struct bcmgenet_eth_priv *priv)
{
	u32 reg;

	/* Pulse bit 1 of the RBUF flush control, then clear the register */
	reg = readl(priv->mac_reg + SYS_RBUF_FLUSH_CTRL);
	reg |= BIT(1);
	writel(reg, (priv->mac_reg + SYS_RBUF_FLUSH_CTRL));
	udelay(10);

	reg &= ~BIT(1);
	writel(reg, (priv->mac_reg + SYS_RBUF_FLUSH_CTRL));
	udelay(10);

	writel(0, (priv->mac_reg + SYS_RBUF_FLUSH_CTRL));
	udelay(10);

	writel(0, priv->mac_reg + UMAC_CMD);

	/* Soft-reset with local loopback enabled for a stable RX clock */
	writel(CMD_SW_RESET | CMD_LCL_LOOP_EN, priv->mac_reg + UMAC_CMD);
	udelay(2);
	writel(0, priv->mac_reg + UMAC_CMD);

	/* clear tx/rx counter */
	writel(MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
	       priv->mac_reg + UMAC_MIB_CTRL);
	writel(0, priv->mac_reg + UMAC_MIB_CTRL);

	writel(ENET_MAX_MTU_SIZE, priv->mac_reg + UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = readl(priv->mac_reg + RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	writel(reg, (priv->mac_reg + RBUF_CTRL));

	writel(1, (priv->mac_reg + RBUF_TBUF_SIZE_CTRL));
}
235 
bcmgenet_gmac_write_hwaddr(struct udevice * dev)236 static int bcmgenet_gmac_write_hwaddr(struct udevice *dev)
237 {
238 	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
239 	struct eth_pdata *pdata = dev_get_plat(dev);
240 	uchar *addr = pdata->enetaddr;
241 	u32 reg;
242 
243 	reg = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
244 	writel_relaxed(reg, priv->mac_reg + UMAC_MAC0);
245 
246 	reg = addr[4] << 8 | addr[5];
247 	writel_relaxed(reg, priv->mac_reg + UMAC_MAC1);
248 
249 	return 0;
250 }
251 
/* Stop both DMA engines and flush any frames still queued for TX. */
static void bcmgenet_disable_dma(struct bcmgenet_eth_priv *priv)
{
	clrbits_32(priv->mac_reg + TDMA_REG_BASE + DMA_CTRL, DMA_EN);
	clrbits_32(priv->mac_reg + RDMA_REG_BASE + DMA_CTRL, DMA_EN);

	/* Pulse the TX flush bit to drop pending transmit data */
	writel(1, priv->mac_reg + UMAC_TX_FLUSH);
	udelay(10);
	writel(0, priv->mac_reg + UMAC_TX_FLUSH);
}
261 
/* Enable TX and RX DMA, with the default queue's ring buffer enabled.
 * Note the TX side overwrites DMA_CTRL while the RX side only sets
 * bits; this mirrors the Linux driver's behavior.
 */
static void bcmgenet_enable_dma(struct bcmgenet_eth_priv *priv)
{
	u32 dma_ctrl = (1 << (DEFAULT_Q + DMA_RING_BUF_EN_SHIFT)) | DMA_EN;

	writel(dma_ctrl, priv->mac_reg + TDMA_REG_BASE + DMA_CTRL);

	setbits_32(priv->mac_reg + RDMA_REG_BASE + DMA_CTRL, dma_ctrl);
}
270 
/* Transmit one packet: fill the next TX descriptor, bump the producer
 * index, then poll until the hardware consumer index catches up.
 * Returns 0 on success, -ETIMEDOUT if the MAC never consumed the frame.
 */
static int bcmgenet_gmac_eth_send(struct udevice *dev, void *packet, int length)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	void *desc_base = priv->tx_desc_base + priv->tx_index * DMA_DESC_SIZE;
	u32 len_stat = length << DMA_BUFLENGTH_SHIFT;
	ulong packet_aligned = rounddown((ulong)packet, ARCH_DMA_MINALIGN);
	u32 prod_index, cons;
	u32 tries = 100;

	prod_index = readl(priv->mac_reg + TDMA_PROD_INDEX);

	/* There is actually no reason for the rounding here, but the ARMv7
	 * implementation of flush_dcache_range() checks for aligned
	 * boundaries of the flushed range.
	 * Adjust them here to pass that check and avoid misleading messages.
	 */
	flush_dcache_range(packet_aligned,
			   packet_aligned + roundup(length, ARCH_DMA_MINALIGN));

	/* QTAG 0x3f selects the default queue; single-fragment frame */
	len_stat |= 0x3F << DMA_TX_QTAG_SHIFT;
	len_stat |= DMA_TX_APPEND_CRC | DMA_SOP | DMA_EOP;

	/* Set-up packet for transmission */
	writel(lower_32_bits((ulong)packet), (desc_base + DMA_DESC_ADDRESS_LO));
	writel(upper_32_bits((ulong)packet), (desc_base + DMA_DESC_ADDRESS_HI));
	writel(len_stat, (desc_base + DMA_DESC_LENGTH_STATUS));

	/* Increment index and start transmission */
	if (++priv->tx_index >= TX_DESCS)
		priv->tx_index = 0;

	prod_index++;

	/* Start Transmisson */
	writel(prod_index, priv->mac_reg + TDMA_PROD_INDEX);

	/* Poll (bounded by 'tries') for the consumer index to catch up */
	do {
		cons = readl(priv->mac_reg + TDMA_CONS_INDEX);
	} while ((cons & 0xffff) < prod_index && --tries);
	if (!tries)
		return -ETIMEDOUT;

	return 0;
}
315 
316 /* Check whether all cache lines affected by an invalidate are within
317  * the buffer, to make sure we don't accidentally lose unrelated dirty
318  * data stored nearby.
319  * Alignment of the buffer start address will be checked in the implementation
320  * of invalidate_dcache_range().
321  */
invalidate_dcache_check(unsigned long addr,size_t size,size_t buffer_size)322 static void invalidate_dcache_check(unsigned long addr, size_t size,
323 				    size_t buffer_size)
324 {
325 	size_t inval_size = roundup(size, ARCH_DMA_MINALIGN);
326 
327 	if (unlikely(inval_size > buffer_size))
328 		printf("WARNING: Cache invalidate area exceeds buffer size\n");
329 
330 	invalidate_dcache_range(addr, addr + inval_size);
331 }
332 
/* Receive one packet, if available.
 * Returns the payload length (minus the 2-byte alignment offset) and
 * points *packetp into the DMA buffer, or -EAGAIN if nothing arrived.
 */
static int bcmgenet_gmac_eth_recv(struct udevice *dev,
				  int flags, uchar **packetp)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	void *desc_base = priv->rx_desc_base + priv->rx_index * DMA_DESC_SIZE;
	u32 prod_index = readl(priv->mac_reg + RDMA_PROD_INDEX);
	u32 length, addr;

	/* Producer has not advanced past our consumer: nothing new */
	if (prod_index == priv->c_index)
		return -EAGAIN;

	length = readl(desc_base + DMA_DESC_LENGTH_STATUS);
	length = (length >> DMA_BUFLENGTH_SHIFT) & DMA_BUFLENGTH_MASK;
	addr = readl(desc_base + DMA_DESC_ADDRESS_LO);

	/* Make the DMA-written data visible to the CPU */
	invalidate_dcache_check(addr, length, RX_BUF_LENGTH);

	/* To cater for the IP header alignment the hardware does.
	 * This would actually not be needed if we don't program
	 * RBUF_ALIGN_2B
	 */
	*packetp = (uchar *)(ulong)addr + RX_BUF_OFFSET;

	return length - RX_BUF_OFFSET;
}
358 
/* Hand the most recently received buffer back to the hardware and
 * advance the software descriptor pointer. Always returns 0.
 */
static int bcmgenet_gmac_free_pkt(struct udevice *dev, uchar *packet,
				  int length)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);

	/* The consumer index is a free-running 16-bit counter */
	priv->c_index = (priv->c_index + 1) & 0xFFFF;
	writel(priv->c_index, priv->mac_reg + RDMA_CONS_INDEX);

	/* Step to the next descriptor, wrapping at the ring size */
	priv->rx_index = (priv->rx_index + 1) % RX_DESCS;

	return 0;
}
374 
rx_descs_init(struct bcmgenet_eth_priv * priv)375 static void rx_descs_init(struct bcmgenet_eth_priv *priv)
376 {
377 	char *rxbuffs = &priv->rxbuffer[0];
378 	u32 len_stat, i;
379 	void *desc_base = priv->rx_desc_base;
380 
381 	len_stat = (RX_BUF_LENGTH << DMA_BUFLENGTH_SHIFT) | DMA_OWN;
382 
383 	for (i = 0; i < RX_DESCS; i++) {
384 		writel(lower_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]),
385 		       desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_LO);
386 		writel(upper_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]),
387 		       desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_HI);
388 		writel(len_stat,
389 		       desc_base + i * DMA_DESC_SIZE + DMA_DESC_LENGTH_STATUS);
390 	}
391 }
392 
/* Configure the default RX queue: ring boundaries, pointers, flow
 * control thresholds, and the software consumer state.
 */
static void rx_ring_init(struct bcmgenet_eth_priv *priv)
{
	writel(DMA_MAX_BURST_LENGTH,
	       priv->mac_reg + RDMA_REG_BASE + DMA_SCB_BURST_SIZE);

	/* Ring spans all 256 descriptors (addresses in 32-bit words) */
	writel(0x0, priv->mac_reg + RDMA_RING_REG_BASE + DMA_START_ADDR);
	writel(0x0, priv->mac_reg + RDMA_READ_PTR);
	writel(0x0, priv->mac_reg + RDMA_WRITE_PTR);
	writel(RX_DESCS * DMA_DESC_SIZE / 4 - 1,
	       priv->mac_reg + RDMA_RING_REG_BASE + DMA_END_ADDR);

	/* cannot init RDMA_PROD_INDEX to 0, so align RDMA_CONS_INDEX on it instead */
	priv->c_index = readl(priv->mac_reg + RDMA_PROD_INDEX);
	writel(priv->c_index, priv->mac_reg + RDMA_CONS_INDEX);
	priv->rx_index = priv->c_index;
	/* descriptor index is the low byte of the 16-bit consumer index */
	priv->rx_index &= 0xFF;
	writel((RX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH,
	       priv->mac_reg + RDMA_RING_REG_BASE + DMA_RING_BUF_SIZE);
	writel(DMA_FC_THRESH_VALUE, priv->mac_reg + RDMA_XON_XOFF_THRESH);
	/* enable only the default queue in the ring configuration */
	writel(1 << DEFAULT_Q, priv->mac_reg + RDMA_REG_BASE + DMA_RING_CFG);
}
414 
/* Configure the default TX queue: ring boundaries, pointers, and the
 * software producer state.
 */
static void tx_ring_init(struct bcmgenet_eth_priv *priv)
{
	writel(DMA_MAX_BURST_LENGTH,
	       priv->mac_reg + TDMA_REG_BASE + DMA_SCB_BURST_SIZE);

	/* Ring spans all 256 descriptors (addresses in 32-bit words) */
	writel(0x0, priv->mac_reg + TDMA_RING_REG_BASE + DMA_START_ADDR);
	writel(0x0, priv->mac_reg + TDMA_READ_PTR);
	writel(0x0, priv->mac_reg + TDMA_WRITE_PTR);
	writel(TX_DESCS * DMA_DESC_SIZE / 4 - 1,
	       priv->mac_reg + TDMA_RING_REG_BASE + DMA_END_ADDR);
	/* cannot init TDMA_CONS_INDEX to 0, so align TDMA_PROD_INDEX on it instead */
	priv->tx_index = readl(priv->mac_reg + TDMA_CONS_INDEX);
	writel(priv->tx_index, priv->mac_reg + TDMA_PROD_INDEX);
	/* descriptor index is the low byte of the 16-bit producer index */
	priv->tx_index &= 0xFF;
	writel(0x1, priv->mac_reg + TDMA_RING_REG_BASE + DMA_MBUF_DONE_THRESH);
	writel(0x0, priv->mac_reg + TDMA_FLOW_PERIOD);
	writel((TX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH,
	       priv->mac_reg + TDMA_RING_REG_BASE + DMA_RING_BUF_SIZE);

	/* enable only the default queue in the ring configuration */
	writel(1 << DEFAULT_Q, priv->mac_reg + TDMA_REG_BASE + DMA_RING_CFG);
}
436 
bcmgenet_adjust_link(struct bcmgenet_eth_priv * priv)437 static int bcmgenet_adjust_link(struct bcmgenet_eth_priv *priv)
438 {
439 	struct phy_device *phy_dev = priv->phydev;
440 	u32 speed;
441 
442 	switch (phy_dev->speed) {
443 	case SPEED_1000:
444 		speed = UMAC_SPEED_1000;
445 		break;
446 	case SPEED_100:
447 		speed = UMAC_SPEED_100;
448 		break;
449 	case SPEED_10:
450 		speed = UMAC_SPEED_10;
451 		break;
452 	default:
453 		printf("bcmgenet: Unsupported PHY speed: %d\n", phy_dev->speed);
454 		return -EINVAL;
455 	}
456 
457 	clrsetbits_32(priv->mac_reg + EXT_RGMII_OOB_CTRL, OOB_DISABLE,
458 			RGMII_LINK | RGMII_MODE_EN);
459 
460 	if (phy_dev->interface == PHY_INTERFACE_MODE_RGMII ||
461 	    phy_dev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
462 		setbits_32(priv->mac_reg + EXT_RGMII_OOB_CTRL, ID_MODE_DIS);
463 
464 	writel(speed << CMD_SPEED_SHIFT, (priv->mac_reg + UMAC_CMD));
465 
466 	return 0;
467 }
468 
/* Bring the interface up: reset the MAC, program the address, set up
 * both DMA rings, start the PHY, and finally enable TX/RX.
 * Returns 0 on success or a negative error from the PHY layer.
 */
static int bcmgenet_gmac_eth_start(struct udevice *dev)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	int ret;

	/* Descriptors live at fixed MMIO offsets inside the MAC window */
	priv->tx_desc_base = priv->mac_reg + GENET_TX_OFF;
	priv->rx_desc_base = priv->mac_reg + GENET_RX_OFF;

	bcmgenet_umac_reset(priv);

	bcmgenet_gmac_write_hwaddr(dev);

	/* Disable RX/TX DMA and flush TX queues */
	bcmgenet_disable_dma(priv);

	rx_ring_init(priv);
	rx_descs_init(priv);

	tx_ring_init(priv);

	/* Enable RX/TX DMA */
	bcmgenet_enable_dma(priv);

	/* read PHY properties over the wire from generic PHY set-up */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("bcmgenet: PHY startup failed: %d\n", ret);
		return ret;
	}

	/* Update MAC registers based on PHY property */
	ret = bcmgenet_adjust_link(priv);
	if (ret) {
		printf("bcmgenet: adjust PHY link failed: %d\n", ret);
		return ret;
	}

	/* Enable Rx/Tx */
	setbits_32(priv->mac_reg + UMAC_CMD, CMD_TX_EN | CMD_RX_EN);

	return 0;
}
511 
bcmgenet_phy_init(struct bcmgenet_eth_priv * priv,void * dev)512 static int bcmgenet_phy_init(struct bcmgenet_eth_priv *priv, void *dev)
513 {
514 	struct phy_device *phydev;
515 	int ret;
516 
517 	phydev = phy_connect(priv->bus, priv->phyaddr, dev, priv->interface);
518 	if (!phydev)
519 		return -ENODEV;
520 
521 	phydev->supported &= PHY_GBIT_FEATURES;
522 	if (priv->speed) {
523 		ret = phy_set_supported(priv->phydev, priv->speed);
524 		if (ret)
525 			return ret;
526 	}
527 	phydev->advertising = phydev->supported;
528 
529 	phy_connect_dev(phydev, dev);
530 
531 	priv->phydev = phydev;
532 	phy_config(priv->phydev);
533 
534 	return 0;
535 }
536 
bcmgenet_mdio_start(struct bcmgenet_eth_priv * priv)537 static void bcmgenet_mdio_start(struct bcmgenet_eth_priv *priv)
538 {
539 	setbits_32(priv->mac_reg + MDIO_CMD, MDIO_START_BUSY);
540 }
541 
/* Write one 16-bit value to a PHY register over MDIO.
 * Returns 0 on success or a negative error if the BUSY bit never
 * clears within the timeout.
 */
static int bcmgenet_mdio_write(struct mii_dev *bus, int addr, int devad,
			       int reg, u16 value)
{
	struct udevice *dev = bus->priv;
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	u32 cmd;

	/* Stage the write command: opcode, PHY address, register, data */
	cmd = MDIO_WR;
	cmd |= addr << MDIO_PMD_SHIFT;
	cmd |= reg << MDIO_REG_SHIFT;
	cmd |= value & 0xffff;
	writel_relaxed(cmd, priv->mac_reg + MDIO_CMD);

	/* Start MDIO transaction and wait for BUSY to clear */
	bcmgenet_mdio_start(priv);

	return wait_for_bit_32(priv->mac_reg + MDIO_CMD,
			       MDIO_START_BUSY, false, 20, true);
}
560 
bcmgenet_mdio_read(struct mii_dev * bus,int addr,int devad,int reg)561 static int bcmgenet_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
562 {
563 	struct udevice *dev = bus->priv;
564 	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
565 	u32 val;
566 	int ret;
567 
568 	/* Prepare the read operation */
569 	val = MDIO_RD | (addr << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
570 	writel_relaxed(val, priv->mac_reg + MDIO_CMD);
571 
572 	/* Start MDIO transaction */
573 	bcmgenet_mdio_start(priv);
574 
575 	ret = wait_for_bit_32(priv->mac_reg + MDIO_CMD,
576 			      MDIO_START_BUSY, false, 20, true);
577 	if (ret)
578 		return ret;
579 
580 	val = readl_relaxed(priv->mac_reg + MDIO_CMD);
581 
582 	return val & 0xffff;
583 }
584 
bcmgenet_mdio_init(const char * name,struct udevice * priv)585 static int bcmgenet_mdio_init(const char *name, struct udevice *priv)
586 {
587 	struct mii_dev *bus = mdio_alloc();
588 
589 	if (!bus) {
590 		debug("Failed to allocate MDIO bus\n");
591 		return -ENOMEM;
592 	}
593 
594 	bus->read = bcmgenet_mdio_read;
595 	bus->write = bcmgenet_mdio_write;
596 	snprintf(bus->name, sizeof(bus->name), name);
597 	bus->priv = (void *)priv;
598 
599 	return mdio_register(bus);
600 }
601 
602 /* We only support RGMII (as used on the RPi4). */
bcmgenet_interface_set(struct bcmgenet_eth_priv * priv)603 static int bcmgenet_interface_set(struct bcmgenet_eth_priv *priv)
604 {
605 	phy_interface_t phy_mode = priv->interface;
606 
607 	switch (phy_mode) {
608 	case PHY_INTERFACE_MODE_RGMII:
609 	case PHY_INTERFACE_MODE_RGMII_RXID:
610 		writel(PORT_MODE_EXT_GPHY, priv->mac_reg + SYS_PORT_CTRL);
611 		break;
612 	default:
613 		printf("unknown phy mode: %d\n", priv->interface);
614 		return -EINVAL;
615 	}
616 
617 	return 0;
618 }
619 
bcmgenet_eth_probe(struct udevice * dev)620 static int bcmgenet_eth_probe(struct udevice *dev)
621 {
622 	struct eth_pdata *pdata = dev_get_plat(dev);
623 	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
624 	ofnode mdio_node;
625 	const char *name;
626 	u32 reg;
627 	int ret;
628 	u8 major;
629 
630 	priv->mac_reg = map_physmem(pdata->iobase, SZ_64K, MAP_NOCACHE);
631 	priv->interface = pdata->phy_interface;
632 	priv->speed = pdata->max_speed;
633 
634 	/* Read GENET HW version */
635 	reg = readl_relaxed(priv->mac_reg + SYS_REV_CTRL);
636 	major = (reg >> 24) & 0x0f;
637 	if (major != 6) {
638 		if (major == 5)
639 			major = 4;
640 		else if (major == 0)
641 			major = 1;
642 
643 		printf("Unsupported GENETv%d.%d\n", major, (reg >> 16) & 0x0f);
644 		return -ENODEV;
645 	}
646 
647 	ret = bcmgenet_interface_set(priv);
648 	if (ret)
649 		return ret;
650 
651 	writel(0, priv->mac_reg + SYS_RBUF_FLUSH_CTRL);
652 	udelay(10);
653 	/* disable MAC while updating its registers */
654 	writel(0, priv->mac_reg + UMAC_CMD);
655 	/* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
656 	writel(CMD_SW_RESET | CMD_LCL_LOOP_EN, priv->mac_reg + UMAC_CMD);
657 
658 	mdio_node = dev_read_first_subnode(dev);
659 	name = ofnode_get_name(mdio_node);
660 
661 	ret = bcmgenet_mdio_init(name, dev);
662 	if (ret)
663 		return ret;
664 
665 	priv->bus = miiphy_get_dev_by_name(name);
666 
667 	return bcmgenet_phy_init(priv, dev);
668 }
669 
/* Bring the interface down: disable the MAC TX/RX paths, then stop
 * and flush the DMA engines.
 */
static void bcmgenet_gmac_eth_stop(struct udevice *dev)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);

	clrbits_32(priv->mac_reg + UMAC_CMD, CMD_TX_EN | CMD_RX_EN);

	bcmgenet_disable_dma(priv);
}
678 
/* Driver-model Ethernet callbacks; all handlers are defined above */
static const struct eth_ops bcmgenet_gmac_eth_ops = {
	.start                  = bcmgenet_gmac_eth_start,
	.write_hwaddr           = bcmgenet_gmac_write_hwaddr,
	.send                   = bcmgenet_gmac_eth_send,
	.recv                   = bcmgenet_gmac_eth_recv,
	.free_pkt               = bcmgenet_gmac_free_pkt,
	.stop                   = bcmgenet_gmac_eth_stop,
};
687 
bcmgenet_eth_of_to_plat(struct udevice * dev)688 static int bcmgenet_eth_of_to_plat(struct udevice *dev)
689 {
690 	struct eth_pdata *pdata = dev_get_plat(dev);
691 	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
692 	struct ofnode_phandle_args phy_node;
693 	const char *phy_mode;
694 	int ret;
695 
696 	pdata->iobase = dev_read_addr(dev);
697 
698 	/* Get phy mode from DT */
699 	pdata->phy_interface = -1;
700 	phy_mode = dev_read_string(dev, "phy-mode");
701 	if (phy_mode)
702 		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
703 	if (pdata->phy_interface == -1) {
704 		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
705 		return -EINVAL;
706 	}
707 
708 	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
709 					 &phy_node);
710 	if (!ret) {
711 		ofnode_read_s32(phy_node.node, "reg", &priv->phyaddr);
712 		ofnode_read_s32(phy_node.node, "max-speed", &pdata->max_speed);
713 	}
714 
715 	return 0;
716 }
717 
/* The BCM2711 implementation has a limited burst length compared to a generic
 * GENETv5 version, but we go with that shorter value (8) in both cases, for
 * the sake of simplicity.
 */
/* Device-tree compatibles this driver binds to */
static const struct udevice_id bcmgenet_eth_ids[] = {
	{.compatible = "brcm,genet-v5"},
	{.compatible = "brcm,bcm2711-genet-v5"},
	{}
};
727 
/* U-Boot driver-model registration; DM_FLAG_ALLOC_PRIV_DMA ensures the
 * priv struct (which embeds the RX buffer pool) is DMA-capable memory.
 */
U_BOOT_DRIVER(eth_bcmgenet) = {
	.name   = "eth_bcmgenet",
	.id     = UCLASS_ETH,
	.of_match = bcmgenet_eth_ids,
	.of_to_plat = bcmgenet_eth_of_to_plat,
	.probe  = bcmgenet_eth_probe,
	.ops    = &bcmgenet_gmac_eth_ops,
	.priv_auto	= sizeof(struct bcmgenet_eth_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
739