// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 * Author: Mark Lee <mark-mc.lee@mediatek.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <regmap.h>
#include <reset.h>
#include <syscon.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/mdio.h>
#include <linux/mii.h>

#include "mtk_eth.h"

#define NUM_TX_DESC		24
#define NUM_RX_DESC		24
#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)

#define MT753X_NUM_PHYS		5
#define MT753X_NUM_PORTS	7
#define MT753X_DFL_SMI_ADDR	31
#define MT753X_SMI_ADDR_MASK	0x1f

#define MT753X_PHY_ADDR(base, addr) \
	(((base) + (addr)) & 0x1f)

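/*
 * Ingress control words for the GDMA. As the field names suggest, these
 * enable IP/TCP/UDP checksum offload and CRC stripping, and steer frames
 * destined to our MAC as well as broadcast, multicast and unknown unicast
 * frames either to the PDMA (CPU) or to the discard port.
 */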
#define GDMA_FWD_TO_CPU \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_PDMA << MYMAC_DP_S) | \
	(DP_PDMA << BC_DP_S) | \
	(DP_PDMA << MC_DP_S) | \
	(DP_PDMA << UN_DP_S))

#define GDMA_FWD_DISCARD \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_DISCARD << MYMAC_DP_S) | \
	(DP_DISCARD << BC_DP_S) | \
	(DP_DISCARD << MC_DP_S) | \
	(DP_DISCARD << UN_DP_S))

struct pdma_rxd_info1 {
	u32 PDP0;
};

struct pdma_rxd_info2 {
	u32 PLEN1 : 14;
	u32 LS1 : 1;
	u32 UN_USED : 1;
	u32 PLEN0 : 14;
	u32 LS0 : 1;
	u32 DDONE : 1;
};

struct pdma_rxd_info3 {
	u32 PDP1;
};

struct pdma_rxd_info4 {
	u32 FOE_ENTRY : 14;
	u32 CRSN : 5;
	u32 SP : 3;
	u32 L4F : 1;
	u32 L4VLD : 1;
	u32 TACK : 1;
	u32 IP4F : 1;
	u32 IP4 : 1;
	u32 IP6 : 1;
	u32 UN_USED : 4;
};

struct pdma_rxdesc {
	struct pdma_rxd_info1 rxd_info1;
	struct pdma_rxd_info2 rxd_info2;
	struct pdma_rxd_info3 rxd_info3;
	struct pdma_rxd_info4 rxd_info4;
};

struct pdma_txd_info1 {
	u32 SDP0;
};

struct pdma_txd_info2 {
	u32 SDL1 : 14;
	u32 LS1 : 1;
	u32 BURST : 1;
	u32 SDL0 : 14;
	u32 LS0 : 1;
	u32 DDONE : 1;
};

struct pdma_txd_info3 {
	u32 SDP1;
};

struct pdma_txd_info4 {
	u32 VLAN_TAG : 16;
	u32 INS : 1;
	u32 RESV : 2;
	u32 UDF : 6;
	u32 FPORT : 3;
	u32 TSO : 1;
	u32 TUI_CO : 3;
};

struct pdma_txdesc {
	struct pdma_txd_info1 txd_info1;
	struct pdma_txd_info2 txd_info2;
	struct pdma_txd_info3 txd_info3;
	struct pdma_txd_info4 txd_info4;
};

enum mtk_switch {
	SW_NONE,
	SW_MT7530,
	SW_MT7531
};

enum mtk_soc {
	SOC_MT7623,
	SOC_MT7629,
	SOC_MT7622
};

struct mtk_eth_priv {
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	struct pdma_txdesc *tx_ring_noc;
	struct pdma_rxdesc *rx_ring_noc;

	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	void __iomem *fe_base;
	void __iomem *gmac_base;
	void __iomem *ethsys_base;
	void __iomem *sgmii_base;

	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	enum mtk_soc soc;
	int gmac_id;
	int force_mode;
	int speed;
	int duplex;

	struct phy_device *phydev;
	int phy_interface;
	int phy_addr;

	enum mtk_switch sw;
	int (*switch_init)(struct mtk_eth_priv *priv);
	u32 mt753x_smi_addr;
	u32 mt753x_phy_base;

	struct gpio_desc rst_gpio;
	int mcm;

	struct reset_ctl rst_fe;
	struct reset_ctl rst_mcm;
};

static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->fe_base + PDMA_BASE + reg);
}

static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			 u32 set)
{
	clrsetbits_le32(priv->fe_base + PDMA_BASE + reg, clr, set);
}

static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
			   u32 val)
{
	u32 gdma_base;

	if (no == 1)
		gdma_base = GDMA2_BASE;
	else
		gdma_base = GDMA1_BASE;

	writel(val, priv->fe_base + gdma_base + reg);
}

static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}

static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}

static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}

static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	clrsetbits_le32(priv->ethsys_base + reg, clr, set);
}

/* Direct MDIO clause 22/45 access via SoC */
static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
		      u32 cmd, u32 st)
{
	int ret;
	u32 val;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE)
		val |= data & MDIO_RW_DATA_M;

	mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);

	ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
				PHY_ACS_ST, 0, 5000, 0);
	if (ret) {
		pr_warn("MDIO access timeout\n");
		return ret;
	}

	if (cmd == MDIO_CMD_READ) {
		val = mtk_gmac_read(priv, GMAC_PIAC_REG);
		return val & MDIO_RW_DATA_M;
	}

	return 0;
}

/* Direct MDIO clause 22 read via SoC */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}

/* Direct MDIO clause 22 write via SoC */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}

/* Direct MDIO clause 45 read via SoC */
static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
			  MDIO_ST_C45);
}

/* Direct MDIO clause 45 write via SoC */
static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			 u16 reg, u16 val)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
			  MDIO_ST_C45);
}

/* Indirect MDIO clause 45 read via MII registers */
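/*
 * The clause 45 register is reached through the clause 22 MMD access
 * registers: latch the target devad with an address command, write the
 * register number, switch the access function to data mode, then read
 * or write the data register. The write variant below uses the same
 * sequence.
 */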
static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			    u16 reg)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
}

/* Indirect MDIO clause 45 write via MII registers */
static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			     u16 reg, u16 val)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
}

/*
 * MT7530 Internal Register Address Bits
 * -------------------------------------------------------------------
 * | 15  14  13  12  11  10   9   8   7   6 | 5   4   3   2 | 1   0  |
 * |----------------------------------------|---------------|--------|
 * |              Page Address              |  Reg Address  | Unused |
 * -------------------------------------------------------------------
 */
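/*
 * For example, register 0x2010 is accessed as page 0x80 (0x2010 >> 6)
 * with in-page register index 0x4 ((0x2010 >> 2) & 0xf); the low word
 * of the 32-bit value goes through that index and the high word
 * through MII register 0x10.
 */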

static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Read low word */
	low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | (low_word & 0xffff);

	return 0;
}

static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Write low word */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
			    data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
}

static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	u32 val;

	mt753x_reg_read(priv, reg, &val);
	val &= ~clr;
	val |= set;
	mt753x_reg_write(priv, reg, val);
}

/* Indirect MDIO clause 22/45 access */
static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
			 u32 cmd, u32 st)
{
	ulong timeout;
	u32 val, timeout_ms;
	int ret = 0;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
		val |= data & MDIO_RW_DATA_M;

	mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);

	timeout_ms = 100;
	timeout = get_timer(0);
	while (1) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);

		if ((val & PHY_ACS_ST) == 0)
			break;

		if (get_timer(timeout) > timeout_ms)
			return -ETIMEDOUT;
	}

	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
		ret = val & MDIO_RW_DATA_M;
	}

	return ret;
}

static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	u8 phy_addr;

	if (phy >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);

	return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
			     MDIO_ST_C22);
}

static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
				u16 val)
{
	u8 phy_addr;

	if (phy >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);

	return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
			     MDIO_ST_C22);
}

int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	u8 phy_addr;
	int ret;

	if (addr >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);

	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
			    MDIO_ST_C45);
	if (ret)
		return ret;

	return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
			     MDIO_ST_C45);
}

static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
				u16 reg, u16 val)
{
	u8 phy_addr;
	int ret;

	if (addr >= MT753X_NUM_PHYS)
		return 0;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);

	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
			    MDIO_ST_C45);
	if (ret)
		return ret;

	return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
			     MDIO_ST_C45);
}

static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_read(priv, addr, reg);
	else
		return priv->mmd_read(priv, addr, devad, reg);
}

static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			  u16 val)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_write(priv, addr, reg, val);
	else
		return priv->mmd_write(priv, addr, devad, reg, val);
}

static int mtk_mdio_register(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct mii_dev *mdio_bus = mdio_alloc();
	int ret;

	if (!mdio_bus)
		return -ENOMEM;

	/* Assign MDIO access APIs according to the switch/phy */
	switch (priv->sw) {
	case SW_MT7530:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_ind_read;
		priv->mmd_write = mtk_mmd_ind_write;
		break;
	case SW_MT7531:
		priv->mii_read = mt7531_mii_ind_read;
		priv->mii_write = mt7531_mii_ind_write;
		priv->mmd_read = mt7531_mmd_ind_read;
		priv->mmd_write = mt7531_mmd_ind_write;
		break;
	default:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_read;
		priv->mmd_write = mtk_mmd_write;
	}

	mdio_bus->read = mtk_mdio_read;
	mdio_bus->write = mtk_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);

	mdio_bus->priv = (void *)priv;

	ret = mdio_register(mdio_bus);

	if (ret)
		return ret;

	priv->mdio_bus = mdio_bus;

	return 0;
}

static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
{
	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);

	return priv->mmd_read(priv, phy_addr, 0x1f, reg);
}

static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);

	priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
}

static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500 MHz */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	udelay(20);

	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}

static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	int i;

	/* Select 250MHz clk for RGMII mode */
	mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
		       ETHSYS_TRGMII_CLK_SEL362_5, 0);

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address */
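	/*
	 * The base is derived from the SMI address field trapped in HWTRAP:
	 * phy_base = (field + 1) * 8, later wrapped to 5 bits by
	 * MT753X_PHY_ADDR().
	 */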
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt753x_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	mt753x_reg_write(priv, PMCR_REG(6), val);

	/* MT7530 Port5: Forced link down */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0; i < NUM_TRGMII_CTRL; i++)
		mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (8 << TD_DM_DRVP_S) | (8 << TD_DM_DRVN_S));

	for (i = 0; i < NUM_TRGMII_CTRL; i++)
		mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	return 0;
}

static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
{
	/* Step 1: Disable MT7531 COREPLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);

	/* Step 2: switch to XTAL output */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);

	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);

	/* Step 3: disable PLLGP and enable program PLLGP */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);

	/* Step 4: program COREPLL output frequency to 500MHz */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
		       2 << RG_COREPLL_POSDIV_S);
	udelay(25);

	/* Currently, only a 25 MHz XTAL is supported */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
		       0x140000 << RG_COREPLL_SDM_PCW_S);

	/* Set feedback divide ratio update signal to high */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
		       RG_COREPLL_SDM_PCW_CHG);

	/* Wait for at least 16 XTAL clocks */
	udelay(10);

	/* Step 5: set feedback divide ratio update signal to low */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);

	/* Enable the 325M clock for SGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* Enable the 250SSC clock for RGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);

	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);

	udelay(25);
}

static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
				  u32 port)
{
	if (port != 5 && port != 6) {
		printf("mt7531: port %d is not a SGMII port\n", port);
		return -EINVAL;
	}

	/* Set SGMII GEN2 speed (2.5G) */
	mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
		       SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
		       SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);

	/* Release PHYA power down state */
	mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
		       SGMII_PHYA_PWD, 0);

	return 0;
}

static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
{
	u32 val;

	if (port != 5) {
		printf("error: RGMII mode is not available for port %d\n",
		       port);
		return -EINVAL;
	}

	mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
	val |= GP_CLK_EN;
	val &= ~GP_MODE_M;
	val |= GP_MODE_RGMII << GP_MODE_S;
	val |= TXCLK_NO_REVERSE;
	val |= RXCLK_NO_DELAY;
	val &= ~CLK_SKEW_IN_M;
	val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
	val &= ~CLK_SKEW_OUT_M;
	val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
	mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);

	return 0;
}

static void mt7531_phy_setting(struct mtk_eth_priv *priv)
{
	int i;
	u32 val;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		/* Enable HW auto downshift */
		priv->mii_write(priv, i, 0x1f, 0x1);
		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		priv->mii_write(priv, i, PHY_EXT_REG_14, val);

		/* PHY link down power saving enable */
		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		priv->mii_write(priv, i, PHY_EXT_REG_17, val);

		val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
		val &= ~PHY_POWER_SAVING_M;
		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
		priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
	}
}

static int mt7531_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	u32 pmcr;
	u32 port5_sgmii;
	int i;

	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
				MT753X_SMI_ADDR_MASK;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Switch soft reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	/* Enable MDC input Schmitt Trigger */
	mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
		       SMT_IOLB_5_SMI_MDC_EN);

	mt7531_core_pll_setup(priv, priv->mcm);

	mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
	port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);

	/* Port 5 supports either RGMII or SGMII; port 6 is SGMII only. */
	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!port5_sgmii)
			mt7531_port_rgmii_init(priv, 5);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mt7531_port_sgmii_init(priv, 6);
		if (port5_sgmii)
			mt7531_port_sgmii_init(priv, 5);
		break;
	default:
		break;
	}

	pmcr = MT7531_FORCE_MODE |
	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
	       BKOFF_EN | BACKPR_EN |
	       FORCE_RX_FC | FORCE_TX_FC |
	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
	       FORCE_LINK;

	mt753x_reg_write(priv, PMCR_REG(5), pmcr);
	mt753x_reg_write(priv, PMCR_REG(6), pmcr);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	mt7531_phy_setting(priv);

	/* Enable Internal PHYs */
	val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
	val |= MT7531_BYPASS_MODE;
	val &= ~MT7531_POWER_ON_OFF;
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);

	return 0;
}

int mt753x_switch_init(struct mtk_eth_priv *priv)
{
	int ret;
	int i;

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(1000);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(1000);
	}

	ret = priv->switch_init(priv);
	if (ret)
		return ret;

	/* Set port isolation */
	for (i = 0; i < MT753X_NUM_PORTS; i++) {
		/* Set port matrix mode */
		if (i != 6)
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt753x_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}

static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
{
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr;

	mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN;

	switch (priv->phydev->speed) {
	case SPEED_10:
		mcr |= (SPEED_10M << FORCE_SPD_S);
		break;
	case SPEED_100:
		mcr |= (SPEED_100M << FORCE_SPD_S);
		break;
	case SPEED_1000:
		mcr |= (SPEED_1000M << FORCE_SPD_S);
		break;
	}

	if (priv->phydev->link)
		mcr |= FORCE_LINK;

	if (priv->phydev->duplex) {
		mcr |= FORCE_DPX;

		if (priv->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (priv->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (priv->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= FORCE_RX_FC;

		debug("rx pause %s, tx pause %s\n",
		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
}

static int mtk_phy_start(struct mtk_eth_priv *priv)
{
	struct phy_device *phydev = priv->phydev;
	int ret;

	ret = phy_startup(phydev);

	if (ret) {
		debug("Could not initialize PHY %s\n", phydev->dev->name);
		return ret;
	}

	if (!phydev->link) {
		debug("%s: link down.\n", phydev->dev->name);
		return 0;
	}

	mtk_phy_link_adjust(priv);

	debug("Speed: %d, %s duplex%s\n", phydev->speed,
	      (phydev->duplex) ? "full" : "half",
	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static int mtk_phy_probe(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
			     priv->phy_interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

static void mtk_sgmii_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN2 speed (2.5G) */
	clrsetbits_le32(priv->sgmii_base + ((priv->soc == SOC_MT7622) ?
			SGMSYS_GEN2_SPEED : SGMSYS_GEN2_SPEED_V2),
			SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
			SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}

static void mtk_mac_init(struct mtk_eth_priv *priv)
{
	int i, ge_mode = 0;
	u32 mcr;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = GE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		ge_mode = GE_MODE_RGMII;
		mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
			       SYSCFG0_SGMII_SEL(priv->gmac_id));
		mtk_sgmii_init(priv);
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		ge_mode = GE_MODE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = GE_MODE_RMII;
		break;
	default:
		break;
	}

	/* set the gmac to the right mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));

	if (priv->force_mode) {
		mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
		      MAC_MODE | FORCE_MODE |
		      MAC_TX_EN | MAC_RX_EN |
		      BKOFF_EN | BACKPR_EN |
		      FORCE_LINK;

		switch (priv->speed) {
		case SPEED_10:
			mcr |= SPEED_10M << FORCE_SPD_S;
			break;
		case SPEED_100:
			mcr |= SPEED_100M << FORCE_SPD_S;
			break;
		case SPEED_1000:
			mcr |= SPEED_1000M << FORCE_SPD_S;
			break;
		}

		if (priv->duplex)
			mcr |= FORCE_DPX;

		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
	}

	if (priv->soc == SOC_MT7623) {
		/* Lower Tx Driving for TRGMII path */
		for (i = 0; i < NUM_TRGMII_CTRL; i++)
			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
				       (8 << TD_DM_DRVP_S) |
				       (8 << TD_DM_DRVN_S));

		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
			     RX_RST | RXC_DQSISEL);
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
	}
}

static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
{
	char *pkt_base = priv->pkt_pool;
	int i;

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
	udelay(500);

	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
	memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);

	flush_dcache_range((ulong)pkt_base,
			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));

	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

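	/*
	 * Each TX descriptor starts with DDONE set so the CPU sees it as
	 * free to fill; LS0 marks every buffer as a complete packet and
	 * FPORT steers the frame to this GMAC (port id + 1).
	 */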
	for (i = 0; i < NUM_TX_DESC; i++) {
		priv->tx_ring_noc[i].txd_info2.LS0 = 1;
		priv->tx_ring_noc[i].txd_info2.DDONE = 1;
		priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;

		priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	for (i = 0; i < NUM_RX_DESC; i++) {
		priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
		priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
		       virt_to_phys(priv->tx_ring_noc));
	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
		       virt_to_phys(priv->rx_ring_noc));
	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);

	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
}

static int mtk_eth_start(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	int ret;

	/* Reset FE */
	reset_assert(&priv->rst_fe);
	udelay(1000);
	reset_deassert(&priv->rst_fe);
	mdelay(10);

	/* Packets forward to PDMA */
	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);

	if (priv->gmac_id == 0)
		mtk_gdma_write(priv, 1, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
	else
		mtk_gdma_write(priv, 0, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);

	udelay(500);

	mtk_eth_fifo_init(priv);

	/* Start PHY */
	if (priv->sw == SW_NONE) {
		ret = mtk_phy_start(priv);
		if (ret)
			return ret;
	}

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
	udelay(500);

	return 0;
}

static void mtk_eth_stop(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
	udelay(500);

	wait_for_bit_le32(priv->fe_base + PDMA_BASE + PDMA_GLO_CFG_REG,
			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
}

static int mtk_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	unsigned char *mac = pdata->enetaddr;
	u32 macaddr_lsb, macaddr_msb;

	macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
	macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
		      ((u32)mac[4] << 8) | (u32)mac[5];

	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);

	return 0;
}

static int mtk_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->tx_cpu_owner_idx0;
	void *pkt_base;

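	/*
	 * DDONE is cleared when a descriptor is handed to the DMA and set
	 * by hardware once the frame has been sent, so a cleared bit here
	 * means the descriptor is still in flight and the ring is full.
	 */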
	if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
		debug("mtk-eth: TX DMA descriptor ring is full\n");
		return -EPERM;
	}

	pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
	memcpy(pkt_base, packet, length);
	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
			   roundup(length, ARCH_DMA_MINALIGN));

	priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
	priv->tx_ring_noc[idx].txd_info2.DDONE = 0;

	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	return 0;
}

static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	uchar *pkt_base;
	u32 length;

	if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
		debug("mtk-eth: RX DMA descriptor ring is empty\n");
		return -EAGAIN;
	}

	length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
	pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
				roundup(length, ARCH_DMA_MINALIGN));

	if (packetp)
		*packetp = pkt_base;

	return length;
}

static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;

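	/*
	 * Recycle the descriptor: clear DDONE/LS0, restore the buffer
	 * length and advance the CPU RX index to hand it back to the DMA.
	 */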
	priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
	priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
	priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;

	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;

	return 0;
}

static int mtk_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	ulong iobase = pdata->iobase;
	int ret;

	/* Frame Engine Register Base */
	priv->fe_base = (void *)iobase;

	/* GMAC Register Base */
	priv->gmac_base = (void *)(iobase + GMAC_BASE);

	/* MDIO register */
	ret = mtk_mdio_register(dev);
	if (ret)
		return ret;

	/* Prepare for tx/rx rings */
	priv->tx_ring_noc = (struct pdma_txdesc *)
		noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
				ARCH_DMA_MINALIGN);
	priv->rx_ring_noc = (struct pdma_rxdesc *)
		noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
				ARCH_DMA_MINALIGN);

	/* Set MAC mode */
	mtk_mac_init(priv);

	/* Probe phy if switch is not specified */
	if (priv->sw == SW_NONE)
		return mtk_phy_probe(dev);

	/* Initialize switch */
	return mt753x_switch_init(priv);
}

static int mtk_eth_remove(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	/* MDIO unregister */
	mdio_unregister(priv->mdio_bus);
	mdio_free(priv->mdio_bus);

	/* Stop possibly started DMA */
	mtk_eth_stop(dev);

	return 0;
}

static int mtk_eth_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args args;
	struct regmap *regmap;
	const char *str;
	ofnode subnode;
	int ret;

	priv->soc = dev_get_driver_data(dev);

	pdata->iobase = dev_read_addr(dev);

	/* get corresponding ethsys phandle */
	ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
					 &args);
	if (ret)
		return ret;

	regmap = syscon_node_to_regmap(args.node);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	priv->ethsys_base = regmap_get_range(regmap, 0);
	if (!priv->ethsys_base) {
		dev_err(dev, "Unable to find ethsys\n");
		return -ENODEV;
	}

	/* Reset controllers */
	ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
	if (ret) {
		printf("error: Unable to get reset ctrl for frame engine\n");
		return ret;
	}

	priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);

	/* Interface mode is required */
	str = dev_read_string(dev, "phy-mode");
	if (str) {
		pdata->phy_interface = phy_get_interface_by_name(str);
		priv->phy_interface = pdata->phy_interface;
	} else {
		printf("error: phy-mode is not set\n");
		return -EINVAL;
	}

	/* Force mode or autoneg */
	subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
	if (ofnode_valid(subnode)) {
		priv->force_mode = 1;
		priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
		priv->duplex = ofnode_read_bool(subnode, "full-duplex");

		if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
		    priv->speed != SPEED_1000) {
			printf("error: no valid speed set in fixed-link\n");
			return -EINVAL;
		}
	}

	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* get corresponding sgmii phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		regmap = syscon_node_to_regmap(args.node);

		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		priv->sgmii_base = regmap_get_range(regmap, 0);

		if (!priv->sgmii_base) {
			dev_err(dev, "Unable to find sgmii\n");
			return -ENODEV;
		}
	}

	/* check for switch first, otherwise phy will be used */
	priv->sw = SW_NONE;
	priv->switch_init = NULL;
	str = dev_read_string(dev, "mediatek,switch");

	if (str) {
		if (!strcmp(str, "mt7530")) {
			priv->sw = SW_MT7530;
			priv->switch_init = mt7530_setup;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
		} else if (!strcmp(str, "mt7531")) {
			priv->sw = SW_MT7531;
			priv->switch_init = mt7531_setup;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
		} else {
			printf("error: unsupported switch\n");
			return -EINVAL;
		}

		priv->mcm = dev_read_bool(dev, "mediatek,mcm");
		if (priv->mcm) {
			ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
			if (ret) {
				printf("error: no reset ctrl for mcm\n");
				return ret;
			}
		} else {
			gpio_request_by_name(dev, "reset-gpios", 0,
					     &priv->rst_gpio, GPIOD_IS_OUT);
		}
	} else {
		ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
						 0, &args);
		if (ret) {
			printf("error: phy-handle is not specified\n");
			return ret;
		}

		priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
		if (priv->phy_addr < 0) {
			printf("error: phy address is not specified\n");
			return -ENODEV;
		}
	}

	return 0;
}

static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7629-eth", .data = SOC_MT7629 },
	{ .compatible = "mediatek,mt7623-eth", .data = SOC_MT7623 },
	{ .compatible = "mediatek,mt7622-eth", .data = SOC_MT7622 },
	{}
};

static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};

U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.of_to_plat = mtk_eth_of_to_plat,
	.plat_auto	= sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto	= sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};