1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2020 MediaTek Inc.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 */
7
8 #include <cpu_func.h>
9 #include <dm.h>
10 #include <clk.h>
11 #include <malloc.h>
12 #include <miiphy.h>
13 #include <misc.h>
14 #include <net.h>
15 #include <reset.h>
16 #include <asm/addrspace.h>
17 #include <asm/cache.h>
18 #include <asm/gpio.h>
19 #include <dm/device_compat.h>
20 #include <linux/bitfield.h>
21 #include <linux/bitops.h>
22 #include <linux/delay.h>
23 #include <linux/err.h>
24 #include <linux/ethtool.h>
25 #include <linux/io.h>
26 #include <linux/iopoll.h>
27 #include <linux/mdio.h>
28 #include <linux/mii.h>
29 #include <mach/mt7620-sysc.h>
30
31 /* Frame Engine block */
32 #define GDMA_BASE 0x600
33 #define PDMA_BASE 0x800
34
35 /* GDMA registers */
36 #define GDMA_FWD_CFG 0x00
37 #define GDMA_DST_PORT GENMASK(2, 0)
38 #define GDMA_DST_PORT_CPU 0
39
40 #define GDMA_MAC_ADRL 0x08
41 #define GDMA_MAC_ADRH 0x0c
42
43 /* PDMA registers */
44 #define TX_BASE_PTR0 0x000
45 #define TX_MAX_CNT0 0x004
46 #define TX_CTX_IDX0 0x008
47 #define TX_DTX_IDX0 0x00c
48 #define RX_BASE_PTR0 0x100
49 #define RX_MAX_CNT0 0x104
50 #define RX_CALC_IDX0 0x108
51 #define RX_DRX_IDX0 0x10c
52
53 #define PDMA_GLO_CFG 0x204
54 #define TX_WB_DDONE BIT(6)
55 #define PDMA_BT_SIZE GENMASK(5, 4)
56 #define PDMA_BT_SIZE_32B 1
57 #define RX_DMA_BUSY BIT(3)
58 #define RX_DMA_EN BIT(2)
59 #define TX_DMA_BUSY BIT(1)
60 #define TX_DMA_EN BIT(0)
61
62 #define PDMA_RST_IDX 0x208
63 #define RST_DRX_IDX0 BIT(16)
64 #define RST_DTX_IDX0 BIT(0)
65
66 /* Built-in giga ethernet switch block */
67
68 /* ARL registers */
69 #define GSW_MFC 0x0010
70 #define BC_FFP GENMASK(31, 24)
71 #define UNM_FFP GENMASK(23, 16)
72 #define UNU_FFP GENMASK(15, 8)
73 #define CPU_EN BIT(7)
74 #define CPU_PORT GENMASK(6, 4)
75
76 /* Port registers */
77 #define GSW_PCR(p) (0x2004 + (p) * 0x100)
78 #define PORT_MATRIX GENMASK(23, 16)
79
80 #define GSW_PVC(p) (0x2010 + (p) * 0x100)
81 #define STAG_VPID GENMASK(31, 16)
82 #define VLAN_ATTR GENMASK(7, 6)
83 #define VLAN_ATTR_USER 0
84
85 /* MAC registers */
86 #define GSW_PMCR(p) (0x3000 + (p) * 0x100)
87 #define IPG_CFG GENMASK(19, 18)
88 #define IPG_96BIT_WITH_SHORT_IPG 1
89 #define MAC_MODE BIT(16)
90 #define FORCE_MODE BIT(15)
91 #define MAC_TX_EN BIT(14)
92 #define MAC_RX_EN BIT(13)
93 #define BKOFF_EN BIT(9)
94 #define BACKPR_EN BIT(8)
95 #define FORCE_EEE1G BIT(7)
96 #define FORCE_EEE100 BIT(6)
97 #define FORCE_RX_FC BIT(5)
98 #define FORCE_TX_FC BIT(4)
99 #define FORCE_SPEED GENMASK(3, 2)
100 #define FORCE_SPEED_1000 2
101 #define FORCE_SPEED_100 1
102 #define FORCE_SPEED_10 0
103 #define FORCE_DUPLEX BIT(1)
104 #define FORCE_LINK BIT(0)
105
106 /* GMAC registers */
107 #define GSW_PPSC 0x7000
108 #define PHY_AP_EN BIT(31)
109 #define PHY_PRE_EN BIT(30)
110 #define PHY_MDC_CFG GENMASK(29, 24)
111 #define EPHY_AP_EN BIT(23)
112 #define EE_AN_EN BIT(16)
113 #define PHY_AP_END_ADDR GENMASK(12, 8)
114 #define PHY_AP_START_ADDR GENMASK(4, 0)
115
116 #define GSW_PIAC 0x7004
117 #define PHY_ACS_ST BIT(31)
118 #define MDIO_REG_ADDR GENMASK(29, 25)
119 #define MDIO_PHY_ADDR GENMASK(24, 20)
120 #define MDIO_CMD GENMASK(19, 18)
121 #define MDIO_CMD_WRITE 1
122 #define MDIO_CMD_READ 2
123 #define MDIO_ST GENMASK(17, 16)
124 #define MDIO_RW_DATA GENMASK(15, 0)
125
126 #define GSW_GPC1 0x7014
127 #define PHY_DIS GENMASK(28, 24)
128 #define PHY_BASE GENMASK(20, 16)
129 #define TX_CLK_MODE BIT(3)
130 #define RX_CLK_MODE BIT(2)
131
132 /* MII Registers for MDIO clause 45 indirect access */
133 #define MII_MMD_ACC_CTL_REG 0x0d
134 #define MMD_OP_MODE GENMASK(15, 14)
135 #define MMD_ADDR 0
136 #define MMD_DATA 1
137 #define MMD_DATA_RW_POST_INC 2
138 #define MMD_DATA_W_POST_INC 3
139 #define MMD_DEVAD GENMASK(4, 0)
140
141 #define MII_MMD_ADDR_DATA_REG 0x0e
142
143 /* MT7530 internal register access */
144 #define MT7530_REG_PAGE_ADDR GENMASK(15, 6)
145 #define MT7530_REG_ADDR GENMASK(5, 2)
146
147 /* MT7530 system control registers*/
148 #define MT7530_SYS_CTRL 0x7000
149 #define SW_SYS_RST BIT(1)
150 #define SW_REG_RST BIT(0)
151
152 #define MT7530_MHWTRAP 0x7804
153 #define P5_INTF_SEL_GMAC5 BIT(13)
154 #define P5_INTF_DIS BIT(6)
155
156 struct pdma_txd_info1 {
157 u32 SDP0;
158 };
159
160 struct pdma_txd_info2 {
161 u32 SDL1 : 14;
162 u32 LS1 : 1;
163 u32 BURST : 1;
164 u32 SDL0 : 14;
165 u32 LS0 : 1;
166 u32 DDONE : 1;
167 };
168
169 struct pdma_txd_info3 {
170 u32 SDP1;
171 };
172
173 struct pdma_txd_info4 {
174 u32 VPRI_VIDX : 8;
175 u32 SIDX : 4;
176 u32 INSP : 1;
177 u32 RESV : 2;
178 u32 UDF : 5;
179 u32 FP_BMAP : 8;
180 u32 TSO : 1;
181 u32 TUI_CO : 3;
182 };
183
184 struct pdma_tx_desc {
185 struct pdma_txd_info1 txd_info1;
186 struct pdma_txd_info2 txd_info2;
187 struct pdma_txd_info3 txd_info3;
188 struct pdma_txd_info4 txd_info4;
189 };
190
191 struct pdma_rxd_info1 {
192 u32 PDP0;
193 };
194
195 struct pdma_rxd_info2 {
196 u32 PLEN1 : 14;
197 u32 LS1 : 1;
198 u32 UN_USED : 1;
199 u32 PLEN0 : 14;
200 u32 LS0 : 1;
201 u32 DDONE : 1;
202 };
203
204 struct pdma_rxd_info3 {
205 u32 PDP1;
206 };
207
208 struct pdma_rxd_info4 {
209 u32 FOE_ENTRY : 14;
210 u32 CRSN : 5;
211 u32 SP : 3;
212 u32 L4F : 1;
213 u32 L4VLD : 1;
214 u32 TACK : 1;
215 u32 IP4F : 1;
216 u32 IP4 : 1;
217 u32 IP6 : 1;
218 u32 UN_USED : 4;
219 };
220
221 struct pdma_rx_desc {
222 struct pdma_rxd_info1 rxd_info1;
223 struct pdma_rxd_info2 rxd_info2;
224 struct pdma_rxd_info3 rxd_info3;
225 struct pdma_rxd_info4 rxd_info4;
226 };
227
228 struct mt7620_gsw_port_cfg {
229 phy_interface_t mode;
230 bool force_mode;
231 bool duplex;
232 u32 speed;
233 int phy_addr;
234 };
235
236 struct mt7620_eth_priv {
237 struct udevice *dev;
238
239 void __iomem *fe_base;
240 void __iomem *gsw_base;
241
242 struct mii_dev *mdio_bus;
243
244 struct pdma_tx_desc *tx_ring_noc;
245 struct pdma_rx_desc *rx_ring_noc;
246
247 int rx_dma_owner_idx0;
248 int tx_cpu_owner_idx0;
249
250 void *pkt_buf;
251 void *tx_ring;
252 void *rx_ring;
253
254 struct reset_ctl_bulk rsts;
255 struct clk_bulk clks;
256
257 struct udevice *sysc;
258
259 u32 ephy_num;
260 bool port5_mt7530;
261 struct gpio_desc gpio_swrst;
262 struct mt7620_gsw_port_cfg port_cfg[3];
263 };
264
265 #define PDMA_TIMEOUT 100000
266
267 #define NUM_TX_DESC 64
268 #define NUM_RX_DESC 128
269 #define NUM_FE_PHYS 5
270 #define NUM_PORTS 7
271 #define CPU_PORT_NUM 6
272
273 #define NUM_MT7530_PHYS 5
274
/* Write a 32-bit value to a PDMA register (offset relative to PDMA block) */
static void pdma_write(struct mt7620_eth_priv *priv, u32 reg, u32 val)
{
	void __iomem *addr = priv->fe_base + PDMA_BASE + reg;

	writel(val, addr);
}
279
/* Write a 32-bit value to a GDMA register (offset relative to GDMA block) */
static void gdma_write(struct mt7620_eth_priv *priv, u32 reg, u32 val)
{
	void __iomem *addr = priv->fe_base + GDMA_BASE + reg;

	writel(val, addr);
}
284
/* Read-modify-write a GDMA register: clear 'clr' bits then set 'set' bits */
static void gdma_rmw(struct mt7620_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	void __iomem *addr = priv->fe_base + GDMA_BASE + reg;

	clrsetbits_le32(addr, clr, set);
}
289
/* Read a 32-bit register of the built-in giga switch */
static u32 gsw_read(struct mt7620_eth_priv *priv, u32 reg)
{
	void __iomem *addr = priv->gsw_base + reg;

	return readl(addr);
}
294
/* Write a 32-bit register of the built-in giga switch */
static void gsw_write(struct mt7620_eth_priv *priv, u32 reg, u32 val)
{
	void __iomem *addr = priv->gsw_base + reg;

	writel(val, addr);
}
299
/* Read-modify-write a switch register: clear 'clr' bits then set 'set' bits */
static void gsw_rmw(struct mt7620_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	void __iomem *addr = priv->gsw_base + reg;

	clrsetbits_le32(addr, clr, set);
}
304
/*
 * Perform one clause-22 MDIO transaction through the switch's PHY indirect
 * access controller (GSW_PIAC).
 *
 * @phy:  PHY address (5 bits)
 * @reg:  register address (5 bits)
 * @data: value to write (only used when cmd == MDIO_CMD_WRITE)
 * @cmd:  MDIO_CMD_READ or MDIO_CMD_WRITE
 *
 * Returns the read value (>= 0) for reads, 0 for successful writes, or a
 * negative error code if the controller did not finish within the timeout.
 */
static int mt7620_mdio_rw(struct mt7620_eth_priv *priv, u32 phy, u32 reg,
			  u32 data, u32 cmd)
{
	int ret;
	u32 val;

	/* MDIO_ST = 1 selects clause-22 framing */
	val = FIELD_PREP(MDIO_ST, 1) | FIELD_PREP(MDIO_CMD, cmd) |
	      FIELD_PREP(MDIO_PHY_ADDR, phy) |
	      FIELD_PREP(MDIO_REG_ADDR, reg);

	if (cmd == MDIO_CMD_WRITE)
		val |= FIELD_PREP(MDIO_RW_DATA, data);

	/*
	 * Write the command word first, then set PHY_ACS_ST in a second
	 * write to kick off the transaction.
	 */
	gsw_write(priv, GSW_PIAC, val);
	gsw_write(priv, GSW_PIAC, val | PHY_ACS_ST);

	/* PHY_ACS_ST self-clears when the transaction completes */
	ret = readl_poll_timeout(priv->gsw_base + GSW_PIAC, val,
				 !(val & PHY_ACS_ST), 10000);
	if (ret) {
		dev_err(priv->dev, "mt7620_eth: MDIO access timeout\n");
		return ret;
	}

	if (cmd == MDIO_CMD_READ) {
		/* Read data is latched in the same register */
		val = gsw_read(priv, GSW_PIAC);
		return FIELD_GET(MDIO_RW_DATA, val);
	}

	return 0;
}
335
/* Clause-22 MII register read helper; returns value read or negative error */
static int mt7620_mii_read(struct mt7620_eth_priv *priv, u32 phy, u32 reg)
{
	return mt7620_mdio_rw(priv, phy, reg, 0, MDIO_CMD_READ);
}
340
/* Clause-22 MII register write helper; returns 0 or negative error */
static int mt7620_mii_write(struct mt7620_eth_priv *priv, u32 phy, u32 reg,
			    u16 val)
{
	return mt7620_mdio_rw(priv, phy, reg, val, MDIO_CMD_WRITE);
}
346
/*
 * MDIO bus .read callback.
 *
 * devad < 0 requests a plain clause-22 read. Otherwise a clause-45 register
 * is accessed indirectly through the clause-22 MMD access control/data
 * registers (0x0d/0x0e): set the target devad + address mode, write the
 * register address, switch to data mode, then read the data register.
 *
 * Returns the value read (>= 0) or a negative error code.
 */
static int mt7620_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mt7620_eth_priv *priv = bus->priv;
	int ret;

	if (devad < 0)
		return mt7620_mdio_rw(priv, addr, reg, 0, MDIO_CMD_READ);

	/* Select MMD device and address-latch mode */
	ret = mt7620_mdio_rw(priv, addr, MII_MMD_ACC_CTL_REG,
			     FIELD_PREP(MMD_OP_MODE, MMD_ADDR) |
			     FIELD_PREP(MMD_DEVAD, devad), MDIO_CMD_WRITE);
	if (ret)
		return ret;

	/* Latch the target register address */
	ret = mt7620_mdio_rw(priv, addr, MII_MMD_ADDR_DATA_REG, reg,
			     MDIO_CMD_WRITE);
	if (ret)
		return ret;

	/* Switch to data mode (no post-increment) */
	ret = mt7620_mdio_rw(priv, addr, MII_MMD_ACC_CTL_REG,
			     FIELD_PREP(MMD_OP_MODE, MMD_DATA) |
			     FIELD_PREP(MMD_DEVAD, devad), MDIO_CMD_WRITE);
	if (ret)
		return ret;

	/* Read back the MMD register content */
	return mt7620_mdio_rw(priv, addr, MII_MMD_ADDR_DATA_REG, 0,
			      MDIO_CMD_READ);
}
375
/*
 * MDIO bus .write callback.
 *
 * devad < 0 requests a plain clause-22 write. Otherwise the clause-45
 * register is written through the clause-22 MMD indirection registers,
 * mirroring the sequence used in mt7620_mdio_read().
 *
 * Returns 0 on success or a negative error code.
 */
static int mt7620_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 val)
{
	struct mt7620_eth_priv *priv = bus->priv;
	int ret;

	if (devad < 0)
		return mt7620_mdio_rw(priv, addr, reg, val, MDIO_CMD_WRITE);

	/* Select MMD device and address-latch mode */
	ret = mt7620_mdio_rw(priv, addr, MII_MMD_ACC_CTL_REG,
			     FIELD_PREP(MMD_OP_MODE, MMD_ADDR) |
			     FIELD_PREP(MMD_DEVAD, devad), MDIO_CMD_WRITE);
	if (ret)
		return ret;

	/* Latch the target register address */
	ret = mt7620_mdio_rw(priv, addr, MII_MMD_ADDR_DATA_REG, reg,
			     MDIO_CMD_WRITE);
	if (ret)
		return ret;

	/* Switch to data mode (no post-increment) */
	ret = mt7620_mdio_rw(priv, addr, MII_MMD_ACC_CTL_REG,
			     FIELD_PREP(MMD_OP_MODE, MMD_DATA) |
			     FIELD_PREP(MMD_DEVAD, devad), MDIO_CMD_WRITE);
	if (ret)
		return ret;

	/* Write the MMD register content */
	return mt7620_mdio_rw(priv, addr, MII_MMD_ADDR_DATA_REG, val,
			      MDIO_CMD_WRITE);
}
405
mt7620_mdio_register(struct udevice * dev)406 static int mt7620_mdio_register(struct udevice *dev)
407 {
408 struct mt7620_eth_priv *priv = dev_get_priv(dev);
409 struct mii_dev *mdio_bus = mdio_alloc();
410 int ret;
411
412 if (!mdio_bus)
413 return -ENOMEM;
414
415 mdio_bus->read = mt7620_mdio_read;
416 mdio_bus->write = mt7620_mdio_write;
417 snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);
418
419 mdio_bus->priv = (void *)priv;
420
421 ret = mdio_register(mdio_bus);
422
423 if (ret)
424 return ret;
425
426 priv->mdio_bus = mdio_bus;
427
428 return 0;
429 }
430
/*
 * Read a 32-bit MT7530 internal register over MDIO at PHY address 0x1f.
 *
 * Protocol: write the register's page (bits 15:6) to reg 0x1f, read the low
 * 16 bits from the register's in-page address (bits 5:2), then read the
 * high 16 bits from reg 0x10.
 *
 * @data may be NULL to perform the access without returning the value.
 * Returns 0 on success or a negative error code.
 */
static int mt7530_reg_read(struct mt7620_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	/* Write page address */
	ret = mt7620_mii_write(priv, 0x1f, 0x1f,
			       FIELD_GET(MT7530_REG_PAGE_ADDR, reg));
	if (ret)
		return ret;

	/* Read low word */
	low_word = mt7620_mii_read(priv, 0x1f, FIELD_GET(MT7530_REG_ADDR, reg));
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mt7620_mii_read(priv, 0x1f, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | ((u32)low_word & 0xffff);

	return 0;
}
456
/*
 * Write a 32-bit MT7530 internal register over MDIO at PHY address 0x1f.
 *
 * Protocol mirrors mt7530_reg_read(): select the page via reg 0x1f, write
 * the low 16 bits at the in-page address, then the high 16 bits at reg 0x10.
 * Returns 0 on success or a negative error code.
 */
static int mt7530_reg_write(struct mt7620_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	/* Write page address */
	ret = mt7620_mii_write(priv, 0x1f, 0x1f,
			       FIELD_GET(MT7530_REG_PAGE_ADDR, reg));
	if (ret)
		return ret;

	/* Write low word */
	ret = mt7620_mii_write(priv, 0x1f, FIELD_GET(MT7530_REG_ADDR, reg),
			       data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mt7620_mii_write(priv, 0x1f, 0x10, data >> 16);
}
476
/*
 * Restart auto-negotiation on the given PHY by setting BMCR_ANRESTART.
 *
 * The original code truncated a failed (negative) MDIO read into a u16 and
 * wrote the garbage back to BMCR; bail out instead if the read fails.
 */
static void mt7620_phy_restart_an(struct mt7620_eth_priv *priv, u32 phy)
{
	int val;

	val = mt7620_mii_read(priv, phy, MII_BMCR);
	if (val < 0)
		return;

	mt7620_mii_write(priv, phy, MII_BMCR, val | BMCR_ANRESTART);
}
485
/*
 * Initialize the embedded FE PHYs of the built-in switch.
 *
 * Programs vendor calibration values through paged PHY registers (global
 * pages selected via reg 31). The values differ between BGA and non-BGA
 * (QFN) package variants, which is why the chip revision is queried first.
 * The magic register values come from MediaTek reference code; their exact
 * meaning is undocumented here — do not change them.
 */
static void mt7620_gsw_ephy_init(struct mt7620_eth_priv *priv)
{
	struct mt7620_sysc_chip_rev chip_rev;
	int ret;
	u32 i;

	ret = misc_ioctl(priv->sysc, MT7620_SYSC_IOCTL_GET_CHIP_REV, &chip_rev);
	if (ret) {
		/* Assume MT7620A if misc_ioctl() failed */
		dev_warn(priv->dev, "mt7620_eth: failed to get chip rev\n");
		chip_rev.bga = 1;
	}

	/* global, page 4 */
	mt7620_mii_write(priv, 1, 31, 0x4000);
	mt7620_mii_write(priv, 1, 17, 0x7444);

	if (chip_rev.bga)
		mt7620_mii_write(priv, 1, 19, 0x0114);
	else
		mt7620_mii_write(priv, 1, 19, 0x0117);

	mt7620_mii_write(priv, 1, 22, 0x10cf);
	mt7620_mii_write(priv, 1, 25, 0x6212);
	mt7620_mii_write(priv, 1, 26, 0x0777);
	mt7620_mii_write(priv, 1, 29, 0x4000);
	mt7620_mii_write(priv, 1, 28, 0xc077);
	mt7620_mii_write(priv, 1, 24, 0x0000);

	/* global, page 3 */
	mt7620_mii_write(priv, 1, 31, 0x3000);
	mt7620_mii_write(priv, 1, 17, 0x4838);

	/* global, page 2 */
	mt7620_mii_write(priv, 1, 31, 0x2000);

	/* Package-dependent analog calibration values */
	if (chip_rev.bga) {
		mt7620_mii_write(priv, 1, 21, 0x0515);
		mt7620_mii_write(priv, 1, 22, 0x0053);
		mt7620_mii_write(priv, 1, 23, 0x00bf);
		mt7620_mii_write(priv, 1, 24, 0x0aaf);
		mt7620_mii_write(priv, 1, 25, 0x0fad);
		mt7620_mii_write(priv, 1, 26, 0x0fc1);
	} else {
		mt7620_mii_write(priv, 1, 21, 0x0517);
		mt7620_mii_write(priv, 1, 22, 0x0fd2);
		mt7620_mii_write(priv, 1, 23, 0x00bf);
		mt7620_mii_write(priv, 1, 24, 0x0aab);
		mt7620_mii_write(priv, 1, 25, 0x00ae);
		mt7620_mii_write(priv, 1, 26, 0x0fff);
	}

	/* global, page 1 */
	mt7620_mii_write(priv, 1, 31, 0x1000);
	mt7620_mii_write(priv, 1, 17, 0xe7f8);

	/* local, page 0 */
	mt7620_mii_write(priv, 1, 31, 0x8000);
	for (i = 0; i < priv->ephy_num; i++)
		mt7620_mii_write(priv, i, 30, 0xa000);

	/* Advertise 10/100 full/half on all embedded PHYs */
	for (i = 0; i < priv->ephy_num; i++)
		mt7620_mii_write(priv, i, 4, 0x05e1);

	/* local, page 2 */
	mt7620_mii_write(priv, 1, 31, 0xa000);
	mt7620_mii_write(priv, 0, 16, 0x1111);
	mt7620_mii_write(priv, 1, 16, 0x1010);
	mt7620_mii_write(priv, 2, 16, 0x1515);
	mt7620_mii_write(priv, 3, 16, 0x0f0f);
	/* PHY 4 only exists when port4 is not used as an external GMAC */
	if (priv->ephy_num == NUM_FE_PHYS)
		mt7620_mii_write(priv, 4, 16, 0x1313);

	/* Restart auto-negotiation */
	for (i = 0; i < priv->ephy_num; i++)
		mt7620_phy_restart_an(priv, i);

	/* Also restart AN on external PHYs attached to port4/port5, if any */
	if (priv->port_cfg[0].phy_addr > 0)
		mt7620_phy_restart_an(priv, priv->port_cfg[0].phy_addr);

	if (priv->port_cfg[1].phy_addr > 0)
		mt7620_phy_restart_an(priv, priv->port_cfg[1].phy_addr);
}
569
/*
 * Configure the pin/interface mode of GMAC1 or GMAC2 through the system
 * controller.
 *
 * @gmac: 1 or 2 (any other value is a programming error)
 * @mode: desired PHY interface mode; PHY_INTERFACE_MODE_NONE maps to the
 *        embedded switch PHY on GMAC2, or RGMII on GMAC1.
 *
 * Returns -EINVAL for invalid arguments. An ioctl failure is only warned
 * about and still returns 0, so a missing sysc feature does not abort
 * probing.
 */
static int mt7620_setup_gmac_mode(struct mt7620_eth_priv *priv, u32 gmac,
				  phy_interface_t mode)
{
	enum mt7620_sysc_ge_mode ge_mode;
	unsigned long req;
	int ret;

	switch (gmac) {
	case 1:
		req = MT7620_SYSC_IOCTL_SET_GE1_MODE;
		break;
	case 2:
		req = MT7620_SYSC_IOCTL_SET_GE2_MODE;
		break;
	default:
		/* Should not reach here */
		return -EINVAL;
	}

	switch (mode) {
	case PHY_INTERFACE_MODE_MII:
		ge_mode = MT7620_SYSC_GE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = MT7620_SYSC_GE_RMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = MT7620_SYSC_GE_RGMII;
		break;
	case PHY_INTERFACE_MODE_NONE:
		if (gmac == 2)
			ge_mode = MT7620_SYSC_GE_ESW_PHY;
		else
			ge_mode = MT7620_SYSC_GE_RGMII;
		break;
	default:
		/* Should not reach here */
		return -EINVAL;
	}

	ret = misc_ioctl(priv->sysc, req, &ge_mode);
	if (ret)
		dev_warn(priv->dev, "mt7620_eth: failed to set GE%u mode\n",
			 gmac);

	/* Intentionally non-fatal: warn but keep going */
	return 0;
}
617
/*
 * Program the per-port MAC control register (PMCR) of the built-in switch.
 *
 * With PHY_INTERFACE_MODE_NONE, port 5 is left in forced (link-down) mode,
 * and only the CPU port gets force_mode enabled. Otherwise the MAC is
 * enabled with standard IPG/backoff/backpressure settings, optionally
 * forcing link/speed/duplex/flow-control when force_mode is set.
 */
static void mt7620_gsw_setup_port(struct mt7620_eth_priv *priv, u32 port,
				  struct mt7620_gsw_port_cfg *port_cfg)
{
	u32 val;

	if (port_cfg->mode == PHY_INTERFACE_MODE_NONE) {
		if (port == 5) {
			gsw_write(priv, GSW_PMCR(port), FORCE_MODE);
			return;
		}

		/* Only the CPU port is forced when no interface is set */
		port_cfg->force_mode = (port == CPU_PORT_NUM);
	}

	val = FIELD_PREP(IPG_CFG, IPG_96BIT_WITH_SHORT_IPG) | MAC_MODE |
	      MAC_TX_EN | MAC_RX_EN | BKOFF_EN | BACKPR_EN;

	if (port_cfg->force_mode) {
		val |= FORCE_MODE | FORCE_RX_FC | FORCE_TX_FC | FORCE_LINK |
		       FIELD_PREP(FORCE_SPEED, port_cfg->speed);

		if (port_cfg->duplex)
			val |= FORCE_DUPLEX;
	}

	gsw_write(priv, GSW_PMCR(port), val);
}
645
mt7620_gsw_set_port_isolation(struct mt7620_eth_priv * priv)646 static void mt7620_gsw_set_port_isolation(struct mt7620_eth_priv *priv)
647 {
648 u32 i;
649
650 for (i = 0; i < NUM_PORTS; i++) {
651 /* Set port matrix mode */
652 if (i != CPU_PORT_NUM)
653 gsw_write(priv, GSW_PCR(i),
654 FIELD_PREP(PORT_MATRIX, 0x40));
655 else
656 gsw_write(priv, GSW_PCR(i),
657 FIELD_PREP(PORT_MATRIX, 0x3f));
658
659 /* Set port mode to user port */
660 gsw_write(priv, GSW_PVC(i), FIELD_PREP(STAG_VPID, 0x8100) |
661 FIELD_PREP(VLAN_ATTR, VLAN_ATTR_USER));
662 }
663 }
664
/*
 * Set up hardware auto-polling for external PHYs on port4/port5.
 *
 * Also fixes the number of embedded FE PHYs: if port4 is used as an
 * external GMAC (mode != NONE), only 4 embedded PHYs remain.
 *
 * The auto-polling unit scans a contiguous address range, so the start/end
 * addresses are derived from whichever of the two ports has a PHY:
 *  - both ports: range [port4 addr, port5 addr]
 *  - port4 only: range [addr, addr + 1]
 *  - port5 only: range [4, addr] (4 is the lowest valid port5 phy addr - 1,
 *    see the phy-addr validation in mt7620_eth_parse_gsw_port())
 */
static void mt7620_gsw_setup_phy_polling(struct mt7620_eth_priv *priv)
{
	int phy_addr_st, phy_addr_end;

	if (priv->port_cfg[0].mode == PHY_INTERFACE_MODE_NONE)
		priv->ephy_num = NUM_FE_PHYS;
	else
		priv->ephy_num = NUM_FE_PHYS - 1;

	/* Nothing to poll when neither port has an external PHY */
	if (priv->port_cfg[0].phy_addr < 0 && priv->port_cfg[1].phy_addr < 0)
		return;

	if (priv->port_cfg[0].phy_addr > 0 && priv->port_cfg[1].phy_addr > 0) {
		phy_addr_st = priv->port_cfg[0].phy_addr;
		phy_addr_end = priv->port_cfg[1].phy_addr;
	} else if (priv->port_cfg[0].phy_addr > 0) {
		phy_addr_st = priv->port_cfg[0].phy_addr;
		phy_addr_end = priv->port_cfg[0].phy_addr + 1;
	} else {
		phy_addr_st = 4;
		phy_addr_end = priv->port_cfg[1].phy_addr;
	}

	gsw_rmw(priv, GSW_PPSC, PHY_AP_END_ADDR | PHY_AP_START_ADDR,
		PHY_AP_EN | FIELD_PREP(PHY_AP_START_ADDR, phy_addr_st) |
		FIELD_PREP(PHY_AP_END_ADDR, phy_addr_end));
}
692
mt7530_gsw_set_port_isolation(struct mt7620_eth_priv * priv)693 static void mt7530_gsw_set_port_isolation(struct mt7620_eth_priv *priv)
694 {
695 u32 i;
696
697 for (i = 0; i < NUM_PORTS; i++) {
698 /* Set port matrix mode */
699 if (i != CPU_PORT_NUM)
700 mt7530_reg_write(priv, GSW_PCR(i),
701 FIELD_PREP(PORT_MATRIX, 0x40));
702 else
703 mt7530_reg_write(priv, GSW_PCR(i),
704 FIELD_PREP(PORT_MATRIX, 0x3f));
705
706 /* Set port mode to user port */
707 mt7530_reg_write(priv, GSW_PVC(i),
708 FIELD_PREP(STAG_VPID, 0x8100) |
709 FIELD_PREP(VLAN_ATTR, VLAN_ATTR_USER));
710 }
711 }
712
/*
 * Bring up an external MT7530 giga switch attached to MT7620 GSW port 5.
 *
 * Sequence: release the MT7530 reset GPIO, power down its PHYs, force its
 * MACs link-down, soft-reset the switch core, force port 6 (the link back
 * to MT7620 port 5) to 1G full-duplex, disable its unused port 5, apply
 * port isolation, then power the PHYs back up and restart AN.
 * The ordering of these steps matters; do not rearrange.
 */
static void mt7620_gsw_config_mt7530(struct mt7620_eth_priv *priv)
{
	u16 phy_val;
	u32 i, val;

	/* Disable internal PHY, set PHY base to 12 */
	gsw_write(priv, GSW_GPC1, PHY_DIS | FIELD_PREP(PHY_BASE, 12) |
		  TX_CLK_MODE | RX_CLK_MODE);

	/* MT7530 reset deassert */
	dm_gpio_set_value(&priv->gpio_swrst, 1);
	mdelay(1000);

	/* Turn off PHYs */
	for (i = 0; i < NUM_MT7530_PHYS; i++) {
		phy_val = mt7620_mii_read(priv, i, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		mt7620_mii_write(priv, i, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt7530_reg_write(priv, GSW_PMCR(5), FORCE_MODE);
	mt7530_reg_write(priv, GSW_PMCR(6), FORCE_MODE);

	/* MT7530 soft reset */
	mt7530_reg_write(priv, MT7530_SYS_CTRL, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	/* MT7530 port6 force to 1G (connects to MT7620 GSW port5) */
	mt7530_reg_write(priv, GSW_PMCR(6),
			 FIELD_PREP(IPG_CFG, IPG_96BIT_WITH_SHORT_IPG) |
			 MAC_MODE | FORCE_MODE | MAC_TX_EN | MAC_RX_EN |
			 BKOFF_EN | BACKPR_EN | FORCE_RX_FC | FORCE_TX_FC |
			 FIELD_PREP(FORCE_SPEED, FORCE_SPEED_1000) |
			 FORCE_DUPLEX | FORCE_LINK);

	/* Disable MT7530 port5 */
	mt7530_reg_read(priv, MT7530_MHWTRAP, &val);
	val |= P5_INTF_SEL_GMAC5 | P5_INTF_DIS;
	mt7530_reg_write(priv, MT7530_MHWTRAP, val);

	/* Isolate each ports */
	mt7530_gsw_set_port_isolation(priv);

	/* Turn on PHYs */
	for (i = 0; i < NUM_MT7530_PHYS; i++) {
		phy_val = mt7620_mii_read(priv, i, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		mt7620_mii_write(priv, i, MII_BMCR, phy_val);
	}
	/* Restart auto-negotiation */
	for (i = 0; i < NUM_MT7530_PHYS; i++)
		mt7620_phy_restart_an(priv, i);
}
767
/*
 * One-time initialization of the built-in giga switch.
 *
 * Holds an attached MT7530 in reset first (if present), programs frame
 * forwarding, GMAC pin modes, per-port MACs, port isolation, PHY polling
 * and embedded PHY calibration, and finally brings up the MT7530.
 */
static void mt7620_gsw_init(struct mt7620_eth_priv *priv)
{
	/* If port5 connects to MT7530 Giga-switch, reset it first */
	if (priv->port5_mt7530)
		dm_gpio_set_value(&priv->gpio_swrst, 0);

	/* Set forward control: flood BC/unknown-MC/unknown-UC to all ports,
	 * enable the CPU port
	 */
	gsw_write(priv, GSW_MFC, FIELD_PREP(BC_FFP, 0x7f) |
		  FIELD_PREP(UNM_FFP, 0x7f) | FIELD_PREP(UNU_FFP, 0x7f) |
		  CPU_EN | FIELD_PREP(CPU_PORT, CPU_PORT_NUM));

	/* Set GMAC mode (GMAC1 -> Port5, GMAC2 -> Port4) */
	mt7620_setup_gmac_mode(priv, 1, priv->port_cfg[1].mode);
	mt7620_setup_gmac_mode(priv, 2, priv->port_cfg[0].mode);

	/* port_cfg[2] is CPU port: always forced 1G full-duplex */
	priv->port_cfg[2].force_mode = true;
	priv->port_cfg[2].duplex = true;
	priv->port_cfg[2].speed = FORCE_SPEED_1000;

	/* Configure GSW MAC port */
	mt7620_gsw_setup_port(priv, 4, &priv->port_cfg[0]);
	mt7620_gsw_setup_port(priv, 5, &priv->port_cfg[1]);
	mt7620_gsw_setup_port(priv, 6, &priv->port_cfg[2]);

	/* Isolate each port */
	mt7620_gsw_set_port_isolation(priv);

	/* Polling external phy if exists */
	mt7620_gsw_setup_phy_polling(priv);

	/* Configure ephy */
	mt7620_gsw_ephy_init(priv);

	/* If port5 connects to MT7530 Giga-switch, do initialization */
	if (priv->port5_mt7530)
		mt7620_gsw_config_mt7530(priv);
}
806
/*
 * Initialize the PDMA TX/RX descriptor rings and packet buffers.
 *
 * The rings are accessed through KSEG1 (uncached) aliases so CPU writes
 * reach DMA-visible memory without explicit cache maintenance, while the
 * descriptors carry physical addresses (CPHYSADDR) for the DMA engine.
 */
static void mt7620_eth_fifo_init(struct mt7620_eth_priv *priv)
{
	uintptr_t pkt_base = (uintptr_t)priv->pkt_buf;
	int i;

	memset(priv->tx_ring, 0, NUM_TX_DESC * sizeof(struct pdma_tx_desc));
	memset(priv->rx_ring, 0, NUM_RX_DESC * sizeof(struct pdma_rx_desc));
	memset(priv->pkt_buf, 0, (NUM_TX_DESC + NUM_RX_DESC) * PKTSIZE_ALIGN);

	/* Uncached views of the rings (MIPS KSEG1) */
	priv->tx_ring_noc = (void *)CKSEG1ADDR((uintptr_t)priv->tx_ring);
	priv->rx_ring_noc = (void *)CKSEG1ADDR((uintptr_t)priv->rx_ring);
	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

	/* TX descriptors start owned by the CPU (DDONE set) */
	for (i = 0; i < NUM_TX_DESC; i++) {
		priv->tx_ring_noc[i].txd_info2.LS0 = 1;
		priv->tx_ring_noc[i].txd_info2.DDONE = 1;
		priv->tx_ring_noc[i].txd_info4.FP_BMAP = GDMA_DST_PORT_CPU;
		priv->tx_ring_noc[i].txd_info1.SDP0 = CPHYSADDR(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	/* RX descriptors start owned by the DMA engine (DDONE clear) */
	for (i = 0; i < NUM_RX_DESC; i++) {
		priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
		priv->rx_ring_noc[i].rxd_info1.PDP0 = CPHYSADDR(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	pdma_write(priv, TX_BASE_PTR0, CPHYSADDR(priv->tx_ring_noc));
	pdma_write(priv, TX_MAX_CNT0, NUM_TX_DESC);
	pdma_write(priv, TX_CTX_IDX0, priv->tx_cpu_owner_idx0);

	pdma_write(priv, RX_BASE_PTR0, CPHYSADDR(priv->rx_ring_noc));
	pdma_write(priv, RX_MAX_CNT0, NUM_RX_DESC);
	pdma_write(priv, RX_CALC_IDX0, NUM_RX_DESC - 1);

	/* Reset hardware DMA indices to match the fresh rings */
	pdma_write(priv, PDMA_RST_IDX, RST_DTX_IDX0 | RST_DRX_IDX0);
}
845
mt7620_eth_start(struct udevice * dev)846 static int mt7620_eth_start(struct udevice *dev)
847 {
848 struct mt7620_eth_priv *priv = dev_get_priv(dev);
849
850 mt7620_eth_fifo_init(priv);
851
852 gdma_rmw(priv, GDMA_FWD_CFG, GDMA_DST_PORT,
853 FIELD_PREP(GDMA_DST_PORT, GDMA_DST_PORT_CPU));
854
855 pdma_write(priv, PDMA_GLO_CFG,
856 FIELD_PREP(PDMA_BT_SIZE, PDMA_BT_SIZE_32B) |
857 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
858 udelay(500);
859
860 return 0;
861 }
862
mt7620_eth_stop(struct udevice * dev)863 static void mt7620_eth_stop(struct udevice *dev)
864 {
865 struct mt7620_eth_priv *priv = dev_get_priv(dev);
866 u32 val;
867 int ret;
868
869 pdma_write(priv, PDMA_GLO_CFG,
870 FIELD_PREP(PDMA_BT_SIZE, PDMA_BT_SIZE_32B));
871 udelay(500);
872
873 ret = readl_poll_timeout(priv->fe_base + PDMA_BASE + PDMA_GLO_CFG,
874 val, !(val & (RX_DMA_BUSY | TX_DMA_BUSY)),
875 PDMA_TIMEOUT);
876 if (ret)
877 dev_warn(dev, "mt7620_eth: PDMA is still busy\n");
878 }
879
mt7620_eth_write_hwaddr(struct udevice * dev)880 static int mt7620_eth_write_hwaddr(struct udevice *dev)
881 {
882 struct eth_pdata *pdata = dev_get_plat(dev);
883 struct mt7620_eth_priv *priv = dev_get_priv(dev);
884 unsigned char *mac = pdata->enetaddr;
885 u32 macaddr_lsb, macaddr_msb;
886
887 macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
888 macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
889 ((u32)mac[4] << 8) | (u32)mac[5];
890
891 gdma_write(priv, GDMA_MAC_ADRH, macaddr_msb);
892 gdma_write(priv, GDMA_MAC_ADRL, macaddr_lsb);
893
894 return 0;
895 }
896
/*
 * eth_ops .send: copy the packet into the next TX slot and hand the
 * descriptor to the DMA engine.
 *
 * The descriptor ring lives in uncached memory, but the packet buffer is
 * written through the cached (KSEG0) alias, hence the explicit flush before
 * clearing DDONE. Bumping TX_CTX_IDX0 kicks the DMA.
 *
 * Returns 0 on success or -EPERM if the ring is full (DDONE still clear).
 */
static int mt7620_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mt7620_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->tx_cpu_owner_idx0;
	void *pkt_base;

	if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
		printf("mt7620_eth: TX DMA descriptor ring is full\n");
		return -EPERM;
	}

	pkt_base = (void *)CKSEG0ADDR(priv->tx_ring_noc[idx].txd_info1.SDP0);
	memcpy(pkt_base, packet, length);
	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base + length);

	/* Clearing DDONE transfers descriptor ownership to the hardware */
	priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
	priv->tx_ring_noc[idx].txd_info2.DDONE = 0;

	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
	pdma_write(priv, TX_CTX_IDX0, priv->tx_cpu_owner_idx0);

	return 0;
}
920
/*
 * eth_ops .recv: return the next received packet, if any.
 *
 * A set DDONE bit means the hardware has filled the descriptor. The packet
 * is handed out via the cached (KSEG0) alias after invalidating the cache
 * lines the DMA wrote. The descriptor is recycled later in .free_pkt.
 *
 * Returns the packet length, or -EAGAIN when no packet is pending.
 */
static int mt7620_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mt7620_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0, length;
	uchar *pkt_base;

	if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
		debug("mt7620_eth: RX DMA descriptor ring is empty\n");
		return -EAGAIN;
	}

	length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
	pkt_base = (void *)CKSEG0ADDR(priv->rx_ring_noc[idx].rxd_info1.PDP0);
	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base + length);

	if (packetp)
		*packetp = pkt_base;

	return length;
}
941
/*
 * eth_ops .free_pkt: recycle the RX descriptor consumed by .recv.
 *
 * Ownership is returned to the hardware by clearing DDONE and restoring
 * PLEN0 to the full buffer size, then advertising the slot through
 * RX_CALC_IDX0 before advancing the software index.
 */
static int mt7620_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mt7620_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;

	priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
	priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
	priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;

	pdma_write(priv, RX_CALC_IDX0, idx);
	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;

	return 0;
}
956
/* U-Boot ethernet uclass operations for this driver */
static const struct eth_ops mt7620_eth_ops = {
	.start = mt7620_eth_start,
	.stop = mt7620_eth_stop,
	.send = mt7620_eth_send,
	.recv = mt7620_eth_recv,
	.free_pkt = mt7620_eth_free_pkt,
	.write_hwaddr = mt7620_eth_write_hwaddr,
};
965
mt7620_eth_alloc_rings_pkts(struct mt7620_eth_priv * priv)966 static int mt7620_eth_alloc_rings_pkts(struct mt7620_eth_priv *priv)
967 {
968 priv->tx_ring = memalign(ARCH_DMA_MINALIGN,
969 NUM_TX_DESC * sizeof(struct pdma_tx_desc));
970 if (!priv->tx_ring) {
971 dev_err(priv->dev, "mt7620_eth: unable to alloc tx ring\n");
972 return -ENOMEM;
973 }
974
975 priv->rx_ring = memalign(ARCH_DMA_MINALIGN,
976 NUM_RX_DESC * sizeof(struct pdma_rx_desc));
977 if (!priv->rx_ring) {
978 dev_err(priv->dev, "mt7620_eth: unable to alloc rx ring\n");
979 goto cleanup;
980 }
981
982 priv->pkt_buf = memalign(ARCH_DMA_MINALIGN,
983 (NUM_TX_DESC + NUM_RX_DESC) * PKTSIZE_ALIGN);
984 if (!priv->pkt_buf) {
985 dev_err(priv->dev, "mt7620_eth: unable to alloc pkt buffer\n");
986 goto cleanup;
987 }
988
989 return 0;
990
991 cleanup:
992 if (priv->tx_ring)
993 free(priv->tx_ring);
994
995 if (priv->rx_ring)
996 free(priv->rx_ring);
997
998 return -ENOMEM;
999 }
1000
mt7620_eth_free_rings_pkts(struct mt7620_eth_priv * priv)1001 static void mt7620_eth_free_rings_pkts(struct mt7620_eth_priv *priv)
1002 {
1003 free(priv->tx_ring);
1004 free(priv->rx_ring);
1005 free(priv->pkt_buf);
1006 }
1007
/*
 * Driver probe: select PCIe RC mode via the system controller, enable
 * clocks, pulse the block resets, allocate DMA memory, register the MDIO
 * bus (non-fatal on failure) and initialize the switch.
 *
 * Returns 0 on success or a negative error code.
 */
static int mt7620_eth_probe(struct udevice *dev)
{
	struct mt7620_eth_priv *priv = dev_get_priv(dev);
	u32 pcie_mode = MT7620_SYSC_PCIE_RC_MODE;
	int ret;

	/* NOTE(review): presumably needed because PCIe/ePHY share pins —
	 * confirm against the SoC datasheet
	 */
	misc_ioctl(priv->sysc, MT7620_SYSC_IOCTL_SET_PCIE_MODE, &pcie_mode);

	clk_enable_bulk(&priv->clks);

	/* Assert-then-deassert the block resets with settle delays */
	reset_assert_bulk(&priv->rsts);
	udelay(100);
	reset_deassert_bulk(&priv->rsts);
	udelay(1000);

	ret = mt7620_eth_alloc_rings_pkts(priv);
	if (ret)
		return ret;

	ret = mt7620_mdio_register(dev);
	if (ret)
		dev_warn(dev, "mt7620_eth: failed to register MDIO bus\n");

	mt7620_gsw_init(priv);

	return 0;
}
1035
/* Driver remove: stop the DMA engines, then release the DMA memory */
static int mt7620_eth_remove(struct udevice *dev)
{
	struct mt7620_eth_priv *priv = dev_get_priv(dev);

	mt7620_eth_stop(dev);
	mt7620_eth_free_rings_pkts(priv);

	return 0;
}
1046
/*
 * Parse one switch-port subnode ("port4" -> idx 0, "port5" -> idx 1) from
 * the device tree into priv->port_cfg[idx].
 *
 * Recognized properties:
 *  - phy-mode: mii/rmii/rgmii/none (defaults to none when absent)
 *  - fixed-link subnode: full-duplex, speed (10/100/1000); on port5 also
 *    mediatek,mt7530 plus a mediatek,mt7530-reset GPIO for an attached
 *    MT7530 switch
 *  - phy-addr: external PHY address; must be <= 31 and above the embedded
 *    PHY range (>= 3 for port4, >= 4 for port5)
 *
 * Returns 0 on success or a negative error code on invalid properties.
 */
static int mt7620_eth_parse_gsw_port(struct mt7620_eth_priv *priv, u32 idx,
				     ofnode node)
{
	ofnode subnode;
	const char *str;
	int mode, speed, ret;
	u32 phy_addr;

	str = ofnode_read_string(node, "phy-mode");
	if (str) {
		mode = phy_get_interface_by_name(str);
		if (mode < 0) {
			dev_err(priv->dev, "mt7620_eth: invalid phy-mode\n");
			return -EINVAL;
		}

		switch (mode) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_RMII:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_NONE:
			break;
		default:
			dev_err(priv->dev,
				"mt7620_eth: unsupported phy-mode\n");
			return -ENOTSUPP;
		}

		priv->port_cfg[idx].mode = mode;
	} else {
		priv->port_cfg[idx].mode = PHY_INTERFACE_MODE_NONE;
	}

	subnode = ofnode_find_subnode(node, "fixed-link");
	if (ofnode_valid(subnode)) {
		priv->port_cfg[idx].force_mode = 1;
		priv->port_cfg[idx].duplex = ofnode_read_bool(subnode,
							      "full-duplex");
		speed = ofnode_read_u32_default(subnode, "speed", 0);
		switch (speed) {
		case SPEED_10:
			priv->port_cfg[idx].speed = FORCE_SPEED_10;
			break;
		case SPEED_100:
			priv->port_cfg[idx].speed = FORCE_SPEED_100;
			break;
		case SPEED_1000:
			priv->port_cfg[idx].speed = FORCE_SPEED_1000;
			break;
		default:
			dev_err(priv->dev,
				"mt7620_eth: invalid speed for fixed-link\n");
			return -EINVAL;
		}

		/* Only port5 (idx 1) may carry an MT7530 switch */
		if (idx == 1 && ofnode_read_bool(subnode, "mediatek,mt7530")) {
			priv->port5_mt7530 = true;

			ret = gpio_request_by_name_nodev(subnode,
				"mediatek,mt7530-reset", 0, &priv->gpio_swrst,
				GPIOD_IS_OUT);
			if (ret) {
				dev_err(priv->dev,
					"mt7620_eth: missing mt7530 reset gpio\n");
				return ret;
			}
		}
	}

	ret = ofnode_read_u32(node, "phy-addr", &phy_addr);
	if (!ret) {
		/* Reject addresses clashing with the embedded PHY range */
		if (phy_addr > 31 || (idx == 0 && phy_addr < 3) ||
		    (idx == 1 && phy_addr < 4)) {
			dev_err(priv->dev, "mt7620_eth: invalid phy address\n");
			return -EINVAL;
		}

		priv->port_cfg[idx].phy_addr = phy_addr;
	} else {
		/* -1 means no external PHY on this port */
		priv->port_cfg[idx].phy_addr = -1;
	}

	return 0;
}
1131
mt7620_eth_parse_gsw_cfg(struct udevice * dev)1132 static int mt7620_eth_parse_gsw_cfg(struct udevice *dev)
1133 {
1134 struct mt7620_eth_priv *priv = dev_get_priv(dev);
1135 ofnode subnode;
1136 int ret;
1137
1138 subnode = ofnode_find_subnode(dev_ofnode(dev), "port4");
1139 if (ofnode_valid(subnode)) {
1140 ret = mt7620_eth_parse_gsw_port(priv, 0, subnode);
1141 if (ret)
1142 return ret;
1143 } else {
1144 priv->port_cfg[0].mode = PHY_INTERFACE_MODE_NONE;
1145 }
1146
1147 subnode = ofnode_find_subnode(dev_ofnode(dev), "port5");
1148 if (ofnode_valid(subnode))
1149 return mt7620_eth_parse_gsw_port(priv, 1, subnode);
1150
1151 priv->port_cfg[1].mode = PHY_INTERFACE_MODE_NONE;
1152 return 0;
1153 }
1154
mt7620_eth_of_to_plat(struct udevice * dev)1155 static int mt7620_eth_of_to_plat(struct udevice *dev)
1156 {
1157 struct eth_pdata *pdata = dev_get_plat(dev);
1158 struct mt7620_eth_priv *priv = dev_get_priv(dev);
1159 struct ofnode_phandle_args sysc_args;
1160 int ret;
1161
1162 pdata->iobase = dev_read_addr(dev);
1163
1164 priv->dev = dev;
1165
1166 ret = ofnode_parse_phandle_with_args(dev_ofnode(dev), "mediatek,sysc", NULL,
1167 0, 0, &sysc_args);
1168 if (ret) {
1169 dev_err(dev, "mt7620_eth: sysc property not found\n");
1170 return ret;
1171 }
1172
1173 ret = uclass_get_device_by_ofnode(UCLASS_MISC, sysc_args.node,
1174 &priv->sysc);
1175 if (ret) {
1176 dev_err(dev, "mt7620_eth: failed to sysc device\n");
1177 return ret;
1178 }
1179
1180 priv->fe_base = dev_remap_addr_name(dev, "fe");
1181 if (!priv->fe_base) {
1182 dev_err(dev, "mt7620_eth: failed to map fe registers\n");
1183 return -EINVAL;
1184 }
1185
1186 priv->gsw_base = dev_remap_addr_name(dev, "esw");
1187 if (!priv->gsw_base) {
1188 dev_err(dev, "mt7620_eth: failed to map esw registers\n");
1189 return -EINVAL;
1190 }
1191
1192 ret = reset_get_bulk(dev, &priv->rsts);
1193 if (ret) {
1194 dev_err(dev, "mt7620_eth: failed to get resetctl\n");
1195 return ret;
1196 }
1197
1198 ret = clk_get_bulk(dev, &priv->clks);
1199 if (ret) {
1200 dev_err(dev, "mt7620_eth: failed to get clocks\n");
1201 return ret;
1202 }
1203
1204 return mt7620_eth_parse_gsw_cfg(dev);
1205 }
1206
/* Device tree compatible strings matched by this driver */
static const struct udevice_id mt7620_eth_ids[] = {
	{ .compatible = "mediatek,mt7620-eth" },
	{}
};
1211
/* Driver model registration for the MT7620 frame engine ethernet */
U_BOOT_DRIVER(mt7620_eth) = {
	.name = "mt7620-eth",
	.id = UCLASS_ETH,
	.of_match = mt7620_eth_ids,
	.of_to_plat = mt7620_eth_of_to_plat,
	.plat_auto = sizeof(struct eth_pdata),
	.probe = mt7620_eth_probe,
	.remove = mt7620_eth_remove,
	.ops = &mt7620_eth_ops,
	.priv_auto = sizeof(struct mt7620_eth_priv),
};
1223