// SPDX-License-Identifier: GPL-2.0+
/*
 * MediaTek ethernet IP driver for U-Boot
 *
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * This code is mostly based on the code extracted from this MediaTek
 * github repository:
 *
 * https://github.com/MediaTek-Labs/linkit-smart-uboot.git
 *
 * I was not able to find a specific license or other developers'
 * copyrights here, so I can't add them.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Ethernet frame engine register */
#define PDMA_RELATED		0x0800

#define TX_BASE_PTR0		(PDMA_RELATED + 0x000)
#define TX_MAX_CNT0		(PDMA_RELATED + 0x004)
#define TX_CTX_IDX0		(PDMA_RELATED + 0x008)
#define TX_DTX_IDX0		(PDMA_RELATED + 0x00c)

#define RX_BASE_PTR0		(PDMA_RELATED + 0x100)
#define RX_MAX_CNT0		(PDMA_RELATED + 0x104)
#define RX_CALC_IDX0		(PDMA_RELATED + 0x108)

#define PDMA_GLO_CFG		(PDMA_RELATED + 0x204)
#define PDMA_RST_IDX		(PDMA_RELATED + 0x208)
#define DLY_INT_CFG		(PDMA_RELATED + 0x20c)

#define SDM_RELATED		0x0c00

#define SDM_MAC_ADRL		(SDM_RELATED + 0x0c)	/* MAC address LSB */
#define SDM_MAC_ADRH		(SDM_RELATED + 0x10)	/* MAC address MSB */

#define RST_DTX_IDX0		BIT(0)
#define RST_DRX_IDX0		BIT(16)

#define TX_DMA_EN		BIT(0)
#define TX_DMA_BUSY		BIT(1)
#define RX_DMA_EN		BIT(2)
#define RX_DMA_BUSY		BIT(3)
#define TX_WB_DDONE		BIT(6)

/* Ethernet switch register */
#define MT7628_SWITCH_FCT0	0x0008
#define MT7628_SWITCH_PFC1	0x0014
#define MT7628_SWITCH_PVIDC0	0x0040
#define MT7628_SWITCH_PVIDC1	0x0044
#define MT7628_SWITCH_PVIDC2	0x0048
#define MT7628_SWITCH_PVIDC3	0x004c
#define MT7628_SWITCH_VMSC0	0x0070
#define MT7628_SWITCH_FPA	0x0084
#define MT7628_SWITCH_SOCPC	0x008c
#define MT7628_SWITCH_POC0	0x0090
#define MT7628_SWITCH_POC2	0x0098
#define MT7628_SWITCH_SGC	0x009c
#define MT7628_SWITCH_PCR0	0x00c0
#define PCR0_PHY_ADDR		GENMASK(4, 0)
#define PCR0_PHY_REG		GENMASK(12, 8)
#define PCR0_WT_PHY_CMD		BIT(13)
#define PCR0_RD_PHY_CMD		BIT(14)
#define PCR0_WT_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_PCR1	0x00c4
#define PCR1_WT_DONE		BIT(0)
#define PCR1_RD_RDY		BIT(1)
#define PCR1_RD_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_FPA1	0x00c8
#define MT7628_SWITCH_FCT2	0x00cc
#define MT7628_SWITCH_SGC2	0x00e4
#define MT7628_SWITCH_BMU_CTRL	0x0110

/* rxd2 */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PLEN0		GENMASK(29, 16)
#define RX_DMA_TAG		BIT(15)

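/*
 * PDMA RX descriptor: four 32-bit words shared with the frame engine.
 * rxd1 holds the physical buffer address, rxd2 carries the status bits
 * and packet length defined above (RX_DMA_*); rxd3/rxd4 are unused here.
 */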
struct fe_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

#define TX_DMA_PLEN0		GENMASK(29, 16)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_DONE		BIT(31)

#define TX_DMA_INS_VLAN_MT7621	BIT(16)
#define TX_DMA_INS_VLAN		BIT(7)
#define TX_DMA_INS_PPPOE	BIT(12)
#define TX_DMA_PN		GENMASK(26, 24)

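/*
 * PDMA TX descriptor: txd1 holds the physical buffer address, txd2 the
 * length/status bits (TX_DMA_*) and txd4 the forward port (TX_DMA_PN).
 */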
struct fe_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);

#define NUM_RX_DESC		256
#define NUM_TX_DESC		4
#define NUM_PHYS		5

#define PADDING_LENGTH		60

#define MTK_QDMA_PAGE_SIZE	2048

#define CONFIG_MDIO_TIMEOUT	100
#define CONFIG_DMA_STOP_TIMEOUT	100
#define CONFIG_TX_DMA_TIMEOUT	100

struct mt7628_eth_dev {
	void __iomem *base;		/* frame engine base address */
	void __iomem *eth_sw_base;	/* switch base address */

	struct mii_dev *bus;

	struct fe_tx_dma *tx_ring;
	struct fe_rx_dma *rx_ring;

	u8 *rx_buf[NUM_RX_DESC];

	/* Index of the next RXD in RXD ring 0 that the DMA will use */
	int rx_dma_idx;
	/* Index of the next TXD in TXD ring 0 that the CPU will use */
	int tx_dma_idx;

	struct reset_ctl rst_ephy;

	struct phy_device *phy;

	int wan_port;
};

static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length);

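/*
 * Poll the switch PCR1 register until the given status bit reaches the
 * requested state (set or cleared), or until CONFIG_MDIO_TIMEOUT
 * milliseconds have elapsed.
 */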
static int mdio_wait_read(struct mt7628_eth_dev *priv, u32 mask, bool mask_set)
{
	void __iomem *base = priv->eth_sw_base;
	int ret;

	ret = wait_for_bit_le32(base + MT7628_SWITCH_PCR1, mask, mask_set,
				CONFIG_MDIO_TIMEOUT, false);
	if (ret) {
		printf("MDIO operation timeout!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

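/*
 * Indirect PHY register read through the switch: issue a read command
 * via PCR0, wait for PCR1 to signal that the result is ready, then
 * extract the 16-bit data from PCR1.
 */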
static int mii_mgr_read(struct mt7628_eth_dev *priv,
			u32 phy_addr, u32 phy_register, u32 *read_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 status = 0;
	int ret;

	*read_data = 0xffff;
	/* Make sure the previous read operation has completed */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, false);
	if (ret)
		return ret;

	writel(PCR0_RD_PHY_CMD |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr),
	       base + MT7628_SWITCH_PCR0);

	/* Wait for the read data to become ready */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, true);
	if (ret)
		return ret;

	status = readl(base + MT7628_SWITCH_PCR1);
	*read_data = FIELD_GET(PCR1_RD_DATA, status);

	return 0;
}

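/*
 * Indirect PHY register write through the switch: the 16-bit value,
 * PHY address and register number are packed into PCR0 together with
 * the write command, then PCR1 is polled for write completion.
 */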
static int mii_mgr_write(struct mt7628_eth_dev *priv,
			 u32 phy_addr, u32 phy_register, u32 write_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 data;
	int ret;

	/* Make sure previous write operation is complete */
	ret = mdio_wait_read(priv, PCR1_WT_DONE, false);
	if (ret)
		return ret;

	data = FIELD_PREP(PCR0_WT_DATA, write_data) |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr) |
	       PCR0_WT_PHY_CMD;
	writel(data, base + MT7628_SWITCH_PCR0);

	return mdio_wait_read(priv, PCR1_WT_DONE, true);
}

static int mt7628_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	u32 val;
	int ret;

	ret = mii_mgr_read(bus->priv, addr, reg, &val);
	if (ret)
		return ret;

	return val;
}

static int mt7628_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 value)
{
	return mii_mgr_write(bus->priv, addr, reg, value);
}

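/*
 * Initialize the embedded FE PHYs with the register values taken from
 * the MediaTek reference code (see the repository referenced in the
 * file header); the individual values are not further documented here.
 */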
static void mt7628_ephy_init(struct mt7628_eth_dev *priv)
{
	int i;

	mii_mgr_write(priv, 0, 31, 0x2000);	/* change G2 page */
	mii_mgr_write(priv, 0, 26, 0x0000);

	for (i = 0; i < 5; i++) {
		mii_mgr_write(priv, i, 31, 0x8000);	/* change L0 page */
		mii_mgr_write(priv, i, 0, 0x3100);

		/* EEE disable */
		mii_mgr_write(priv, i, 30, 0xa000);
		mii_mgr_write(priv, i, 31, 0xa000);	/* change L2 page */
		mii_mgr_write(priv, i, 16, 0x0606);
		mii_mgr_write(priv, i, 23, 0x0f0e);
		mii_mgr_write(priv, i, 24, 0x1610);
		mii_mgr_write(priv, i, 30, 0x1f15);
		mii_mgr_write(priv, i, 28, 0x6111);
	}

	/* 100Base AOI setting */
	mii_mgr_write(priv, 0, 31, 0x5000);	/* change G5 page */
	mii_mgr_write(priv, 0, 19, 0x004a);
	mii_mgr_write(priv, 0, 20, 0x015a);
	mii_mgr_write(priv, 0, 21, 0x00ee);
	mii_mgr_write(priv, 0, 22, 0x0033);
	mii_mgr_write(priv, 0, 23, 0x020a);
	mii_mgr_write(priv, 0, 24, 0x0000);
	mii_mgr_write(priv, 0, 25, 0x024a);
	mii_mgr_write(priv, 0, 26, 0x035a);
	mii_mgr_write(priv, 0, 27, 0x02ee);
	mii_mgr_write(priv, 0, 28, 0x0233);
	mii_mgr_write(priv, 0, 29, 0x000a);
	mii_mgr_write(priv, 0, 30, 0x0000);

	/* Fix EPHY idle state abnormal behavior */
	mii_mgr_write(priv, 0, 31, 0x4000);	/* change G4 page */
	mii_mgr_write(priv, 0, 29, 0x000d);
	mii_mgr_write(priv, 0, 30, 0x0500);
}

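/*
 * Basic setup of the embedded switch: flow control thresholds, VLAN
 * handling, aging and port configuration, optional LAN/WAN isolation
 * via PVIDs and the VLAN member map, followed by an EPHY reset and PHY
 * initialization. Most register values are taken unchanged from the
 * MediaTek reference code (see the header above).
 */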
static void rt305x_esw_init(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->eth_sw_base;
	void __iomem *reg;
	u32 val = 0, pvid;
	int i;

	/*
	 * FC_RLS_TH=200, FC_SET_TH=160
	 * DROP_RLS=120, DROP_SET_TH=80
	 */
	writel(0xc8a07850, base + MT7628_SWITCH_FCT0);
	writel(0x00000000, base + MT7628_SWITCH_SGC2);
	writel(0x00405555, base + MT7628_SWITCH_PFC1);
	writel(0x00007f7f, base + MT7628_SWITCH_POC0);
	writel(0x00007f7f, base + MT7628_SWITCH_POC2); /* disable VLAN */
	writel(0x0002500c, base + MT7628_SWITCH_FCT2);
	/* hashing algorithm=XOR48, aging interval=300sec */
	writel(0x0008a301, base + MT7628_SWITCH_SGC);
	writel(0x02404040, base + MT7628_SWITCH_SOCPC);

	/* Ext PHY Addr=0x1f */
	writel(0x3f502b28, base + MT7628_SWITCH_FPA1);
	writel(0x00000000, base + MT7628_SWITCH_FPA);
	/* 1us cycle number=125 (FE's clock=125MHz) */
	writel(0x7d000000, base + MT7628_SWITCH_BMU_CTRL);

	/* LAN/WAN partition, the WAN port is unusable for U-Boot networking */
	if (priv->wan_port >= 0 && priv->wan_port < 6) {
		for (i = 0; i < 8; i++) {
			pvid = i == priv->wan_port ? 2 : 1;
			reg = base + MT7628_SWITCH_PVIDC0 + (i / 2) * 4;
			if (i % 2 == 0) {
				val = pvid;
			} else {
				val |= (pvid << 12);
				writel(val, reg);
			}
		}

		val = 0xffff407f;
		val |= 1 << (8 + priv->wan_port);
		val &= ~(1 << priv->wan_port);
		writel(val, base + MT7628_SWITCH_VMSC0);
	}

	/* Reset the embedded PHYs */
	reset_assert(&priv->rst_ephy);
	reset_deassert(&priv->rst_ephy);
	mdelay(10);

	mt7628_ephy_init(priv);
}

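/*
 * eth_dma_start() / eth_dma_stop() toggle the TX/RX DMA enable bits and
 * the TX write-back-DONE bit in PDMA_GLO_CFG; the stop path additionally
 * waits for the DMA engines to become idle.
 */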
static void eth_dma_start(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;

	setbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
}

static void eth_dma_stop(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;
	int ret;

	clrbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);

	/* Wait for DMA to stop */
	ret = wait_for_bit_le32(base + PDMA_GLO_CFG,
				RX_DMA_BUSY | TX_DMA_BUSY, false,
				CONFIG_DMA_STOP_TIMEOUT, false);
	if (ret)
		printf("DMA stop timeout error!\n");
}

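/*
 * Program the MAC address into the frame engine: the two high bytes go
 * into SDM_MAC_ADRH, the remaining four bytes into SDM_MAC_ADRL.
 */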
static int mt7628_eth_write_hwaddr(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	u8 *addr = ((struct eth_pdata *)dev_get_plat(dev))->enetaddr;
	u32 val;

	/* Set MAC address. */
	val = addr[0];
	val = (val << 8) | addr[1];
	writel(val, base + SDM_MAC_ADRH);

	val = addr[2];
	val = (val << 8) | addr[3];
	val = (val << 8) | addr[4];
	val = (val << 8) | addr[5];
	writel(val, base + SDM_MAC_ADRL);

	return 0;
}

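/*
 * Transmit one frame: pad it to the minimum Ethernet length, wait for
 * the current TX descriptor to become free (DONE bit set), flush the
 * packet from the data cache, fill in the descriptor and advance the
 * CPU TX index so the DMA engine picks the frame up.
 */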
static int mt7628_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int ret;
	int idx;
	int i;

	idx = priv->tx_dma_idx;

	/* Pad the frame to the minimum length */
	if (length < PADDING_LENGTH) {
		char *p = (char *)packet;

		for (i = 0; i < PADDING_LENGTH - length; i++)
			p[length + i] = 0;
		length = PADDING_LENGTH;
	}

	/* Check if the buffer is ready for the next TX DMA */
	ret = wait_for_bit_le32(&priv->tx_ring[idx].txd2, TX_DMA_DONE, true,
				CONFIG_TX_DMA_TIMEOUT, false);
	if (ret) {
		printf("TX: DMA still busy on buffer %d\n", idx);
		return ret;
	}

	flush_dcache_range((u32)packet, (u32)packet + length);

	priv->tx_ring[idx].txd1 = CPHYSADDR(packet);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_PLEN0;
	priv->tx_ring[idx].txd2 |= FIELD_PREP(TX_DMA_PLEN0, length);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_DONE;

	idx = (idx + 1) % NUM_TX_DESC;

	/* Make sure the descriptor updates are visible before kicking DMA */
	wmb();
	writel(idx, base + TX_CTX_IDX0);

	priv->tx_dma_idx = idx;

	return 0;
}

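/*
 * Receive path: check whether the DMA has marked the current RX
 * descriptor as done, validate the reported length, invalidate the
 * cache for the buffer and hand it to the network stack. The descriptor
 * itself is re-armed here; the RX index register is only advanced later
 * in mt7628_eth_free_pkt().
 */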
static int mt7628_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	u32 rxd_info;
	int length;
	int idx;

	idx = priv->rx_dma_idx;

	rxd_info = priv->rx_ring[idx].rxd2;
	if ((rxd_info & RX_DMA_DONE) == 0)
		return -EAGAIN;

	length = FIELD_GET(RX_DMA_PLEN0, priv->rx_ring[idx].rxd2);
	if (length == 0 || length > MTK_QDMA_PAGE_SIZE) {
		printf("%s: invalid length (%d bytes)\n", __func__, length);
		mt7628_eth_free_pkt(dev, NULL, 0);
		return -EIO;
	}

	*packetp = priv->rx_buf[idx];
	invalidate_dcache_range((u32)*packetp, (u32)*packetp + length);

	priv->rx_ring[idx].rxd4 = 0;
	priv->rx_ring[idx].rxd2 = RX_DMA_LSO;

	/* Make sure the descriptor update is visible before the RX index is advanced */
	wmb();

	return length;
}

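/*
 * Return the just-processed RX descriptor to the DMA engine by writing
 * its index to RX_CALC_IDX0, then advance the software RX index to the
 * next descriptor.
 */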
static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int idx;

	idx = priv->rx_dma_idx;

	/* Hand this RXD back to the DMA engine for re-use */
	writel(idx, base + RX_CALC_IDX0);

	/* Advance to the next RXD to be received */
	idx = (idx + 1) % NUM_RX_DESC;

	priv->rx_dma_idx = idx;

	return 0;
}

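/*
 * Bring up the interface: initialize the RX/TX descriptor rings,
 * program their base addresses and sizes into the PDMA, reset the
 * hardware ring indices, start DMA and (if configured) the PHY, and
 * finally drain any frames the switch has already queued.
 */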
static int mt7628_eth_start(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	uchar packet[MTK_QDMA_PAGE_SIZE];
	uchar *packetp;
	int ret;
	int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		memset((void *)&priv->rx_ring[i], 0, sizeof(priv->rx_ring[0]));
		priv->rx_ring[i].rxd2 |= RX_DMA_LSO;
		priv->rx_ring[i].rxd1 = CPHYSADDR(priv->rx_buf[i]);
	}

	for (i = 0; i < NUM_TX_DESC; i++) {
		memset((void *)&priv->tx_ring[i], 0, sizeof(priv->tx_ring[0]));
		priv->tx_ring[i].txd2 = TX_DMA_LS0 | TX_DMA_DONE;
		priv->tx_ring[i].txd4 = FIELD_PREP(TX_DMA_PN, 1);
	}

	priv->rx_dma_idx = 0;
	priv->tx_dma_idx = 0;

	/* Make sure the descriptor writes are visible before programming the hardware */
	wmb();

	/* disable delay interrupt */
	writel(0, base + DLY_INT_CFG);

	clrbits_le32(base + PDMA_GLO_CFG, 0xffff0000);

	/* Tell the adapter where the TX/RX rings are located. */
	writel(CPHYSADDR(&priv->rx_ring[0]), base + RX_BASE_PTR0);
	writel(CPHYSADDR((u32)&priv->tx_ring[0]), base + TX_BASE_PTR0);

	writel(NUM_RX_DESC, base + RX_MAX_CNT0);
	writel(NUM_TX_DESC, base + TX_MAX_CNT0);

	writel(priv->tx_dma_idx, base + TX_CTX_IDX0);
	writel(RST_DTX_IDX0, base + PDMA_RST_IDX);

	writel(NUM_RX_DESC - 1, base + RX_CALC_IDX0);
	writel(RST_DRX_IDX0, base + PDMA_RST_IDX);

	/* Make sure the ring setup is visible before enabling DMA */
	wmb();
	eth_dma_start(priv);

	if (priv->phy) {
		ret = phy_startup(priv->phy);
		if (ret)
			return ret;

		if (!priv->phy->link)
			return -EAGAIN;
	}

	/*
	 * The integrated switch seems to queue some received Ethernet
	 * packets in a FIFO. Let's read the already queued packets out
	 * using the receive routine, so that these old messages are
	 * dropped before the new transfer starts.
	 */
	packetp = &packet[0];
	while (mt7628_eth_recv(dev, 0, &packetp) != -EAGAIN)
		mt7628_eth_free_pkt(dev, packetp, 0);

	return 0;
}

static void mt7628_eth_stop(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);

	eth_dma_stop(priv);
}

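/*
 * Probe: map the frame engine and switch register ranges, look up the
 * EPHY reset control, allocate the (uncached) DMA rings and RX buffers,
 * register an MDIO bus, optionally connect to the PHY given by the
 * "mediatek,poll-link-phy" property, and initialize the switch.
 */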
static int mt7628_eth_probe(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	int poll_link_phy;
	int ret;
	int i;

	/* Save frame-engine base address for later use */
	priv->base = dev_remap_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Save switch base address for later use */
	priv->eth_sw_base = dev_remap_addr_index(dev, 1);
	if (IS_ERR(priv->eth_sw_base))
		return PTR_ERR(priv->eth_sw_base);

	/* Reset controller */
	ret = reset_get_by_name(dev, "ephy", &priv->rst_ephy);
	if (ret) {
		pr_err("unable to find reset controller for ethernet PHYs\n");
		return ret;
	}

	/* WAN port will be isolated from LAN ports */
	priv->wan_port = dev_read_u32_default(dev, "mediatek,wan-port", -1);

	/* Put rx and tx rings into KSEG1 area (uncached) */
	priv->tx_ring = (struct fe_tx_dma *)
			KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
					   sizeof(*priv->tx_ring) * NUM_TX_DESC));
	priv->rx_ring = (struct fe_rx_dma *)
			KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
					   sizeof(*priv->rx_ring) * NUM_RX_DESC));

	for (i = 0; i < NUM_RX_DESC; i++)
		priv->rx_buf[i] = memalign(PKTALIGN, MTK_QDMA_PAGE_SIZE);

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mt7628_mdio_read;
	bus->write = mt7628_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)priv;

	ret = mdio_register(bus);
	if (ret)
		return ret;

	poll_link_phy = dev_read_u32_default(dev, "mediatek,poll-link-phy", -1);
	if (poll_link_phy >= 0) {
		if (poll_link_phy >= NUM_PHYS) {
			pr_err("invalid phy %d for poll-link-phy\n",
			       poll_link_phy);
			return -EINVAL;
		}

		priv->phy = phy_connect(bus, poll_link_phy, dev,
					PHY_INTERFACE_MODE_MII);
		if (!priv->phy) {
			pr_err("failed to probe phy %d\n", poll_link_phy);
			return -ENODEV;
		}

		priv->phy->advertising = priv->phy->supported;
		phy_config(priv->phy);
	}

	/* Switch configuration */
	rt305x_esw_init(priv);

	return 0;
}

633
634 static const struct eth_ops mt7628_eth_ops = {
635 .start = mt7628_eth_start,
636 .send = mt7628_eth_send,
637 .recv = mt7628_eth_recv,
638 .free_pkt = mt7628_eth_free_pkt,
639 .stop = mt7628_eth_stop,
640 .write_hwaddr = mt7628_eth_write_hwaddr,
641 };
642
643 static const struct udevice_id mt7628_eth_ids[] = {
644 { .compatible = "mediatek,mt7628-eth" },
645 { }
646 };
647
648 U_BOOT_DRIVER(mt7628_eth) = {
649 .name = "mt7628_eth",
650 .id = UCLASS_ETH,
651 .of_match = mt7628_eth_ids,
652 .probe = mt7628_eth_probe,
653 .ops = &mt7628_eth_ops,
654 .priv_auto = sizeof(struct mt7628_eth_dev),
655 .plat_auto = sizeof(struct eth_pdata),
656 };
657