1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
7 */
8
9 #include <linux/of.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/of_address.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/platform_device.h>
15 #include <linux/regmap.h>
16 #include <linux/clk.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/if_vlan.h>
19 #include <linux/reset.h>
20 #include <linux/tcp.h>
21 #include <linux/interrupt.h>
22 #include <linux/pinctrl/devinfo.h>
23 #include <linux/phylink.h>
24 #include <linux/pcs/pcs-mtk-lynxi.h>
25 #include <linux/jhash.h>
26 #include <linux/bitfield.h>
27 #include <net/dsa.h>
28 #include <net/dst_metadata.h>
29 #include <net/page_pool/helpers.h>
30
31 #include "mtk_eth_soc.h"
32 #include "mtk_wed.h"
33
34 static int mtk_msg_level = -1;
35 module_param_named(msg_level, mtk_msg_level, int, 0);
36 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
37
38 #define MTK_ETHTOOL_STAT(x) { #x, \
39 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
40
41 #define MTK_ETHTOOL_XDP_STAT(x) { #x, \
42 offsetof(struct mtk_hw_stats, xdp_stats.x) / \
43 sizeof(u64) }
44
45 static const struct mtk_reg_map mtk_reg_map = {
46 .tx_irq_mask = 0x1a1c,
47 .tx_irq_status = 0x1a18,
48 .pdma = {
49 .rx_ptr = 0x0900,
50 .rx_cnt_cfg = 0x0904,
51 .pcrx_ptr = 0x0908,
52 .glo_cfg = 0x0a04,
53 .rst_idx = 0x0a08,
54 .delay_irq = 0x0a0c,
55 .irq_status = 0x0a20,
56 .irq_mask = 0x0a28,
57 .adma_rx_dbg0 = 0x0a38,
58 .int_grp = 0x0a50,
59 },
60 .qdma = {
61 .qtx_cfg = 0x1800,
62 .qtx_sch = 0x1804,
63 .rx_ptr = 0x1900,
64 .rx_cnt_cfg = 0x1904,
65 .qcrx_ptr = 0x1908,
66 .glo_cfg = 0x1a04,
67 .rst_idx = 0x1a08,
68 .delay_irq = 0x1a0c,
69 .fc_th = 0x1a10,
70 .tx_sch_rate = 0x1a14,
71 .int_grp = 0x1a20,
72 .hred = 0x1a44,
73 .ctx_ptr = 0x1b00,
74 .dtx_ptr = 0x1b04,
75 .crx_ptr = 0x1b10,
76 .drx_ptr = 0x1b14,
77 .fq_head = 0x1b20,
78 .fq_tail = 0x1b24,
79 .fq_count = 0x1b28,
80 .fq_blen = 0x1b2c,
81 },
82 .gdm1_cnt = 0x2400,
83 .gdma_to_ppe = 0x4444,
84 .ppe_base = 0x0c00,
85 .wdma_base = {
86 [0] = 0x2800,
87 [1] = 0x2c00,
88 },
89 .pse_iq_sta = 0x0110,
90 .pse_oq_sta = 0x0118,
91 };
92
93 static const struct mtk_reg_map mt7628_reg_map = {
94 .tx_irq_mask = 0x0a28,
95 .tx_irq_status = 0x0a20,
96 .pdma = {
97 .rx_ptr = 0x0900,
98 .rx_cnt_cfg = 0x0904,
99 .pcrx_ptr = 0x0908,
100 .glo_cfg = 0x0a04,
101 .rst_idx = 0x0a08,
102 .delay_irq = 0x0a0c,
103 .irq_status = 0x0a20,
104 .irq_mask = 0x0a28,
105 .int_grp = 0x0a50,
106 },
107 };
108
109 static const struct mtk_reg_map mt7986_reg_map = {
110 .tx_irq_mask = 0x461c,
111 .tx_irq_status = 0x4618,
112 .pdma = {
113 .rx_ptr = 0x4100,
114 .rx_cnt_cfg = 0x4104,
115 .pcrx_ptr = 0x4108,
116 .glo_cfg = 0x4204,
117 .rst_idx = 0x4208,
118 .delay_irq = 0x420c,
119 .irq_status = 0x4220,
120 .irq_mask = 0x4228,
121 .adma_rx_dbg0 = 0x4238,
122 .int_grp = 0x4250,
123 },
124 .qdma = {
125 .qtx_cfg = 0x4400,
126 .qtx_sch = 0x4404,
127 .rx_ptr = 0x4500,
128 .rx_cnt_cfg = 0x4504,
129 .qcrx_ptr = 0x4508,
130 .glo_cfg = 0x4604,
131 .rst_idx = 0x4608,
132 .delay_irq = 0x460c,
133 .fc_th = 0x4610,
134 .int_grp = 0x4620,
135 .hred = 0x4644,
136 .ctx_ptr = 0x4700,
137 .dtx_ptr = 0x4704,
138 .crx_ptr = 0x4710,
139 .drx_ptr = 0x4714,
140 .fq_head = 0x4720,
141 .fq_tail = 0x4724,
142 .fq_count = 0x4728,
143 .fq_blen = 0x472c,
144 .tx_sch_rate = 0x4798,
145 },
146 .gdm1_cnt = 0x1c00,
147 .gdma_to_ppe = 0x3333,
148 .ppe_base = 0x2000,
149 .wdma_base = {
150 [0] = 0x4800,
151 [1] = 0x4c00,
152 },
153 .pse_iq_sta = 0x0180,
154 .pse_oq_sta = 0x01a0,
155 };
156
157 static const struct mtk_reg_map mt7988_reg_map = {
158 .tx_irq_mask = 0x461c,
159 .tx_irq_status = 0x4618,
160 .pdma = {
161 .rx_ptr = 0x6900,
162 .rx_cnt_cfg = 0x6904,
163 .pcrx_ptr = 0x6908,
164 .glo_cfg = 0x6a04,
165 .rst_idx = 0x6a08,
166 .delay_irq = 0x6a0c,
167 .irq_status = 0x6a20,
168 .irq_mask = 0x6a28,
169 .adma_rx_dbg0 = 0x6a38,
170 .int_grp = 0x6a50,
171 },
172 .qdma = {
173 .qtx_cfg = 0x4400,
174 .qtx_sch = 0x4404,
175 .rx_ptr = 0x4500,
176 .rx_cnt_cfg = 0x4504,
177 .qcrx_ptr = 0x4508,
178 .glo_cfg = 0x4604,
179 .rst_idx = 0x4608,
180 .delay_irq = 0x460c,
181 .fc_th = 0x4610,
182 .int_grp = 0x4620,
183 .hred = 0x4644,
184 .ctx_ptr = 0x4700,
185 .dtx_ptr = 0x4704,
186 .crx_ptr = 0x4710,
187 .drx_ptr = 0x4714,
188 .fq_head = 0x4720,
189 .fq_tail = 0x4724,
190 .fq_count = 0x4728,
191 .fq_blen = 0x472c,
192 .tx_sch_rate = 0x4798,
193 },
194 .gdm1_cnt = 0x1c00,
195 .gdma_to_ppe = 0x3333,
196 .ppe_base = 0x2000,
197 .wdma_base = {
198 [0] = 0x4800,
199 [1] = 0x4c00,
200 [2] = 0x5000,
201 },
202 .pse_iq_sta = 0x0180,
203 .pse_oq_sta = 0x01a0,
204 };
205
206 /* strings used by ethtool */
207 static const struct mtk_ethtool_stats {
208 char str[ETH_GSTRING_LEN];
209 u32 offset;
210 } mtk_ethtool_stats[] = {
211 MTK_ETHTOOL_STAT(tx_bytes),
212 MTK_ETHTOOL_STAT(tx_packets),
213 MTK_ETHTOOL_STAT(tx_skip),
214 MTK_ETHTOOL_STAT(tx_collisions),
215 MTK_ETHTOOL_STAT(rx_bytes),
216 MTK_ETHTOOL_STAT(rx_packets),
217 MTK_ETHTOOL_STAT(rx_overflow),
218 MTK_ETHTOOL_STAT(rx_fcs_errors),
219 MTK_ETHTOOL_STAT(rx_short_errors),
220 MTK_ETHTOOL_STAT(rx_long_errors),
221 MTK_ETHTOOL_STAT(rx_checksum_errors),
222 MTK_ETHTOOL_STAT(rx_flow_control_packets),
223 MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
224 MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
225 MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
226 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
227 MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
228 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
229 MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
230 };
231
232 static const char * const mtk_clks_source_name[] = {
233 "ethif",
234 "sgmiitop",
235 "esw",
236 "gp0",
237 "gp1",
238 "gp2",
239 "gp3",
240 "xgp1",
241 "xgp2",
242 "xgp3",
243 "crypto",
244 "fe",
245 "trgpll",
246 "sgmii_tx250m",
247 "sgmii_rx250m",
248 "sgmii_cdr_ref",
249 "sgmii_cdr_fb",
250 "sgmii2_tx250m",
251 "sgmii2_rx250m",
252 "sgmii2_cdr_ref",
253 "sgmii2_cdr_fb",
254 "sgmii_ck",
255 "eth2pll",
256 "wocpu0",
257 "wocpu1",
258 "netsys0",
259 "netsys1",
260 "ethwarp_wocpu2",
261 "ethwarp_wocpu1",
262 "ethwarp_wocpu0",
263 "top_usxgmii0_sel",
264 "top_usxgmii1_sel",
265 "top_sgm0_sel",
266 "top_sgm1_sel",
267 "top_xfi_phy0_xtal_sel",
268 "top_xfi_phy1_xtal_sel",
269 "top_eth_gmii_sel",
270 "top_eth_refck_50m_sel",
271 "top_eth_sys_200m_sel",
272 "top_eth_sys_sel",
273 "top_eth_xgmii_sel",
274 "top_eth_mii_sel",
275 "top_netsys_sel",
276 "top_netsys_500m_sel",
277 "top_netsys_pao_2x_sel",
278 "top_netsys_sync_250m_sel",
279 "top_netsys_ppefb_250m_sel",
280 "top_netsys_warp_sel",
281 };
282
283 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
284 {
285 __raw_writel(val, eth->base + reg);
286 }
287
288 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
289 {
290 return __raw_readl(eth->base + reg);
291 }
292
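/* Read-modify-write helper: clear the bits in @mask, then set the bits in @set, in register @reg */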
293 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
294 {
295 u32 val;
296
297 val = mtk_r32(eth, reg);
298 val &= ~mask;
299 val |= set;
300 mtk_w32(eth, val, reg);
301 return reg;
302 }
303
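/* Poll the indirect PHY access control register until its busy bit clears */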
304 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
305 {
306 unsigned long t_start = jiffies;
307
308 while (1) {
309 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
310 return 0;
311 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
312 break;
313 cond_resched();
314 }
315
316 dev_err(eth->dev, "mdio: MDIO timeout\n");
317 return -ETIMEDOUT;
318 }
319
320 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
321 u32 write_data)
322 {
323 int ret;
324
325 ret = mtk_mdio_busy_wait(eth);
326 if (ret < 0)
327 return ret;
328
329 mtk_w32(eth, PHY_IAC_ACCESS |
330 PHY_IAC_START_C22 |
331 PHY_IAC_CMD_WRITE |
332 PHY_IAC_REG(phy_reg) |
333 PHY_IAC_ADDR(phy_addr) |
334 PHY_IAC_DATA(write_data),
335 MTK_PHY_IAC);
336
337 ret = mtk_mdio_busy_wait(eth);
338 if (ret < 0)
339 return ret;
340
341 return 0;
342 }
343
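/* Clause 45 write: an address cycle selecting @devad/@phy_reg, followed by a data write cycle */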
344 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
345 u32 devad, u32 phy_reg, u32 write_data)
346 {
347 int ret;
348
349 ret = mtk_mdio_busy_wait(eth);
350 if (ret < 0)
351 return ret;
352
353 mtk_w32(eth, PHY_IAC_ACCESS |
354 PHY_IAC_START_C45 |
355 PHY_IAC_CMD_C45_ADDR |
356 PHY_IAC_REG(devad) |
357 PHY_IAC_ADDR(phy_addr) |
358 PHY_IAC_DATA(phy_reg),
359 MTK_PHY_IAC);
360
361 ret = mtk_mdio_busy_wait(eth);
362 if (ret < 0)
363 return ret;
364
365 mtk_w32(eth, PHY_IAC_ACCESS |
366 PHY_IAC_START_C45 |
367 PHY_IAC_CMD_WRITE |
368 PHY_IAC_REG(devad) |
369 PHY_IAC_ADDR(phy_addr) |
370 PHY_IAC_DATA(write_data),
371 MTK_PHY_IAC);
372
373 ret = mtk_mdio_busy_wait(eth);
374 if (ret < 0)
375 return ret;
376
377 return 0;
378 }
379
380 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
381 {
382 int ret;
383
384 ret = mtk_mdio_busy_wait(eth);
385 if (ret < 0)
386 return ret;
387
388 mtk_w32(eth, PHY_IAC_ACCESS |
389 PHY_IAC_START_C22 |
390 PHY_IAC_CMD_C22_READ |
391 PHY_IAC_REG(phy_reg) |
392 PHY_IAC_ADDR(phy_addr),
393 MTK_PHY_IAC);
394
395 ret = mtk_mdio_busy_wait(eth);
396 if (ret < 0)
397 return ret;
398
399 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
400 }
401
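/* Clause 45 read: an address cycle selecting @devad/@phy_reg, followed by a read cycle returning the 16-bit value */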
402 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
403 u32 devad, u32 phy_reg)
404 {
405 int ret;
406
407 ret = mtk_mdio_busy_wait(eth);
408 if (ret < 0)
409 return ret;
410
411 mtk_w32(eth, PHY_IAC_ACCESS |
412 PHY_IAC_START_C45 |
413 PHY_IAC_CMD_C45_ADDR |
414 PHY_IAC_REG(devad) |
415 PHY_IAC_ADDR(phy_addr) |
416 PHY_IAC_DATA(phy_reg),
417 MTK_PHY_IAC);
418
419 ret = mtk_mdio_busy_wait(eth);
420 if (ret < 0)
421 return ret;
422
423 mtk_w32(eth, PHY_IAC_ACCESS |
424 PHY_IAC_START_C45 |
425 PHY_IAC_CMD_C45_READ |
426 PHY_IAC_REG(devad) |
427 PHY_IAC_ADDR(phy_addr),
428 MTK_PHY_IAC);
429
430 ret = mtk_mdio_busy_wait(eth);
431 if (ret < 0)
432 return ret;
433
434 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
435 }
436
437 static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
438 int phy_reg, u16 val)
439 {
440 struct mtk_eth *eth = bus->priv;
441
442 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
443 }
444
445 static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
446 int devad, int phy_reg, u16 val)
447 {
448 struct mtk_eth *eth = bus->priv;
449
450 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
451 }
452
453 static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
454 {
455 struct mtk_eth *eth = bus->priv;
456
457 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
458 }
459
460 static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
461 int phy_reg)
462 {
463 struct mtk_eth *eth = bus->priv;
464
465 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
466 }
467
468 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
469 phy_interface_t interface)
470 {
471 u32 val;
472
473 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
474 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
475
476 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
477 ETHSYS_TRGMII_MT7621_MASK, val);
478
479 return 0;
480 }
481
482 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
483 phy_interface_t interface)
484 {
485 int ret;
486
487 if (interface == PHY_INTERFACE_MODE_TRGMII) {
488 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
489 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
490 if (ret)
491 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
492 return;
493 }
494
495 dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
496 }
497
498 static void mtk_setup_bridge_switch(struct mtk_eth *eth)
499 {
500 /* Force Port1 XGMAC Link Up */
501 mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
502 MTK_XGMAC_STS(MTK_GMAC1_ID));
503
504 /* Adjust GSW bridge IPG to 11 */
505 mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
506 (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
507 (GSW_IPG_11 << GSWRX_IPG_SHIFT),
508 MTK_GSW_CFG);
509 }
510
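/* SGMII and 802.3z modes are handled by the SGMII PCS; SoCs with a shared SGMII block always use instance 0, others use the per-MAC instance */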
511 static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
512 phy_interface_t interface)
513 {
514 struct mtk_mac *mac = container_of(config, struct mtk_mac,
515 phylink_config);
516 struct mtk_eth *eth = mac->hw;
517 unsigned int sid;
518
519 if (interface == PHY_INTERFACE_MODE_SGMII ||
520 phy_interface_mode_is_8023z(interface)) {
521 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
522 0 : mac->id;
523
524 return eth->sgmii_pcs[sid];
525 }
526
527 return NULL;
528 }
529
530 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
531 const struct phylink_link_state *state)
532 {
533 struct mtk_mac *mac = container_of(config, struct mtk_mac,
534 phylink_config);
535 struct mtk_eth *eth = mac->hw;
536 int val, ge_mode, err = 0;
537 u32 i;
538
539 /* MT76x8 has no hardware settings to configure for the MAC */
540 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
541 mac->interface != state->interface) {
542 /* Setup soc pin functions */
543 switch (state->interface) {
544 case PHY_INTERFACE_MODE_TRGMII:
545 case PHY_INTERFACE_MODE_RGMII_TXID:
546 case PHY_INTERFACE_MODE_RGMII_RXID:
547 case PHY_INTERFACE_MODE_RGMII_ID:
548 case PHY_INTERFACE_MODE_RGMII:
549 case PHY_INTERFACE_MODE_MII:
550 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
551 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
552 if (err)
553 goto init_err;
554 }
555 break;
556 case PHY_INTERFACE_MODE_1000BASEX:
557 case PHY_INTERFACE_MODE_2500BASEX:
558 case PHY_INTERFACE_MODE_SGMII:
559 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
560 if (err)
561 goto init_err;
562 break;
563 case PHY_INTERFACE_MODE_GMII:
564 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
565 err = mtk_gmac_gephy_path_setup(eth, mac->id);
566 if (err)
567 goto init_err;
568 }
569 break;
570 case PHY_INTERFACE_MODE_INTERNAL:
571 break;
572 default:
573 goto err_phy;
574 }
575
576 /* Setup clock for 1st gmac */
577 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
578 !phy_interface_mode_is_8023z(state->interface) &&
579 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
580 if (MTK_HAS_CAPS(mac->hw->soc->caps,
581 MTK_TRGMII_MT7621_CLK)) {
582 if (mt7621_gmac0_rgmii_adjust(mac->hw,
583 state->interface))
584 goto err_phy;
585 } else {
586 mtk_gmac0_rgmii_adjust(mac->hw,
587 state->interface);
588
589 /* mt7623_pad_clk_setup */
590 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
591 mtk_w32(mac->hw,
592 TD_DM_DRVP(8) | TD_DM_DRVN(8),
593 TRGMII_TD_ODT(i));
594
595 /* Assert/release MT7623 RXC reset */
596 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
597 TRGMII_RCK_CTRL);
598 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
599 }
600 }
601
602 switch (state->interface) {
603 case PHY_INTERFACE_MODE_MII:
604 case PHY_INTERFACE_MODE_GMII:
605 ge_mode = 1;
606 break;
607 default:
608 ge_mode = 0;
609 break;
610 }
611
612 /* put the gmac into the right mode */
613 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
614 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
615 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
616 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
617
618 mac->interface = state->interface;
619 }
620
621 /* SGMII */
622 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
623 phy_interface_mode_is_8023z(state->interface)) {
624 /* The path from GMAC to SGMII will be enabled once the SGMIISYS
625 * setup is done.
626 */
627 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
628
629 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
630 SYSCFG0_SGMII_MASK,
631 ~(u32)SYSCFG0_SGMII_MASK);
632
633 /* Save the syscfg0 value for mac_finish */
634 mac->syscfg0 = val;
635 } else if (phylink_autoneg_inband(mode)) {
636 dev_err(eth->dev,
637 "In-band mode not supported in non SGMII mode!\n");
638 return;
639 }
640
641 /* Setup gmac */
642 if (mtk_is_netsys_v3_or_greater(eth) &&
643 mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
644 mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
645 mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
646
647 mtk_setup_bridge_switch(eth);
648 }
649
650 return;
651
652 err_phy:
653 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
654 mac->id, phy_modes(state->interface));
655 return;
656
657 init_err:
658 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
659 mac->id, phy_modes(state->interface), err);
660 }
661
662 static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
663 phy_interface_t interface)
664 {
665 struct mtk_mac *mac = container_of(config, struct mtk_mac,
666 phylink_config);
667 struct mtk_eth *eth = mac->hw;
668 u32 mcr_cur, mcr_new;
669
670 /* Enable SGMII */
671 if (interface == PHY_INTERFACE_MODE_SGMII ||
672 phy_interface_mode_is_8023z(interface))
673 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
674 SYSCFG0_SGMII_MASK, mac->syscfg0);
675
676 /* Setup gmac */
677 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
678 mcr_new = mcr_cur;
679 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
680 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
681
682 /* Only update control register when needed! */
683 if (mcr_new != mcr_cur)
684 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
685
686 return 0;
687 }
688
689 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
690 phy_interface_t interface)
691 {
692 struct mtk_mac *mac = container_of(config, struct mtk_mac,
693 phylink_config);
694 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
695
696 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
697 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
698 }
699
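/* Program the QDMA TX shaper for queue @idx. The MAN/EXP fields encode a rate
 * of MAN * 10^EXP kbit/s, e.g. MAN=1/EXP=4 gives the 10 Mbps minimum rate.
 */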
700 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
701 int speed)
702 {
703 const struct mtk_soc_data *soc = eth->soc;
704 u32 ofs, val;
705
706 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
707 return;
708
709 val = MTK_QTX_SCH_MIN_RATE_EN |
710 /* minimum: 10 Mbps */
711 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
712 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
713 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
714 if (mtk_is_netsys_v1(eth))
715 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
716
717 if (IS_ENABLED(CONFIG_SOC_MT7621)) {
718 switch (speed) {
719 case SPEED_10:
720 val |= MTK_QTX_SCH_MAX_RATE_EN |
721 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
722 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
723 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
724 break;
725 case SPEED_100:
726 val |= MTK_QTX_SCH_MAX_RATE_EN |
727 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
728 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
729 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
730 break;
731 case SPEED_1000:
732 val |= MTK_QTX_SCH_MAX_RATE_EN |
733 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
734 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
735 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
736 break;
737 default:
738 break;
739 }
740 } else {
741 switch (speed) {
742 case SPEED_10:
743 val |= MTK_QTX_SCH_MAX_RATE_EN |
744 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
745 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
746 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
747 break;
748 case SPEED_100:
749 val |= MTK_QTX_SCH_MAX_RATE_EN |
750 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
751 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
752 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
753 break;
754 case SPEED_1000:
755 val |= MTK_QTX_SCH_MAX_RATE_EN |
756 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
757 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
758 FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
759 break;
760 default:
761 break;
762 }
763 }
764
765 ofs = MTK_QTX_OFFSET * idx;
766 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
767 }
768
769 static void mtk_mac_link_up(struct phylink_config *config,
770 struct phy_device *phy,
771 unsigned int mode, phy_interface_t interface,
772 int speed, int duplex, bool tx_pause, bool rx_pause)
773 {
774 struct mtk_mac *mac = container_of(config, struct mtk_mac,
775 phylink_config);
776 u32 mcr;
777
778 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
779 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
780 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
781 MAC_MCR_FORCE_RX_FC);
782
783 /* Configure speed */
784 mac->speed = speed;
785 switch (speed) {
786 case SPEED_2500:
787 case SPEED_1000:
788 mcr |= MAC_MCR_SPEED_1000;
789 break;
790 case SPEED_100:
791 mcr |= MAC_MCR_SPEED_100;
792 break;
793 }
794
795 /* Configure duplex */
796 if (duplex == DUPLEX_FULL)
797 mcr |= MAC_MCR_FORCE_DPX;
798
799 /* Configure pause modes - phylink will avoid these for half duplex */
800 if (tx_pause)
801 mcr |= MAC_MCR_FORCE_TX_FC;
802 if (rx_pause)
803 mcr |= MAC_MCR_FORCE_RX_FC;
804
805 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
806 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
807 }
808
809 static const struct phylink_mac_ops mtk_phylink_ops = {
810 .mac_select_pcs = mtk_mac_select_pcs,
811 .mac_config = mtk_mac_config,
812 .mac_finish = mtk_mac_finish,
813 .mac_link_down = mtk_mac_link_down,
814 .mac_link_up = mtk_mac_link_up,
815 };
816
817 static int mtk_mdio_init(struct mtk_eth *eth)
818 {
819 unsigned int max_clk = 2500000, divider;
820 struct device_node *mii_np;
821 int ret;
822 u32 val;
823
824 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
825 if (!mii_np) {
826 dev_err(eth->dev, "no %s child node found", "mdio-bus");
827 return -ENODEV;
828 }
829
830 if (!of_device_is_available(mii_np)) {
831 ret = -ENODEV;
832 goto err_put_node;
833 }
834
835 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
836 if (!eth->mii_bus) {
837 ret = -ENOMEM;
838 goto err_put_node;
839 }
840
841 eth->mii_bus->name = "mdio";
842 eth->mii_bus->read = mtk_mdio_read_c22;
843 eth->mii_bus->write = mtk_mdio_write_c22;
844 eth->mii_bus->read_c45 = mtk_mdio_read_c45;
845 eth->mii_bus->write_c45 = mtk_mdio_write_c45;
846 eth->mii_bus->priv = eth;
847 eth->mii_bus->parent = eth->dev;
848
849 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
850
851 if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
852 if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
853 dev_err(eth->dev, "MDIO clock frequency out of range");
854 ret = -EINVAL;
855 goto err_put_node;
856 }
857 max_clk = val;
858 }
859 divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
860
861 /* Configure MDC Turbo Mode */
862 if (mtk_is_netsys_v3_or_greater(eth))
863 mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
864
865 /* Configure MDC Divider */
866 val = FIELD_PREP(PPSC_MDC_CFG, divider);
867 if (!mtk_is_netsys_v3_or_greater(eth))
868 val |= PPSC_MDC_TURBO;
869 mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
870
871 dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
872
873 ret = of_mdiobus_register(eth->mii_bus, mii_np);
874
875 err_put_node:
876 of_node_put(mii_np);
877 return ret;
878 }
879
880 static void mtk_mdio_cleanup(struct mtk_eth *eth)
881 {
882 if (!eth->mii_bus)
883 return;
884
885 mdiobus_unregister(eth->mii_bus);
886 }
887
888 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
889 {
890 unsigned long flags;
891 u32 val;
892
893 spin_lock_irqsave(&eth->tx_irq_lock, flags);
894 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
895 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
896 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
897 }
898
899 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
900 {
901 unsigned long flags;
902 u32 val;
903
904 spin_lock_irqsave(&eth->tx_irq_lock, flags);
905 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
906 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
907 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
908 }
909
910 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
911 {
912 unsigned long flags;
913 u32 val;
914
915 spin_lock_irqsave(&eth->rx_irq_lock, flags);
916 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
917 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
918 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
919 }
920
921 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
922 {
923 unsigned long flags;
924 u32 val;
925
926 spin_lock_irqsave(&eth->rx_irq_lock, flags);
927 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
928 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
929 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
930 }
931
932 static int mtk_set_mac_address(struct net_device *dev, void *p)
933 {
934 int ret = eth_mac_addr(dev, p);
935 struct mtk_mac *mac = netdev_priv(dev);
936 struct mtk_eth *eth = mac->hw;
937 const char *macaddr = dev->dev_addr;
938
939 if (ret)
940 return ret;
941
942 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
943 return -EBUSY;
944
945 spin_lock_bh(&mac->hw->page_lock);
946 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
947 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
948 MT7628_SDM_MAC_ADRH);
949 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
950 (macaddr[4] << 8) | macaddr[5],
951 MT7628_SDM_MAC_ADRL);
952 } else {
953 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
954 MTK_GDMA_MAC_ADRH(mac->id));
955 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
956 (macaddr[4] << 8) | macaddr[5],
957 MTK_GDMA_MAC_ADRL(mac->id));
958 }
959 spin_unlock_bh(&mac->hw->page_lock);
960
961 return 0;
962 }
963
964 void mtk_stats_update_mac(struct mtk_mac *mac)
965 {
966 struct mtk_hw_stats *hw_stats = mac->hw_stats;
967 struct mtk_eth *eth = mac->hw;
968
969 u64_stats_update_begin(&hw_stats->syncp);
970
971 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
972 hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
973 hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
974 hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
975 hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
976 hw_stats->rx_checksum_errors +=
977 mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
978 } else {
979 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
980 unsigned int offs = hw_stats->reg_offset;
981 u64 stats;
982
983 hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
984 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
985 if (stats)
986 hw_stats->rx_bytes += (stats << 32);
987 hw_stats->rx_packets +=
988 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
989 hw_stats->rx_overflow +=
990 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
991 hw_stats->rx_fcs_errors +=
992 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
993 hw_stats->rx_short_errors +=
994 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
995 hw_stats->rx_long_errors +=
996 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
997 hw_stats->rx_checksum_errors +=
998 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
999 hw_stats->rx_flow_control_packets +=
1000 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
1001
1002 if (mtk_is_netsys_v3_or_greater(eth)) {
1003 hw_stats->tx_skip +=
1004 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
1005 hw_stats->tx_collisions +=
1006 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
1007 hw_stats->tx_bytes +=
1008 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
1009 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
1010 if (stats)
1011 hw_stats->tx_bytes += (stats << 32);
1012 hw_stats->tx_packets +=
1013 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
1014 } else {
1015 hw_stats->tx_skip +=
1016 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
1017 hw_stats->tx_collisions +=
1018 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
1019 hw_stats->tx_bytes +=
1020 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
1021 stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
1022 if (stats)
1023 hw_stats->tx_bytes += (stats << 32);
1024 hw_stats->tx_packets +=
1025 mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
1026 }
1027 }
1028
1029 u64_stats_update_end(&hw_stats->syncp);
1030 }
1031
1032 static void mtk_stats_update(struct mtk_eth *eth)
1033 {
1034 int i;
1035
1036 for (i = 0; i < MTK_MAX_DEVS; i++) {
1037 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1038 continue;
1039 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1040 mtk_stats_update_mac(eth->mac[i]);
1041 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1042 }
1043 }
1044 }
1045
1046 static void mtk_get_stats64(struct net_device *dev,
1047 struct rtnl_link_stats64 *storage)
1048 {
1049 struct mtk_mac *mac = netdev_priv(dev);
1050 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1051 unsigned int start;
1052
1053 if (netif_running(dev) && netif_device_present(dev)) {
1054 if (spin_trylock_bh(&hw_stats->stats_lock)) {
1055 mtk_stats_update_mac(mac);
1056 spin_unlock_bh(&hw_stats->stats_lock);
1057 }
1058 }
1059
1060 do {
1061 start = u64_stats_fetch_begin(&hw_stats->syncp);
1062 storage->rx_packets = hw_stats->rx_packets;
1063 storage->tx_packets = hw_stats->tx_packets;
1064 storage->rx_bytes = hw_stats->rx_bytes;
1065 storage->tx_bytes = hw_stats->tx_bytes;
1066 storage->collisions = hw_stats->tx_collisions;
1067 storage->rx_length_errors = hw_stats->rx_short_errors +
1068 hw_stats->rx_long_errors;
1069 storage->rx_over_errors = hw_stats->rx_overflow;
1070 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
1071 storage->rx_errors = hw_stats->rx_checksum_errors;
1072 storage->tx_aborted_errors = hw_stats->tx_skip;
1073 } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
1074
1075 storage->tx_errors = dev->stats.tx_errors;
1076 storage->rx_dropped = dev->stats.rx_dropped;
1077 storage->tx_dropped = dev->stats.tx_dropped;
1078 }
1079
1080 static inline int mtk_max_frag_size(int mtu)
1081 {
1082 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
1083 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
1084 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
1085
1086 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
1087 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1088 }
1089
1090 static inline int mtk_max_buf_size(int frag_size)
1091 {
1092 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
1093 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1094
1095 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
1096
1097 return buf_size;
1098 }
1099
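/* Snapshot a RX descriptor into @rxd; rxd2 is read first as it carries the DMA done bit */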
1100 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
1101 struct mtk_rx_dma_v2 *dma_rxd)
1102 {
1103 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
1104 if (!(rxd->rxd2 & RX_DMA_DONE))
1105 return false;
1106
1107 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
1108 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
1109 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
1110 if (mtk_is_netsys_v3_or_greater(eth)) {
1111 rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
1112 rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
1113 }
1114
1115 return true;
1116 }
1117
1118 static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
1119 {
1120 unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
1121 unsigned long data;
1122
1123 data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
1124 get_order(size));
1125
1126 return (void *)data;
1127 }
1128
1129 /* the QDMA core needs scratch memory to be set up */
1130 static int mtk_init_fq_dma(struct mtk_eth *eth)
1131 {
1132 const struct mtk_soc_data *soc = eth->soc;
1133 dma_addr_t phy_ring_tail;
1134 int cnt = soc->tx.fq_dma_size;
1135 dma_addr_t dma_addr;
1136 int i, j, len;
1137
1138 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
1139 eth->scratch_ring = eth->sram_base;
1140 else
1141 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
1142 cnt * soc->tx.desc_size,
1143 &eth->phy_scratch_ring,
1144 GFP_KERNEL);
1145
1146 if (unlikely(!eth->scratch_ring))
1147 return -ENOMEM;
1148
1149 phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
1150
1151 for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
1152 len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
1153 eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
1154
1155 if (unlikely(!eth->scratch_head[j]))
1156 return -ENOMEM;
1157
1158 dma_addr = dma_map_single(eth->dma_dev,
1159 eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
1160 DMA_FROM_DEVICE);
1161
1162 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
1163 return -ENOMEM;
1164
1165 for (i = 0; i < len; i++) {
1166 struct mtk_tx_dma_v2 *txd;
1167
1168 txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
1169 txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
1170 if (j * MTK_FQ_DMA_LENGTH + i < cnt)
1171 txd->txd2 = eth->phy_scratch_ring +
1172 (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
1173
1174 txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
1175 if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
1176 txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
1177
1178 txd->txd4 = 0;
1179 if (mtk_is_netsys_v2_or_greater(eth)) {
1180 txd->txd5 = 0;
1181 txd->txd6 = 0;
1182 txd->txd7 = 0;
1183 txd->txd8 = 0;
1184 }
1185 }
1186 }
1187
1188 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
1189 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
1190 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
1191 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
1192
1193 return 0;
1194 }
1195
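/* Translate a QDMA descriptor bus address back into its CPU pointer within the ring */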
1196 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
1197 {
1198 return ring->dma + (desc - ring->phys);
1199 }
1200
1201 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
1202 void *txd, u32 txd_size)
1203 {
1204 int idx = (txd - ring->dma) / txd_size;
1205
1206 return &ring->buf[idx];
1207 }
1208
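/* Translate a QDMA descriptor pointer to the PDMA shadow descriptor at the same ring index */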
1209 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
1210 struct mtk_tx_dma *dma)
1211 {
1212 return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
1213 }
1214
1215 static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
1216 {
1217 return (dma - ring->dma) / txd_size;
1218 }
1219
1220 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1221 struct xdp_frame_bulk *bq, bool napi)
1222 {
1223 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1224 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1225 dma_unmap_single(eth->dma_dev,
1226 dma_unmap_addr(tx_buf, dma_addr0),
1227 dma_unmap_len(tx_buf, dma_len0),
1228 DMA_TO_DEVICE);
1229 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1230 dma_unmap_page(eth->dma_dev,
1231 dma_unmap_addr(tx_buf, dma_addr0),
1232 dma_unmap_len(tx_buf, dma_len0),
1233 DMA_TO_DEVICE);
1234 }
1235 } else {
1236 if (dma_unmap_len(tx_buf, dma_len0)) {
1237 dma_unmap_page(eth->dma_dev,
1238 dma_unmap_addr(tx_buf, dma_addr0),
1239 dma_unmap_len(tx_buf, dma_len0),
1240 DMA_TO_DEVICE);
1241 }
1242
1243 if (dma_unmap_len(tx_buf, dma_len1)) {
1244 dma_unmap_page(eth->dma_dev,
1245 dma_unmap_addr(tx_buf, dma_addr1),
1246 dma_unmap_len(tx_buf, dma_len1),
1247 DMA_TO_DEVICE);
1248 }
1249 }
1250
1251 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1252 if (tx_buf->type == MTK_TYPE_SKB) {
1253 struct sk_buff *skb = tx_buf->data;
1254
1255 if (napi)
1256 napi_consume_skb(skb, napi);
1257 else
1258 dev_kfree_skb_any(skb);
1259 } else {
1260 struct xdp_frame *xdpf = tx_buf->data;
1261
1262 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1263 xdp_return_frame_rx_napi(xdpf);
1264 else if (bq)
1265 xdp_return_frame_bulk(xdpf, bq);
1266 else
1267 xdp_return_frame(xdpf);
1268 }
1269 }
1270 tx_buf->flags = 0;
1271 tx_buf->data = NULL;
1272 }
1273
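/* Record DMA unmap info for @tx_buf. On PDMA each descriptor carries two
 * buffers: even indices use txd1/PLEN0, odd indices use txd3/PLEN1.
 */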
1274 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1275 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
1276 size_t size, int idx)
1277 {
1278 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1279 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1280 dma_unmap_len_set(tx_buf, dma_len0, size);
1281 } else {
1282 if (idx & 1) {
1283 txd->txd3 = mapped_addr;
1284 txd->txd2 |= TX_DMA_PLEN1(size);
1285 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1286 dma_unmap_len_set(tx_buf, dma_len1, size);
1287 } else {
1288 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1289 txd->txd1 = mapped_addr;
1290 txd->txd2 = TX_DMA_PLEN0(size);
1291 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1292 dma_unmap_len_set(tx_buf, dma_len0, size);
1293 }
1294 }
1295 }
1296
1297 static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
1298 struct mtk_tx_dma_desc_info *info)
1299 {
1300 struct mtk_mac *mac = netdev_priv(dev);
1301 struct mtk_eth *eth = mac->hw;
1302 struct mtk_tx_dma *desc = txd;
1303 u32 data;
1304
1305 WRITE_ONCE(desc->txd1, info->addr);
1306
1307 data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
1308 FIELD_PREP(TX_DMA_PQID, info->qid);
1309 if (info->last)
1310 data |= TX_DMA_LS0;
1311 WRITE_ONCE(desc->txd3, data);
1312
1313 data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
1314 if (info->first) {
1315 if (info->gso)
1316 data |= TX_DMA_TSO;
1317 /* tx checksum offload */
1318 if (info->csum)
1319 data |= TX_DMA_CHKSUM;
1320 /* vlan header offload */
1321 if (info->vlan)
1322 data |= TX_DMA_INS_VLAN | info->vlan_tci;
1323 }
1324 WRITE_ONCE(desc->txd4, data);
1325 }
1326
1327 static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
1328 struct mtk_tx_dma_desc_info *info)
1329 {
1330 struct mtk_mac *mac = netdev_priv(dev);
1331 struct mtk_tx_dma_v2 *desc = txd;
1332 struct mtk_eth *eth = mac->hw;
1333 u32 data;
1334
1335 WRITE_ONCE(desc->txd1, info->addr);
1336
1337 data = TX_DMA_PLEN0(info->size);
1338 if (info->last)
1339 data |= TX_DMA_LS0;
1340
1341 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
1342 data |= TX_DMA_PREP_ADDR64(info->addr);
1343
1344 WRITE_ONCE(desc->txd3, data);
1345
1346 /* set forward port */
1347 switch (mac->id) {
1348 case MTK_GMAC1_ID:
1349 data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
1350 break;
1351 case MTK_GMAC2_ID:
1352 data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
1353 break;
1354 case MTK_GMAC3_ID:
1355 data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
1356 break;
1357 }
1358
1359 data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
1360 WRITE_ONCE(desc->txd4, data);
1361
1362 data = 0;
1363 if (info->first) {
1364 if (info->gso)
1365 data |= TX_DMA_TSO_V2;
1366 /* tx checksum offload */
1367 if (info->csum)
1368 data |= TX_DMA_CHKSUM_V2;
1369 if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
1370 data |= TX_DMA_SPTAG_V3;
1371 }
1372 WRITE_ONCE(desc->txd5, data);
1373
1374 data = 0;
1375 if (info->first && info->vlan)
1376 data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
1377 WRITE_ONCE(desc->txd6, data);
1378
1379 WRITE_ONCE(desc->txd7, 0);
1380 WRITE_ONCE(desc->txd8, 0);
1381 }
1382
1383 static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
1384 struct mtk_tx_dma_desc_info *info)
1385 {
1386 struct mtk_mac *mac = netdev_priv(dev);
1387 struct mtk_eth *eth = mac->hw;
1388
1389 if (mtk_is_netsys_v2_or_greater(eth))
1390 mtk_tx_set_dma_desc_v2(dev, txd, info);
1391 else
1392 mtk_tx_set_dma_desc_v1(dev, txd, info);
1393 }
1394
1395 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
1396 int tx_num, struct mtk_tx_ring *ring, bool gso)
1397 {
1398 struct mtk_tx_dma_desc_info txd_info = {
1399 .size = skb_headlen(skb),
1400 .gso = gso,
1401 .csum = skb->ip_summed == CHECKSUM_PARTIAL,
1402 .vlan = skb_vlan_tag_present(skb),
1403 .qid = skb_get_queue_mapping(skb),
1404 .vlan_tci = skb_vlan_tag_get(skb),
1405 .first = true,
1406 .last = !skb_is_nonlinear(skb),
1407 };
1408 struct netdev_queue *txq;
1409 struct mtk_mac *mac = netdev_priv(dev);
1410 struct mtk_eth *eth = mac->hw;
1411 const struct mtk_soc_data *soc = eth->soc;
1412 struct mtk_tx_dma *itxd, *txd;
1413 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
1414 struct mtk_tx_buf *itx_buf, *tx_buf;
1415 int i, n_desc = 1;
1416 int queue = skb_get_queue_mapping(skb);
1417 int k = 0;
1418
1419 txq = netdev_get_tx_queue(dev, queue);
1420 itxd = ring->next_free;
1421 itxd_pdma = qdma_to_pdma(ring, itxd);
1422 if (itxd == ring->last_free)
1423 return -ENOMEM;
1424
1425 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1426 memset(itx_buf, 0, sizeof(*itx_buf));
1427
1428 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
1429 DMA_TO_DEVICE);
1430 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1431 return -ENOMEM;
1432
1433 mtk_tx_set_dma_desc(dev, itxd, &txd_info);
1434
1435 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1436 itx_buf->mac_id = mac->id;
1437 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
1438 k++);
1439
1440 /* TX SG offload */
1441 txd = itxd;
1442 txd_pdma = qdma_to_pdma(ring, txd);
1443
1444 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1445 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1446 unsigned int offset = 0;
1447 int frag_size = skb_frag_size(frag);
1448
1449 while (frag_size) {
1450 bool new_desc = true;
1451
1452 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
1453 (i & 0x1)) {
1454 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1455 txd_pdma = qdma_to_pdma(ring, txd);
1456 if (txd == ring->last_free)
1457 goto err_dma;
1458
1459 n_desc++;
1460 } else {
1461 new_desc = false;
1462 }
1463
1464 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1465 txd_info.size = min_t(unsigned int, frag_size,
1466 soc->tx.dma_max_len);
1467 txd_info.qid = queue;
1468 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
1469 !(frag_size - txd_info.size);
1470 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
1471 offset, txd_info.size,
1472 DMA_TO_DEVICE);
1473 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
1474 goto err_dma;
1475
1476 mtk_tx_set_dma_desc(dev, txd, &txd_info);
1477
1478 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1479 soc->tx.desc_size);
1480 if (new_desc)
1481 memset(tx_buf, 0, sizeof(*tx_buf));
1482 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1483 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1484 tx_buf->mac_id = mac->id;
1485
1486 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1487 txd_info.size, k++);
1488
1489 frag_size -= txd_info.size;
1490 offset += txd_info.size;
1491 }
1492 }
1493
1494 /* store skb to cleanup */
1495 itx_buf->type = MTK_TYPE_SKB;
1496 itx_buf->data = skb;
1497
1498 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1499 if (k & 0x1)
1500 txd_pdma->txd2 |= TX_DMA_LS0;
1501 else
1502 txd_pdma->txd2 |= TX_DMA_LS1;
1503 }
1504
1505 netdev_tx_sent_queue(txq, skb->len);
1506 skb_tx_timestamp(skb);
1507
1508 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1509 atomic_sub(n_desc, &ring->free_count);
1510
1511 /* make sure that all changes to the dma ring are flushed before we
1512 * continue
1513 */
1514 wmb();
1515
1516 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1517 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
1518 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1519 } else {
1520 int next_idx;
1521
1522 next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
1523 ring->dma_size);
1524 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1525 }
1526
1527 return 0;
1528
1529 err_dma:
1530 do {
1531 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1532
1533 /* unmap dma */
1534 mtk_tx_unmap(eth, tx_buf, NULL, false);
1535
1536 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1537 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
1538 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1539
1540 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1541 itxd_pdma = qdma_to_pdma(ring, itxd);
1542 } while (itxd != txd);
1543
1544 return -ENOMEM;
1545 }
1546
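/* Worst-case number of TX descriptors needed for @skb: GSO fragments may have to be split into chunks of at most dma_max_len bytes */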
1547 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
1548 {
1549 int i, nfrags = 1;
1550 skb_frag_t *frag;
1551
1552 if (skb_is_gso(skb)) {
1553 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1554 frag = &skb_shinfo(skb)->frags[i];
1555 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1556 eth->soc->tx.dma_max_len);
1557 }
1558 } else {
1559 nfrags += skb_shinfo(skb)->nr_frags;
1560 }
1561
1562 return nfrags;
1563 }
1564
1565 static int mtk_queue_stopped(struct mtk_eth *eth)
1566 {
1567 int i;
1568
1569 for (i = 0; i < MTK_MAX_DEVS; i++) {
1570 if (!eth->netdev[i])
1571 continue;
1572 if (netif_queue_stopped(eth->netdev[i]))
1573 return 1;
1574 }
1575
1576 return 0;
1577 }
1578
1579 static void mtk_wake_queue(struct mtk_eth *eth)
1580 {
1581 int i;
1582
1583 for (i = 0; i < MTK_MAX_DEVS; i++) {
1584 if (!eth->netdev[i])
1585 continue;
1586 netif_tx_wake_all_queues(eth->netdev[i]);
1587 }
1588 }
1589
1590 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1591 {
1592 struct mtk_mac *mac = netdev_priv(dev);
1593 struct mtk_eth *eth = mac->hw;
1594 struct mtk_tx_ring *ring = &eth->tx_ring;
1595 struct net_device_stats *stats = &dev->stats;
1596 bool gso = false;
1597 int tx_num;
1598
1599 /* normally we can rely on the stack not calling this more than once,
1600 * however we have 2 queues running on the same ring so we need to lock
1601 * the ring access
1602 */
1603 spin_lock(&eth->page_lock);
1604
1605 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1606 goto drop;
1607
1608 tx_num = mtk_cal_txd_req(eth, skb);
1609 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1610 netif_tx_stop_all_queues(dev);
1611 netif_err(eth, tx_queued, dev,
1612 "Tx Ring full when queue awake!\n");
1613 spin_unlock(&eth->page_lock);
1614 return NETDEV_TX_BUSY;
1615 }
1616
1617 /* TSO: fill MSS info in tcp checksum field */
1618 if (skb_is_gso(skb)) {
1619 if (skb_cow_head(skb, 0)) {
1620 netif_warn(eth, tx_err, dev,
1621 "GSO expand head fail.\n");
1622 goto drop;
1623 }
1624
1625 if (skb_shinfo(skb)->gso_type &
1626 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1627 gso = true;
1628 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1629 }
1630 }
1631
1632 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1633 goto drop;
1634
1635 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1636 netif_tx_stop_all_queues(dev);
1637
1638 spin_unlock(&eth->page_lock);
1639
1640 return NETDEV_TX_OK;
1641
1642 drop:
1643 spin_unlock(&eth->page_lock);
1644 stats->tx_dropped++;
1645 dev_kfree_skb_any(skb);
1646 return NETDEV_TX_OK;
1647 }
1648
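/* Without hardware LRO all traffic uses RX ring 0; with LRO, return the first ring with a completed descriptor pending, or NULL if none */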
1649 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1650 {
1651 int i;
1652 struct mtk_rx_ring *ring;
1653 int idx;
1654
1655 if (!eth->hwlro)
1656 return &eth->rx_ring[0];
1657
1658 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1659 struct mtk_rx_dma *rxd;
1660
1661 ring = &eth->rx_ring[i];
1662 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1663 rxd = ring->dma + idx * eth->soc->rx.desc_size;
1664 if (rxd->rxd2 & RX_DMA_DONE) {
1665 ring->calc_idx_update = true;
1666 return ring;
1667 }
1668 }
1669
1670 return NULL;
1671 }
1672
1673 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1674 {
1675 struct mtk_rx_ring *ring;
1676 int i;
1677
1678 if (!eth->hwlro) {
1679 ring = &eth->rx_ring[0];
1680 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1681 } else {
1682 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1683 ring = &eth->rx_ring[i];
1684 if (ring->calc_idx_update) {
1685 ring->calc_idx_update = false;
1686 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1687 }
1688 }
1689 }
1690 }
1691
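/* page_pool backed RX buffers are only used on NETSYS v2 and newer SoCs */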
1692 static bool mtk_page_pool_enabled(struct mtk_eth *eth)
1693 {
1694 return mtk_is_netsys_v2_or_greater(eth);
1695 }
1696
1697 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
1698 struct xdp_rxq_info *xdp_q,
1699 int id, int size)
1700 {
1701 struct page_pool_params pp_params = {
1702 .order = 0,
1703 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1704 .pool_size = size,
1705 .nid = NUMA_NO_NODE,
1706 .dev = eth->dma_dev,
1707 .offset = MTK_PP_HEADROOM,
1708 .max_len = MTK_PP_MAX_BUF_SIZE,
1709 };
1710 struct page_pool *pp;
1711 int err;
1712
1713 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
1714 : DMA_FROM_DEVICE;
1715 pp = page_pool_create(&pp_params);
1716 if (IS_ERR(pp))
1717 return pp;
1718
1719 err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
1720 eth->rx_napi.napi_id, PAGE_SIZE);
1721 if (err < 0)
1722 goto err_free_pp;
1723
1724 err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
1725 if (err)
1726 goto err_unregister_rxq;
1727
1728 return pp;
1729
1730 err_unregister_rxq:
1731 xdp_rxq_info_unreg(xdp_q);
1732 err_free_pp:
1733 page_pool_destroy(pp);
1734
1735 return ERR_PTR(err);
1736 }
1737
1738 static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1739 gfp_t gfp_mask)
1740 {
1741 struct page *page;
1742
1743 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1744 if (!page)
1745 return NULL;
1746
1747 *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
1748 return page_address(page);
1749 }
1750
1751 static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
1752 {
1753 if (ring->page_pool)
1754 page_pool_put_full_page(ring->page_pool,
1755 virt_to_head_page(data), napi);
1756 else
1757 skb_free_frag(data);
1758 }
1759
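/* Map one XDP buffer for TX: ndo_xdp_xmit frames get a fresh DMA mapping, while XDP_TX reuses the page_pool mapping and only syncs it for the device */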
1760 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
1761 struct mtk_tx_dma_desc_info *txd_info,
1762 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1763 void *data, u16 headroom, int index, bool dma_map)
1764 {
1765 struct mtk_tx_ring *ring = &eth->tx_ring;
1766 struct mtk_mac *mac = netdev_priv(dev);
1767 struct mtk_tx_dma *txd_pdma;
1768
1769 if (dma_map) { /* ndo_xdp_xmit */
1770 txd_info->addr = dma_map_single(eth->dma_dev, data,
1771 txd_info->size, DMA_TO_DEVICE);
1772 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
1773 return -ENOMEM;
1774
1775 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1776 } else {
1777 struct page *page = virt_to_head_page(data);
1778
1779 txd_info->addr = page_pool_get_dma_addr(page) +
1780 sizeof(struct xdp_frame) + headroom;
1781 dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
1782 txd_info->size, DMA_BIDIRECTIONAL);
1783 }
1784 mtk_tx_set_dma_desc(dev, txd, txd_info);
1785
1786 tx_buf->mac_id = mac->id;
1787 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1788 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1789
1790 txd_pdma = qdma_to_pdma(ring, txd);
1791 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
1792 index);
1793
1794 return 0;
1795 }
1796
1797 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
1798 struct net_device *dev, bool dma_map)
1799 {
1800 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
1801 const struct mtk_soc_data *soc = eth->soc;
1802 struct mtk_tx_ring *ring = &eth->tx_ring;
1803 struct mtk_mac *mac = netdev_priv(dev);
1804 struct mtk_tx_dma_desc_info txd_info = {
1805 .size = xdpf->len,
1806 .first = true,
1807 .last = !xdp_frame_has_frags(xdpf),
1808 .qid = mac->id,
1809 };
1810 int err, index = 0, n_desc = 1, nr_frags;
1811 struct mtk_tx_buf *htx_buf, *tx_buf;
1812 struct mtk_tx_dma *htxd, *txd;
1813 void *data = xdpf->data;
1814
1815 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1816 return -EBUSY;
1817
1818 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
1819 if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
1820 return -EBUSY;
1821
1822 spin_lock(&eth->page_lock);
1823
1824 txd = ring->next_free;
1825 if (txd == ring->last_free) {
1826 spin_unlock(&eth->page_lock);
1827 return -ENOMEM;
1828 }
1829 htxd = txd;
1830
1831 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
1832 memset(tx_buf, 0, sizeof(*tx_buf));
1833 htx_buf = tx_buf;
1834
1835 for (;;) {
1836 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1837 data, xdpf->headroom, index, dma_map);
1838 if (err < 0)
1839 goto unmap;
1840
1841 if (txd_info.last)
1842 break;
1843
1844 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
1845 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
1846 if (txd == ring->last_free)
1847 goto unmap;
1848
1849 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1850 soc->tx.desc_size);
1851 memset(tx_buf, 0, sizeof(*tx_buf));
1852 n_desc++;
1853 }
1854
1855 memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
1856 txd_info.size = skb_frag_size(&sinfo->frags[index]);
1857 txd_info.last = index + 1 == nr_frags;
1858 txd_info.qid = mac->id;
1859 data = skb_frag_address(&sinfo->frags[index]);
1860
1861 index++;
1862 }
1863 /* store xdpf for cleanup */
1864 htx_buf->data = xdpf;
1865
1866 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1867 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
1868
1869 if (index & 1)
1870 txd_pdma->txd2 |= TX_DMA_LS0;
1871 else
1872 txd_pdma->txd2 |= TX_DMA_LS1;
1873 }
1874
1875 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1876 atomic_sub(n_desc, &ring->free_count);
1877
1878 /* make sure that all changes to the dma ring are flushed before we
1879 * continue
1880 */
1881 wmb();
1882
1883 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1884 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
1885 } else {
1886 int idx;
1887
1888 idx = txd_to_idx(ring, txd, soc->tx.desc_size);
1889 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
1890 MT7628_TX_CTX_IDX0);
1891 }
1892
1893 spin_unlock(&eth->page_lock);
1894
1895 return 0;
1896
1897 unmap:
1898 while (htxd != txd) {
1899 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
1900 mtk_tx_unmap(eth, tx_buf, NULL, false);
1901
1902 htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1903 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
1904 struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
1905
1906 txd_pdma->txd2 = TX_DMA_DESP2_DEF;
1907 }
1908
1909 htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
1910 }
1911
1912 	spin_unlock(&eth->page_lock);
1913
1914 return err;
1915 }
1916
1917 static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
1918 struct xdp_frame **frames, u32 flags)
1919 {
1920 struct mtk_mac *mac = netdev_priv(dev);
1921 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1922 struct mtk_eth *eth = mac->hw;
1923 int i, nxmit = 0;
1924
1925 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1926 return -EINVAL;
1927
1928 for (i = 0; i < num_frame; i++) {
1929 if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
1930 break;
1931 nxmit++;
1932 }
1933
1934 u64_stats_update_begin(&hw_stats->syncp);
1935 hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
1936 hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
1937 u64_stats_update_end(&hw_stats->syncp);
1938
1939 return nxmit;
1940 }
1941
1942 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
1943 struct xdp_buff *xdp, struct net_device *dev)
1944 {
1945 struct mtk_mac *mac = netdev_priv(dev);
1946 struct mtk_hw_stats *hw_stats = mac->hw_stats;
1947 u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
1948 struct bpf_prog *prog;
1949 u32 act = XDP_PASS;
1950
1951 rcu_read_lock();
1952
1953 prog = rcu_dereference(eth->prog);
1954 if (!prog)
1955 goto out;
1956
1957 act = bpf_prog_run_xdp(prog, xdp);
1958 switch (act) {
1959 case XDP_PASS:
1960 count = &hw_stats->xdp_stats.rx_xdp_pass;
1961 goto update_stats;
1962 case XDP_REDIRECT:
1963 if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
1964 act = XDP_DROP;
1965 break;
1966 }
1967
1968 count = &hw_stats->xdp_stats.rx_xdp_redirect;
1969 goto update_stats;
1970 case XDP_TX: {
1971 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
1972
1973 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
1974 count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
1975 act = XDP_DROP;
1976 break;
1977 }
1978
1979 count = &hw_stats->xdp_stats.rx_xdp_tx;
1980 goto update_stats;
1981 }
1982 default:
1983 bpf_warn_invalid_xdp_action(dev, prog, act);
1984 fallthrough;
1985 case XDP_ABORTED:
1986 trace_xdp_exception(dev, prog, act);
1987 fallthrough;
1988 case XDP_DROP:
1989 break;
1990 }
1991
1992 page_pool_put_full_page(ring->page_pool,
1993 virt_to_head_page(xdp->data), true);
1994
1995 update_stats:
1996 u64_stats_update_begin(&hw_stats->syncp);
1997 *count = *count + 1;
1998 u64_stats_update_end(&hw_stats->syncp);
1999 out:
2000 rcu_read_unlock();
2001
2002 return act;
2003 }
2004
2005 static int mtk_poll_rx(struct napi_struct *napi, int budget,
2006 struct mtk_eth *eth)
2007 {
2008 struct dim_sample dim_sample = {};
2009 struct mtk_rx_ring *ring;
2010 bool xdp_flush = false;
2011 int idx;
2012 struct sk_buff *skb;
2013 u64 addr64 = 0;
2014 u8 *data, *new_data;
2015 struct mtk_rx_dma_v2 *rxd, trxd;
2016 int done = 0, bytes = 0;
2017 dma_addr_t dma_addr = DMA_MAPPING_ERROR;
2018
2019 while (done < budget) {
2020 unsigned int pktlen, *rxdcsum;
2021 struct net_device *netdev;
2022 u32 hash, reason;
2023 int mac = 0;
2024
2025 ring = mtk_get_rx_ring(eth);
2026 if (unlikely(!ring))
2027 goto rx_done;
2028
2029 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
2030 rxd = ring->dma + idx * eth->soc->rx.desc_size;
2031 data = ring->data[idx];
2032
2033 if (!mtk_rx_get_desc(eth, &trxd, rxd))
2034 break;
2035
2036 		/* find out which mac the packet comes from; values start at 1 */
2037 if (mtk_is_netsys_v3_or_greater(eth)) {
2038 u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
2039
2040 switch (val) {
2041 case PSE_GDM1_PORT:
2042 case PSE_GDM2_PORT:
2043 mac = val - 1;
2044 break;
2045 case PSE_GDM3_PORT:
2046 mac = MTK_GMAC3_ID;
2047 break;
2048 default:
2049 break;
2050 }
2051 } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
2052 !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
2053 mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
2054 }
2055
2056 if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
2057 !eth->netdev[mac]))
2058 goto release_desc;
2059
2060 netdev = eth->netdev[mac];
2061
2062 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
2063 goto release_desc;
2064
2065 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
2066
2067 /* alloc new buffer */
2068 if (ring->page_pool) {
2069 struct page *page = virt_to_head_page(data);
2070 struct xdp_buff xdp;
2071 u32 ret;
2072
2073 new_data = mtk_page_pool_get_buff(ring->page_pool,
2074 &dma_addr,
2075 GFP_ATOMIC);
2076 if (unlikely(!new_data)) {
2077 netdev->stats.rx_dropped++;
2078 goto release_desc;
2079 }
2080
2081 dma_sync_single_for_cpu(eth->dma_dev,
2082 page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
2083 pktlen, page_pool_get_dma_dir(ring->page_pool));
2084
2085 xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
2086 xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
2087 false);
2088 xdp_buff_clear_frags_flag(&xdp);
2089
2090 ret = mtk_xdp_run(eth, ring, &xdp, netdev);
2091 if (ret == XDP_REDIRECT)
2092 xdp_flush = true;
2093
2094 if (ret != XDP_PASS)
2095 goto skip_rx;
2096
2097 skb = build_skb(data, PAGE_SIZE);
2098 if (unlikely(!skb)) {
2099 page_pool_put_full_page(ring->page_pool,
2100 page, true);
2101 netdev->stats.rx_dropped++;
2102 goto skip_rx;
2103 }
2104
2105 skb_reserve(skb, xdp.data - xdp.data_hard_start);
2106 skb_put(skb, xdp.data_end - xdp.data);
2107 skb_mark_for_recycle(skb);
2108 } else {
2109 if (ring->frag_size <= PAGE_SIZE)
2110 new_data = napi_alloc_frag(ring->frag_size);
2111 else
2112 new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
2113
2114 if (unlikely(!new_data)) {
2115 netdev->stats.rx_dropped++;
2116 goto release_desc;
2117 }
2118
2119 dma_addr = dma_map_single(eth->dma_dev,
2120 new_data + NET_SKB_PAD + eth->ip_align,
2121 ring->buf_size, DMA_FROM_DEVICE);
2122 if (unlikely(dma_mapping_error(eth->dma_dev,
2123 dma_addr))) {
2124 skb_free_frag(new_data);
2125 netdev->stats.rx_dropped++;
2126 goto release_desc;
2127 }
2128
2129 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2130 addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
2131
2132 dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
2133 ring->buf_size, DMA_FROM_DEVICE);
2134
2135 skb = build_skb(data, ring->frag_size);
2136 if (unlikely(!skb)) {
2137 netdev->stats.rx_dropped++;
2138 skb_free_frag(data);
2139 goto skip_rx;
2140 }
2141
2142 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
2143 skb_put(skb, pktlen);
2144 }
2145
2146 skb->dev = netdev;
2147 bytes += skb->len;
2148
2149 if (mtk_is_netsys_v3_or_greater(eth)) {
2150 reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
2151 hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
2152 if (hash != MTK_RXD5_FOE_ENTRY)
2153 skb_set_hash(skb, jhash_1word(hash, 0),
2154 PKT_HASH_TYPE_L4);
2155 rxdcsum = &trxd.rxd3;
2156 } else {
2157 reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
2158 hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
2159 if (hash != MTK_RXD4_FOE_ENTRY)
2160 skb_set_hash(skb, jhash_1word(hash, 0),
2161 PKT_HASH_TYPE_L4);
2162 rxdcsum = &trxd.rxd4;
2163 }
2164
2165 if (*rxdcsum & eth->soc->rx.dma_l4_valid)
2166 skb->ip_summed = CHECKSUM_UNNECESSARY;
2167 else
2168 skb_checksum_none_assert(skb);
2169 skb->protocol = eth_type_trans(skb, netdev);
2170
2171 /* When using VLAN untagging in combination with DSA, the
2172 * hardware treats the MTK special tag as a VLAN and untags it.
2173 */
2174 if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
2175 netdev_uses_dsa(netdev)) {
2176 unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
2177
2178 if (port < ARRAY_SIZE(eth->dsa_meta) &&
2179 eth->dsa_meta[port])
2180 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
2181 }
2182
2183 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
2184 mtk_ppe_check_skb(eth->ppe[0], skb, hash);
2185
2186 skb_record_rx_queue(skb, 0);
2187 napi_gro_receive(napi, skb);
2188
2189 skip_rx:
2190 ring->data[idx] = new_data;
2191 rxd->rxd1 = (unsigned int)dma_addr;
2192 release_desc:
2193 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2194 rxd->rxd2 = RX_DMA_LSO;
2195 else
2196 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2197
2198 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
2199 likely(dma_addr != DMA_MAPPING_ERROR))
2200 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2201
2202 ring->calc_idx = idx;
2203 done++;
2204 }
2205
2206 rx_done:
2207 if (done) {
2208 /* make sure that all changes to the dma ring are flushed before
2209 * we continue
2210 */
2211 wmb();
2212 mtk_update_rx_cpu_idx(eth);
2213 }
2214
2215 eth->rx_packets += done;
2216 eth->rx_bytes += bytes;
2217 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
2218 &dim_sample);
2219 	net_dim(&eth->rx_dim, dim_sample);
2220
2221 if (xdp_flush)
2222 xdp_do_flush();
2223
2224 return done;
2225 }
2226
2227 struct mtk_poll_state {
2228 struct netdev_queue *txq;
2229 unsigned int total;
2230 unsigned int done;
2231 unsigned int bytes;
2232 };
2233
2234 static void
2235 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
2236 struct sk_buff *skb)
2237 {
2238 struct netdev_queue *txq;
2239 struct net_device *dev;
2240 unsigned int bytes = skb->len;
2241
2242 state->total++;
2243 eth->tx_packets++;
2244 eth->tx_bytes += bytes;
2245
2246 dev = eth->netdev[mac];
2247 if (!dev)
2248 return;
2249
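	/* Batch BQL completion updates per TX queue: keep accumulating while
	 * the queue is unchanged and flush the previous queue's totals once
	 * it changes.
	 */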
2250 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2251 if (state->txq == txq) {
2252 state->done++;
2253 state->bytes += bytes;
2254 return;
2255 }
2256
2257 if (state->txq)
2258 netdev_tx_completed_queue(state->txq, state->done, state->bytes);
2259
2260 state->txq = txq;
2261 state->done = 1;
2262 state->bytes = bytes;
2263 }
2264
2265 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
2266 struct mtk_poll_state *state)
2267 {
2268 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2269 	struct mtk_tx_ring *ring = &eth->tx_ring;
2270 struct mtk_tx_buf *tx_buf;
2271 struct xdp_frame_bulk bq;
2272 struct mtk_tx_dma *desc;
2273 u32 cpu, dma;
2274
2275 cpu = ring->last_free_ptr;
2276 dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
2277
2278 desc = mtk_qdma_phys_to_virt(ring, cpu);
2279 xdp_frame_bulk_init(&bq);
2280
2281 while ((cpu != dma) && budget) {
2282 u32 next_cpu = desc->txd2;
2283
2284 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
2285 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
2286 break;
2287
2288 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2289 eth->soc->tx.desc_size);
2290 if (!tx_buf->data)
2291 break;
2292
2293 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2294 if (tx_buf->type == MTK_TYPE_SKB)
2295 mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2296 tx_buf->data);
2297
2298 budget--;
2299 }
2300 mtk_tx_unmap(eth, tx_buf, &bq, true);
2301
2302 ring->last_free = desc;
2303 atomic_inc(&ring->free_count);
2304
2305 cpu = next_cpu;
2306 }
2307 xdp_flush_frame_bulk(&bq);
2308
2309 ring->last_free_ptr = cpu;
2310 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
2311
2312 return budget;
2313 }
2314
2315 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
2316 struct mtk_poll_state *state)
2317 {
2318 	struct mtk_tx_ring *ring = &eth->tx_ring;
2319 struct mtk_tx_buf *tx_buf;
2320 struct xdp_frame_bulk bq;
2321 struct mtk_tx_dma *desc;
2322 u32 cpu, dma;
2323
2324 cpu = ring->cpu_idx;
2325 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
2326 xdp_frame_bulk_init(&bq);
2327
2328 while ((cpu != dma) && budget) {
2329 tx_buf = &ring->buf[cpu];
2330 if (!tx_buf->data)
2331 break;
2332
2333 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2334 if (tx_buf->type == MTK_TYPE_SKB)
2335 mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2336 budget--;
2337 }
2338 mtk_tx_unmap(eth, tx_buf, &bq, true);
2339
2340 desc = ring->dma + cpu * eth->soc->tx.desc_size;
2341 ring->last_free = desc;
2342 atomic_inc(&ring->free_count);
2343
2344 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
2345 }
2346 xdp_flush_frame_bulk(&bq);
2347
2348 ring->cpu_idx = cpu;
2349
2350 return budget;
2351 }
2352
2353 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
2354 {
2355 	struct mtk_tx_ring *ring = &eth->tx_ring;
2356 struct dim_sample dim_sample = {};
2357 struct mtk_poll_state state = {};
2358
2359 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2360 budget = mtk_poll_tx_qdma(eth, budget, &state);
2361 else
2362 budget = mtk_poll_tx_pdma(eth, budget, &state);
2363
2364 if (state.txq)
2365 netdev_tx_completed_queue(state.txq, state.done, state.bytes);
2366
2367 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
2368 &dim_sample);
2369 	net_dim(&eth->tx_dim, dim_sample);
2370
2371 if (mtk_queue_stopped(eth) &&
2372 (atomic_read(&ring->free_count) > ring->thresh))
2373 mtk_wake_queue(eth);
2374
2375 return state.total;
2376 }
2377
2378 static void mtk_handle_status_irq(struct mtk_eth *eth)
2379 {
2380 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
2381
2382 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
2383 mtk_stats_update(eth);
2384 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
2385 MTK_INT_STATUS2);
2386 }
2387 }
2388
2389 static int mtk_napi_tx(struct napi_struct *napi, int budget)
2390 {
2391 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
2392 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2393 int tx_done = 0;
2394
2395 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2396 mtk_handle_status_irq(eth);
2397 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
2398 tx_done = mtk_poll_tx(eth, budget);
2399
2400 if (unlikely(netif_msg_intr(eth))) {
2401 dev_info(eth->dev,
2402 "done tx %d, intr 0x%08x/0x%x\n", tx_done,
2403 mtk_r32(eth, reg_map->tx_irq_status),
2404 mtk_r32(eth, reg_map->tx_irq_mask));
2405 }
2406
2407 if (tx_done == budget)
2408 return budget;
2409
2410 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
2411 return budget;
2412
2413 if (napi_complete_done(napi, tx_done))
2414 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2415
2416 return tx_done;
2417 }
2418
2419 static int mtk_napi_rx(struct napi_struct *napi, int budget)
2420 {
2421 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
2422 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2423 int rx_done_total = 0;
2424
2425 mtk_handle_status_irq(eth);
2426
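	/* Re-poll as long as the RX done status stays asserted so packets
	 * that arrive between the ack and the poll are not left behind.
	 */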
2427 do {
2428 int rx_done;
2429
2430 mtk_w32(eth, eth->soc->rx.irq_done_mask,
2431 reg_map->pdma.irq_status);
2432 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
2433 rx_done_total += rx_done;
2434
2435 if (unlikely(netif_msg_intr(eth))) {
2436 dev_info(eth->dev,
2437 "done rx %d, intr 0x%08x/0x%x\n", rx_done,
2438 mtk_r32(eth, reg_map->pdma.irq_status),
2439 mtk_r32(eth, reg_map->pdma.irq_mask));
2440 }
2441
2442 if (rx_done_total == budget)
2443 return budget;
2444
2445 } while (mtk_r32(eth, reg_map->pdma.irq_status) &
2446 eth->soc->rx.irq_done_mask);
2447
2448 if (napi_complete_done(napi, rx_done_total))
2449 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
2450
2451 return rx_done_total;
2452 }
2453
2454 static int mtk_tx_alloc(struct mtk_eth *eth)
2455 {
2456 const struct mtk_soc_data *soc = eth->soc;
2457 	struct mtk_tx_ring *ring = &eth->tx_ring;
2458 int i, sz = soc->tx.desc_size;
2459 struct mtk_tx_dma_v2 *txd;
2460 int ring_size;
2461 u32 ofs, val;
2462
2463 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
2464 ring_size = MTK_QDMA_RING_SIZE;
2465 else
2466 ring_size = soc->tx.dma_size;
2467
2468 ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
2469 GFP_KERNEL);
2470 if (!ring->buf)
2471 goto no_tx_mem;
2472
2473 if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
2474 ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
2475 ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
2476 } else {
2477 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2478 &ring->phys, GFP_KERNEL);
2479 }
2480
2481 if (!ring->dma)
2482 goto no_tx_mem;
2483
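	/* Chain the descriptors into a ring through txd2 and hand initial
	 * ownership of every slot to the CPU.
	 */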
2484 for (i = 0; i < ring_size; i++) {
2485 int next = (i + 1) % ring_size;
2486 u32 next_ptr = ring->phys + next * sz;
2487
2488 txd = ring->dma + i * sz;
2489 txd->txd2 = next_ptr;
2490 txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
2491 txd->txd4 = 0;
2492 if (mtk_is_netsys_v2_or_greater(eth)) {
2493 txd->txd5 = 0;
2494 txd->txd6 = 0;
2495 txd->txd7 = 0;
2496 txd->txd8 = 0;
2497 }
2498 }
2499
2500 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
2501 * only as the framework. The real HW descriptors are the PDMA
2502 * descriptors in ring->dma_pdma.
2503 */
2504 if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2505 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
2506 &ring->phys_pdma, GFP_KERNEL);
2507 if (!ring->dma_pdma)
2508 goto no_tx_mem;
2509
2510 for (i = 0; i < ring_size; i++) {
2511 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
2512 ring->dma_pdma[i].txd4 = 0;
2513 }
2514 }
2515
2516 ring->dma_size = ring_size;
2517 atomic_set(&ring->free_count, ring_size - 2);
2518 ring->next_free = ring->dma;
2519 ring->last_free = (void *)txd;
2520 ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
2521 ring->thresh = MAX_SKB_FRAGS;
2522
2523 /* make sure that all changes to the dma ring are flushed before we
2524 * continue
2525 */
2526 wmb();
2527
2528 if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
2529 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
2530 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
2531 mtk_w32(eth,
2532 ring->phys + ((ring_size - 1) * sz),
2533 soc->reg_map->qdma.crx_ptr);
2534 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
2535
2536 for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
2537 val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
2538 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
2539
2540 val = MTK_QTX_SCH_MIN_RATE_EN |
2541 /* minimum: 10 Mbps */
2542 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
2543 FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
2544 MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
2545 if (mtk_is_netsys_v1(eth))
2546 val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
2547 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
2548 ofs += MTK_QTX_OFFSET;
2549 }
2550 val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
2551 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
2552 if (mtk_is_netsys_v2_or_greater(eth))
2553 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
2554 } else {
2555 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
2556 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
2557 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
2558 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
2559 }
2560
2561 return 0;
2562
2563 no_tx_mem:
2564 return -ENOMEM;
2565 }
2566
2567 static void mtk_tx_clean(struct mtk_eth *eth)
2568 {
2569 const struct mtk_soc_data *soc = eth->soc;
2570 	struct mtk_tx_ring *ring = &eth->tx_ring;
2571 int i;
2572
2573 if (ring->buf) {
2574 for (i = 0; i < ring->dma_size; i++)
2575 mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
2576 kfree(ring->buf);
2577 ring->buf = NULL;
2578 }
2579 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
2580 dma_free_coherent(eth->dma_dev,
2581 ring->dma_size * soc->tx.desc_size,
2582 ring->dma, ring->phys);
2583 ring->dma = NULL;
2584 }
2585
2586 if (ring->dma_pdma) {
2587 dma_free_coherent(eth->dma_dev,
2588 ring->dma_size * soc->tx.desc_size,
2589 ring->dma_pdma, ring->phys_pdma);
2590 ring->dma_pdma = NULL;
2591 }
2592 }
2593
2594 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
2595 {
2596 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
2597 const struct mtk_soc_data *soc = eth->soc;
2598 struct mtk_rx_ring *ring;
2599 int rx_data_len, rx_dma_size, tx_ring_size;
2600 int i;
2601
2602 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2603 tx_ring_size = MTK_QDMA_RING_SIZE;
2604 else
2605 tx_ring_size = soc->tx.dma_size;
2606
2607 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2608 if (ring_no)
2609 return -EINVAL;
2610 		ring = &eth->rx_ring_qdma;
2611 } else {
2612 		ring = &eth->rx_ring[ring_no];
2613 }
2614
2615 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
2616 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
2617 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
2618 } else {
2619 rx_data_len = ETH_DATA_LEN;
2620 rx_dma_size = soc->rx.dma_size;
2621 }
2622
2623 ring->frag_size = mtk_max_frag_size(rx_data_len);
2624 ring->buf_size = mtk_max_buf_size(ring->frag_size);
2625 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
2626 GFP_KERNEL);
2627 if (!ring->data)
2628 return -ENOMEM;
2629
2630 if (mtk_page_pool_enabled(eth)) {
2631 struct page_pool *pp;
2632
2633 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
2634 rx_dma_size);
2635 if (IS_ERR(pp))
2636 return PTR_ERR(pp);
2637
2638 ring->page_pool = pp;
2639 }
2640
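	/* On SoCs with the MTK_SRAM capability the normal RX ring shares the
	 * SRAM region right after the TX descriptors; other rings (QDMA,
	 * HWLRO) fall back to coherent DMA memory.
	 */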
2641 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
2642 rx_flag != MTK_RX_FLAGS_NORMAL) {
2643 ring->dma = dma_alloc_coherent(eth->dma_dev,
2644 rx_dma_size * eth->soc->rx.desc_size,
2645 &ring->phys, GFP_KERNEL);
2646 } else {
2647 		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
2648
2649 ring->dma = tx_ring->dma + tx_ring_size *
2650 eth->soc->tx.desc_size * (ring_no + 1);
2651 ring->phys = tx_ring->phys + tx_ring_size *
2652 eth->soc->tx.desc_size * (ring_no + 1);
2653 }
2654
2655 if (!ring->dma)
2656 return -ENOMEM;
2657
2658 for (i = 0; i < rx_dma_size; i++) {
2659 struct mtk_rx_dma_v2 *rxd;
2660 dma_addr_t dma_addr;
2661 void *data;
2662
2663 rxd = ring->dma + i * eth->soc->rx.desc_size;
2664 if (ring->page_pool) {
2665 data = mtk_page_pool_get_buff(ring->page_pool,
2666 &dma_addr, GFP_KERNEL);
2667 if (!data)
2668 return -ENOMEM;
2669 } else {
2670 if (ring->frag_size <= PAGE_SIZE)
2671 data = netdev_alloc_frag(ring->frag_size);
2672 else
2673 data = mtk_max_lro_buf_alloc(GFP_KERNEL);
2674
2675 if (!data)
2676 return -ENOMEM;
2677
2678 dma_addr = dma_map_single(eth->dma_dev,
2679 data + NET_SKB_PAD + eth->ip_align,
2680 ring->buf_size, DMA_FROM_DEVICE);
2681 if (unlikely(dma_mapping_error(eth->dma_dev,
2682 dma_addr))) {
2683 skb_free_frag(data);
2684 return -ENOMEM;
2685 }
2686 }
2687 rxd->rxd1 = (unsigned int)dma_addr;
2688 ring->data[i] = data;
2689
2690 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2691 rxd->rxd2 = RX_DMA_LSO;
2692 else
2693 rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
2694
2695 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2696 rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
2697
2698 rxd->rxd3 = 0;
2699 rxd->rxd4 = 0;
2700 if (mtk_is_netsys_v3_or_greater(eth)) {
2701 rxd->rxd5 = 0;
2702 rxd->rxd6 = 0;
2703 rxd->rxd7 = 0;
2704 rxd->rxd8 = 0;
2705 }
2706 }
2707
2708 ring->dma_size = rx_dma_size;
2709 ring->calc_idx_update = false;
2710 ring->calc_idx = rx_dma_size - 1;
2711 if (rx_flag == MTK_RX_FLAGS_QDMA)
2712 ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
2713 ring_no * MTK_QRX_OFFSET;
2714 else
2715 ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
2716 ring_no * MTK_QRX_OFFSET;
2717 /* make sure that all changes to the dma ring are flushed before we
2718 * continue
2719 */
2720 wmb();
2721
2722 if (rx_flag == MTK_RX_FLAGS_QDMA) {
2723 mtk_w32(eth, ring->phys,
2724 reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2725 mtk_w32(eth, rx_dma_size,
2726 reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2727 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2728 reg_map->qdma.rst_idx);
2729 } else {
2730 mtk_w32(eth, ring->phys,
2731 reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
2732 mtk_w32(eth, rx_dma_size,
2733 reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
2734 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
2735 reg_map->pdma.rst_idx);
2736 }
2737 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
2738
2739 return 0;
2740 }
2741
2742 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
2743 {
2744 u64 addr64 = 0;
2745 int i;
2746
2747 if (ring->data && ring->dma) {
2748 for (i = 0; i < ring->dma_size; i++) {
2749 struct mtk_rx_dma *rxd;
2750
2751 if (!ring->data[i])
2752 continue;
2753
2754 rxd = ring->dma + i * eth->soc->rx.desc_size;
2755 if (!rxd->rxd1)
2756 continue;
2757
2758 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
2759 addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
2760
2761 dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
2762 ring->buf_size, DMA_FROM_DEVICE);
2763 mtk_rx_put_buff(ring, ring->data[i], false);
2764 }
2765 kfree(ring->data);
2766 ring->data = NULL;
2767 }
2768
2769 if (!in_sram && ring->dma) {
2770 dma_free_coherent(eth->dma_dev,
2771 ring->dma_size * eth->soc->rx.desc_size,
2772 ring->dma, ring->phys);
2773 ring->dma = NULL;
2774 }
2775
2776 if (ring->page_pool) {
2777 if (xdp_rxq_info_is_reg(&ring->xdp_q))
2778 xdp_rxq_info_unreg(&ring->xdp_q);
2779 page_pool_destroy(ring->page_pool);
2780 ring->page_pool = NULL;
2781 }
2782 }
2783
2784 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
2785 {
2786 int i;
2787 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
2788 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
2789
2790 /* set LRO rings to auto-learn modes */
2791 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
2792
2793 /* validate LRO ring */
2794 ring_ctrl_dw2 |= MTK_RING_VLD;
2795
2796 /* set AGE timer (unit: 20us) */
2797 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
2798 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
2799
2800 /* set max AGG timer (unit: 20us) */
2801 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
2802
2803 /* set max LRO AGG count */
2804 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
2805 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
2806
2807 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2808 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
2809 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
2810 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
2811 }
2812
2813 /* IPv4 checksum update enable */
2814 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
2815
2816 /* switch priority comparison to packet count mode */
2817 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
2818
2819 /* bandwidth threshold setting */
2820 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
2821
2822 /* auto-learn score delta setting */
2823 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
2824
2825 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
2826 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
2827 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
2828
2829 /* set HW LRO mode & the max aggregation count for rx packets */
2830 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
2831
2832 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
2833 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
2834
2835 /* enable HW LRO */
2836 lro_ctrl_dw0 |= MTK_LRO_EN;
2837
2838 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
2839 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
2840
2841 return 0;
2842 }
2843
2844 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
2845 {
2846 int i;
2847 u32 val;
2848
2849 /* relinquish lro rings, flush aggregated packets */
2850 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
2851
2852 	/* wait for the relinquish operations to complete */
2853 for (i = 0; i < 10; i++) {
2854 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
2855 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
2856 msleep(20);
2857 continue;
2858 }
2859 break;
2860 }
2861
2862 /* invalidate lro rings */
2863 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2864 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
2865
2866 /* disable HW LRO */
2867 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
2868 }
2869
2870 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
2871 {
2872 u32 reg_val;
2873
2874 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2875
2876 /* invalidate the IP setting */
2877 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2878
2879 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
2880
2881 /* validate the IP setting */
2882 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2883 }
2884
2885 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
2886 {
2887 u32 reg_val;
2888
2889 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
2890
2891 /* invalidate the IP setting */
2892 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
2893
2894 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
2895 }
2896
2897 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
2898 {
2899 int cnt = 0;
2900 int i;
2901
2902 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2903 if (mac->hwlro_ip[i])
2904 cnt++;
2905 }
2906
2907 return cnt;
2908 }
2909
2910 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
2911 struct ethtool_rxnfc *cmd)
2912 {
2913 struct ethtool_rx_flow_spec *fsp =
2914 (struct ethtool_rx_flow_spec *)&cmd->fs;
2915 struct mtk_mac *mac = netdev_priv(dev);
2916 struct mtk_eth *eth = mac->hw;
2917 int hwlro_idx;
2918
2919 if ((fsp->flow_type != TCP_V4_FLOW) ||
2920 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
2921 (fsp->location > 1))
2922 return -EINVAL;
2923
2924 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
2925 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2926
2927 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2928
2929 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
2930
2931 return 0;
2932 }
2933
2934 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
2935 struct ethtool_rxnfc *cmd)
2936 {
2937 struct ethtool_rx_flow_spec *fsp =
2938 (struct ethtool_rx_flow_spec *)&cmd->fs;
2939 struct mtk_mac *mac = netdev_priv(dev);
2940 struct mtk_eth *eth = mac->hw;
2941 int hwlro_idx;
2942
2943 if (fsp->location > 1)
2944 return -EINVAL;
2945
2946 mac->hwlro_ip[fsp->location] = 0;
2947 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
2948
2949 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2950
2951 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2952
2953 return 0;
2954 }
2955
2956 static void mtk_hwlro_netdev_disable(struct net_device *dev)
2957 {
2958 struct mtk_mac *mac = netdev_priv(dev);
2959 struct mtk_eth *eth = mac->hw;
2960 int i, hwlro_idx;
2961
2962 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
2963 mac->hwlro_ip[i] = 0;
2964 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
2965
2966 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
2967 }
2968
2969 mac->hwlro_ip_cnt = 0;
2970 }
2971
2972 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
2973 struct ethtool_rxnfc *cmd)
2974 {
2975 struct mtk_mac *mac = netdev_priv(dev);
2976 struct ethtool_rx_flow_spec *fsp =
2977 (struct ethtool_rx_flow_spec *)&cmd->fs;
2978
2979 if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
2980 return -EINVAL;
2981
2982 	/* only the TCP destination IPv4 address is meaningful; other fields are ignored */
2983 fsp->flow_type = TCP_V4_FLOW;
2984 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
2985 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
2986
2987 fsp->h_u.tcp_ip4_spec.ip4src = 0;
2988 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
2989 fsp->h_u.tcp_ip4_spec.psrc = 0;
2990 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
2991 fsp->h_u.tcp_ip4_spec.pdst = 0;
2992 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
2993 fsp->h_u.tcp_ip4_spec.tos = 0;
2994 fsp->m_u.tcp_ip4_spec.tos = 0xff;
2995
2996 return 0;
2997 }
2998
2999 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
3000 struct ethtool_rxnfc *cmd,
3001 u32 *rule_locs)
3002 {
3003 struct mtk_mac *mac = netdev_priv(dev);
3004 int cnt = 0;
3005 int i;
3006
3007 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
3008 if (cnt == cmd->rule_cnt)
3009 return -EMSGSIZE;
3010
3011 if (mac->hwlro_ip[i]) {
3012 rule_locs[cnt] = i;
3013 cnt++;
3014 }
3015 }
3016
3017 cmd->rule_cnt = cnt;
3018
3019 return 0;
3020 }
3021
3022 static netdev_features_t mtk_fix_features(struct net_device *dev,
3023 netdev_features_t features)
3024 {
3025 if (!(features & NETIF_F_LRO)) {
3026 struct mtk_mac *mac = netdev_priv(dev);
3027 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
3028
3029 if (ip_cnt) {
3030 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
3031
3032 features |= NETIF_F_LRO;
3033 }
3034 }
3035
3036 return features;
3037 }
3038
3039 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
3040 {
3041 netdev_features_t diff = dev->features ^ features;
3042
3043 if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
3044 mtk_hwlro_netdev_disable(dev);
3045
3046 return 0;
3047 }
3048
3049 /* wait for DMA to finish whatever it is doing before we start using it again */
3050 static int mtk_dma_busy_wait(struct mtk_eth *eth)
3051 {
3052 unsigned int reg;
3053 int ret;
3054 u32 val;
3055
3056 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3057 reg = eth->soc->reg_map->qdma.glo_cfg;
3058 else
3059 reg = eth->soc->reg_map->pdma.glo_cfg;
3060
3061 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
3062 !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
3063 5, MTK_DMA_BUSY_TIMEOUT_US);
3064 if (ret)
3065 dev_err(eth->dev, "DMA init timeout\n");
3066
3067 return ret;
3068 }
3069
3070 static int mtk_dma_init(struct mtk_eth *eth)
3071 {
3072 int err;
3073 u32 i;
3074
3075 if (mtk_dma_busy_wait(eth))
3076 return -EBUSY;
3077
3078 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3079 /* QDMA needs scratch memory for internal reordering of the
3080 * descriptors
3081 */
3082 err = mtk_init_fq_dma(eth);
3083 if (err)
3084 return err;
3085 }
3086
3087 err = mtk_tx_alloc(eth);
3088 if (err)
3089 return err;
3090
3091 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3092 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
3093 if (err)
3094 return err;
3095 }
3096
3097 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
3098 if (err)
3099 return err;
3100
3101 if (eth->hwlro) {
3102 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
3103 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
3104 if (err)
3105 return err;
3106 }
3107 err = mtk_hwlro_rx_init(eth);
3108 if (err)
3109 return err;
3110 }
3111
3112 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3113 /* Enable random early drop and set drop threshold
3114 * automatically
3115 */
3116 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
3117 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
3118 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
3119 }
3120
3121 return 0;
3122 }
3123
3124 static void mtk_dma_free(struct mtk_eth *eth)
3125 {
3126 const struct mtk_soc_data *soc = eth->soc;
3127 int i;
3128
3129 for (i = 0; i < MTK_MAX_DEVS; i++)
3130 if (eth->netdev[i])
3131 netdev_reset_queue(eth->netdev[i]);
3132 if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
3133 dma_free_coherent(eth->dma_dev,
3134 MTK_QDMA_RING_SIZE * soc->tx.desc_size,
3135 eth->scratch_ring, eth->phy_scratch_ring);
3136 eth->scratch_ring = NULL;
3137 eth->phy_scratch_ring = 0;
3138 }
3139 mtk_tx_clean(eth);
3140 	mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
3141 	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
3142
3143 if (eth->hwlro) {
3144 mtk_hwlro_rx_uninit(eth);
3145 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
3146 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
3147 }
3148
3149 for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
3150 kfree(eth->scratch_head[i]);
3151 eth->scratch_head[i] = NULL;
3152 }
3153 }
3154
3155 static bool mtk_hw_reset_check(struct mtk_eth *eth)
3156 {
3157 u32 val = mtk_r32(eth, MTK_INT_STATUS2);
3158
3159 return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
3160 (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
3161 (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
3162 }
3163
3164 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
3165 {
3166 struct mtk_mac *mac = netdev_priv(dev);
3167 struct mtk_eth *eth = mac->hw;
3168
3169 	if (test_bit(MTK_RESETTING, &eth->state))
3170 return;
3171
3172 if (!mtk_hw_reset_check(eth))
3173 return;
3174
3175 eth->netdev[mac->id]->stats.tx_errors++;
3176 netif_err(eth, tx_err, dev, "transmit timed out\n");
3177
3178 	schedule_work(&eth->pending_work);
3179 }
3180
3181 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
3182 {
3183 struct mtk_eth *eth = _eth;
3184
3185 eth->rx_events++;
3186 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
3187 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3188 		__napi_schedule(&eth->rx_napi);
3189 }
3190
3191 return IRQ_HANDLED;
3192 }
3193
3194 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
3195 {
3196 struct mtk_eth *eth = _eth;
3197
3198 eth->tx_events++;
3199 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
3200 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3201 		__napi_schedule(&eth->tx_napi);
3202 }
3203
3204 return IRQ_HANDLED;
3205 }
3206
3207 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
3208 {
3209 struct mtk_eth *eth = _eth;
3210 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3211
3212 if (mtk_r32(eth, reg_map->pdma.irq_mask) &
3213 eth->soc->rx.irq_done_mask) {
3214 if (mtk_r32(eth, reg_map->pdma.irq_status) &
3215 eth->soc->rx.irq_done_mask)
3216 mtk_handle_irq_rx(irq, _eth);
3217 }
3218 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
3219 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
3220 mtk_handle_irq_tx(irq, _eth);
3221 }
3222
3223 return IRQ_HANDLED;
3224 }
3225
3226 #ifdef CONFIG_NET_POLL_CONTROLLER
3227 static void mtk_poll_controller(struct net_device *dev)
3228 {
3229 struct mtk_mac *mac = netdev_priv(dev);
3230 struct mtk_eth *eth = mac->hw;
3231
3232 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3233 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3234 mtk_handle_irq_rx(eth->irq[2], dev);
3235 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3236 mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
3237 }
3238 #endif
3239
3240 static int mtk_start_dma(struct mtk_eth *eth)
3241 {
3242 u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
3243 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3244 int err;
3245
3246 err = mtk_dma_init(eth);
3247 if (err) {
3248 mtk_dma_free(eth);
3249 return err;
3250 }
3251
3252 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
3253 val = mtk_r32(eth, reg_map->qdma.glo_cfg);
3254 val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3255 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
3256 MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
3257
3258 if (mtk_is_netsys_v2_or_greater(eth))
3259 val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
3260 MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
3261 MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
3262 else
3263 val |= MTK_RX_BT_32DWORDS;
3264 mtk_w32(eth, val, reg_map->qdma.glo_cfg);
3265
3266 mtk_w32(eth,
3267 MTK_RX_DMA_EN | rx_2b_offset |
3268 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
3269 reg_map->pdma.glo_cfg);
3270 } else {
3271 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
3272 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
3273 reg_map->pdma.glo_cfg);
3274 }
3275
3276 return 0;
3277 }
3278
3279 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
3280 {
3281 int i;
3282
3283 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3284 return;
3285
3286 for (i = 0; i < MTK_MAX_DEVS; i++) {
3287 u32 val;
3288
3289 if (!eth->netdev[i])
3290 continue;
3291
3292 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
3293
3294 		/* by default, set up the forward port to send frames to the PDMA */
3295 val &= ~0xffff;
3296
3297 /* Enable RX checksum */
3298 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
3299
3300 val |= config;
3301
3302 if (netdev_uses_dsa(eth->netdev[i]))
3303 val |= MTK_GDMA_SPECIAL_TAG;
3304
3305 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
3306 }
3307 /* Reset and enable PSE */
3308 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
3309 mtk_w32(eth, 0, MTK_RST_GL);
3310 }
3311
3312
3313 static bool mtk_uses_dsa(struct net_device *dev)
3314 {
3315 #if IS_ENABLED(CONFIG_NET_DSA)
3316 return netdev_uses_dsa(dev) &&
3317 dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
3318 #else
3319 return false;
3320 #endif
3321 }
3322
3323 static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
3324 {
3325 struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
3326 struct mtk_eth *eth = mac->hw;
3327 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3328 struct ethtool_link_ksettings s;
3329 struct net_device *ldev;
3330 struct list_head *iter;
3331 struct dsa_port *dp;
3332
3333 if (event != NETDEV_CHANGE)
3334 return NOTIFY_DONE;
3335
3336 netdev_for_each_lower_dev(dev, ldev, iter) {
3337 if (netdev_priv(ldev) == mac)
3338 goto found;
3339 }
3340
3341 return NOTIFY_DONE;
3342
3343 found:
3344 if (!dsa_user_dev_check(dev))
3345 return NOTIFY_DONE;
3346
3347 if (__ethtool_get_link_ksettings(dev, &s))
3348 return NOTIFY_DONE;
3349
3350 if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
3351 return NOTIFY_DONE;
3352
3353 dp = dsa_port_from_netdev(dev);
3354 if (dp->index >= MTK_QDMA_NUM_QUEUES)
3355 return NOTIFY_DONE;
3356
3357 if (mac->speed > 0 && mac->speed <= s.base.speed)
3358 s.base.speed = 0;
3359
3360 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
3361
3362 return NOTIFY_DONE;
3363 }
3364
3365 static int mtk_open(struct net_device *dev)
3366 {
3367 struct mtk_mac *mac = netdev_priv(dev);
3368 struct mtk_eth *eth = mac->hw;
3369 int i, err;
3370
3371 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
3372 if (err) {
3373 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
3374 err);
3375 return err;
3376 }
3377
3378 /* we run 2 netdevs on the same dma ring so we only bring it up once */
3379 	if (!refcount_read(&eth->dma_refcnt)) {
3380 const struct mtk_soc_data *soc = eth->soc;
3381 u32 gdm_config;
3382 int i;
3383
3384 err = mtk_start_dma(eth);
3385 if (err) {
3386 phylink_disconnect_phy(mac->phylink);
3387 return err;
3388 }
3389
3390 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3391 mtk_ppe_start(eth->ppe[i]);
3392
3393 gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
3394 : MTK_GDMA_TO_PDMA;
3395 mtk_gdm_config(eth, gdm_config);
3396
3397 		napi_enable(&eth->tx_napi);
3398 		napi_enable(&eth->rx_napi);
3399 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
3400 mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
3401 		refcount_set(&eth->dma_refcnt, 1);
3402 }
3403 else
3404 		refcount_inc(&eth->dma_refcnt);
3405
3406 phylink_start(mac->phylink);
3407 netif_tx_start_all_queues(dev);
3408
3409 if (mtk_is_netsys_v2_or_greater(eth))
3410 return 0;
3411
3412 if (mtk_uses_dsa(dev) && !eth->prog) {
3413 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
3414 struct metadata_dst *md_dst = eth->dsa_meta[i];
3415
3416 if (md_dst)
3417 continue;
3418
3419 md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
3420 GFP_KERNEL);
3421 if (!md_dst)
3422 return -ENOMEM;
3423
3424 md_dst->u.port_info.port_id = i;
3425 eth->dsa_meta[i] = md_dst;
3426 }
3427 } else {
3428 /* Hardware DSA untagging and VLAN RX offloading need to be
3429 * disabled if at least one MAC does not use DSA.
3430 */
3431 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3432
3433 val &= ~MTK_CDMP_STAG_EN;
3434 mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
3435
3436 mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
3437 }
3438
3439 return 0;
3440 }
3441
3442 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
3443 {
3444 u32 val;
3445 int i;
3446
3447 /* stop the dma engine */
3448 	spin_lock_bh(&eth->page_lock);
3449 val = mtk_r32(eth, glo_cfg);
3450 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
3451 glo_cfg);
3452 	spin_unlock_bh(&eth->page_lock);
3453
3454 /* wait for dma stop */
3455 for (i = 0; i < 10; i++) {
3456 val = mtk_r32(eth, glo_cfg);
3457 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
3458 msleep(20);
3459 continue;
3460 }
3461 break;
3462 }
3463 }
3464
3465 static int mtk_stop(struct net_device *dev)
3466 {
3467 struct mtk_mac *mac = netdev_priv(dev);
3468 struct mtk_eth *eth = mac->hw;
3469 int i;
3470
3471 phylink_stop(mac->phylink);
3472
3473 netif_tx_disable(dev);
3474
3475 phylink_disconnect_phy(mac->phylink);
3476
3477 	/* only shut down DMA if this is the last user */
3478 	if (!refcount_dec_and_test(&eth->dma_refcnt))
3479 return 0;
3480
3481 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
3482
3483 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
3484 mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
3485 	napi_disable(&eth->tx_napi);
3486 	napi_disable(&eth->rx_napi);
3487
3488 	cancel_work_sync(&eth->rx_dim.work);
3489 	cancel_work_sync(&eth->tx_dim.work);
3490
3491 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3492 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
3493 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
3494
3495 mtk_dma_free(eth);
3496
3497 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3498 mtk_ppe_stop(eth->ppe[i]);
3499
3500 return 0;
3501 }
3502
3503 static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
3504 struct netlink_ext_ack *extack)
3505 {
3506 struct mtk_mac *mac = netdev_priv(dev);
3507 struct mtk_eth *eth = mac->hw;
3508 struct bpf_prog *old_prog;
3509 bool need_update;
3510
3511 if (eth->hwlro) {
3512 NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
3513 return -EOPNOTSUPP;
3514 }
3515
3516 if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
3517 NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
3518 return -EOPNOTSUPP;
3519 }
3520
3521 need_update = !!eth->prog != !!prog;
3522 if (netif_running(dev) && need_update)
3523 mtk_stop(dev);
3524
3525 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
3526 if (old_prog)
3527 bpf_prog_put(old_prog);
3528
3529 if (netif_running(dev) && need_update)
3530 return mtk_open(dev);
3531
3532 return 0;
3533 }
3534
3535 static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3536 {
3537 switch (xdp->command) {
3538 case XDP_SETUP_PROG:
3539 return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
3540 default:
3541 return -EINVAL;
3542 }
3543 }
3544
3545 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
3546 {
3547 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3548 reset_bits,
3549 reset_bits);
3550
3551 usleep_range(1000, 1100);
3552 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
3553 reset_bits,
3554 ~reset_bits);
3555 mdelay(10);
3556 }
3557
3558 static void mtk_clk_disable(struct mtk_eth *eth)
3559 {
3560 int clk;
3561
3562 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
3563 clk_disable_unprepare(eth->clks[clk]);
3564 }
3565
3566 static int mtk_clk_enable(struct mtk_eth *eth)
3567 {
3568 int clk, ret;
3569
3570 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
3571 ret = clk_prepare_enable(eth->clks[clk]);
3572 if (ret)
3573 goto err_disable_clks;
3574 }
3575
3576 return 0;
3577
3578 err_disable_clks:
3579 while (--clk >= 0)
3580 clk_disable_unprepare(eth->clks[clk]);
3581
3582 return ret;
3583 }
3584
3585 static void mtk_dim_rx(struct work_struct *work)
3586 {
3587 struct dim *dim = container_of(work, struct dim, work);
3588 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
3589 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3590 struct dim_cq_moder cur_profile;
3591 u32 val, cur;
3592
3593 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
3594 dim->profile_ix);
3595 	spin_lock_bh(&eth->dim_lock);
3596
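	/* Translate the DIM profile into the RX delay-interrupt fields (time
	 * in 20us units plus a packet-count threshold) while preserving the
	 * TX half of the register.
	 */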
3597 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3598 val &= MTK_PDMA_DELAY_TX_MASK;
3599 val |= MTK_PDMA_DELAY_RX_EN;
3600
3601 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3602 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
3603
3604 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3605 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
3606
3607 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3608 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3609 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3610
3611 	spin_unlock_bh(&eth->dim_lock);
3612
3613 dim->state = DIM_START_MEASURE;
3614 }
3615
3616 static void mtk_dim_tx(struct work_struct *work)
3617 {
3618 struct dim *dim = container_of(work, struct dim, work);
3619 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
3620 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3621 struct dim_cq_moder cur_profile;
3622 u32 val, cur;
3623
3624 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
3625 dim->profile_ix);
3626 	spin_lock_bh(&eth->dim_lock);
3627
3628 val = mtk_r32(eth, reg_map->pdma.delay_irq);
3629 val &= MTK_PDMA_DELAY_RX_MASK;
3630 val |= MTK_PDMA_DELAY_TX_EN;
3631
3632 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
3633 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
3634
3635 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
3636 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
3637
3638 mtk_w32(eth, val, reg_map->pdma.delay_irq);
3639 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
3640 mtk_w32(eth, val, reg_map->qdma.delay_irq);
3641
3642 	spin_unlock_bh(&eth->dim_lock);
3643
3644 dim->state = DIM_START_MEASURE;
3645 }
3646
3647 static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
3648 {
3649 struct mtk_eth *eth = mac->hw;
3650 u32 mcr_cur, mcr_new;
3651
3652 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3653 return;
3654
3655 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
3656 mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
3657
3658 if (val <= 1518)
3659 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
3660 else if (val <= 1536)
3661 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
3662 else if (val <= 1552)
3663 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
3664 else
3665 mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
3666
3667 if (mcr_new != mcr_cur)
3668 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
3669 }
3670
3671 static void mtk_hw_reset(struct mtk_eth *eth)
3672 {
3673 u32 val;
3674
3675 if (mtk_is_netsys_v2_or_greater(eth))
3676 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
3677
3678 if (mtk_is_netsys_v3_or_greater(eth)) {
3679 val = RSTCTRL_PPE0_V3;
3680
3681 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3682 val |= RSTCTRL_PPE1_V3;
3683
3684 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3685 val |= RSTCTRL_PPE2;
3686
3687 val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3688 } else if (mtk_is_netsys_v2_or_greater(eth)) {
3689 val = RSTCTRL_PPE0_V2;
3690
3691 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3692 val |= RSTCTRL_PPE1;
3693 } else {
3694 val = RSTCTRL_PPE0;
3695 }
3696
3697 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
3698
3699 if (mtk_is_netsys_v3_or_greater(eth))
3700 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3701 0x6f8ff);
3702 else if (mtk_is_netsys_v2_or_greater(eth))
3703 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
3704 0x3ffffff);
3705 }
3706
3707 static u32 mtk_hw_reset_read(struct mtk_eth *eth)
3708 {
3709 u32 val;
3710
3711 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
3712 return val;
3713 }
3714
3715 static void mtk_hw_warm_reset(struct mtk_eth *eth)
3716 {
3717 u32 rst_mask, val;
3718
3719 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
3720 RSTCTRL_FE);
3721 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
3722 val & RSTCTRL_FE, 1, 1000)) {
3723 dev_err(eth->dev, "warm reset failed\n");
3724 mtk_hw_reset(eth);
3725 return;
3726 }
3727
3728 if (mtk_is_netsys_v3_or_greater(eth)) {
3729 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
3730 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3731 rst_mask |= RSTCTRL_PPE1_V3;
3732 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
3733 rst_mask |= RSTCTRL_PPE2;
3734
3735 rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
3736 } else if (mtk_is_netsys_v2_or_greater(eth)) {
3737 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
3738 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
3739 rst_mask |= RSTCTRL_PPE1;
3740 } else {
3741 rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
3742 }
3743
3744 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
3745
3746 udelay(1);
3747 val = mtk_hw_reset_read(eth);
3748 if (!(val & rst_mask))
3749 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
3750 val, rst_mask);
3751
3752 rst_mask |= RSTCTRL_FE;
3753 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
3754
3755 udelay(1);
3756 val = mtk_hw_reset_read(eth);
3757 if (val & rst_mask)
3758 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
3759 val, rst_mask);
3760 }
3761
3762 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
3763 {
3764 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3765 bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
3766 bool oq_hang, cdm1_busy, adma_busy;
3767 bool wtx_busy, cdm_full, oq_free;
3768 u32 wdidx, val, gdm1_fc, gdm2_fc;
3769 bool qfsm_hang, qfwd_hang;
3770 bool ret = false;
3771
3772 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
3773 return false;
3774
3775 /* WDMA sanity checks */
3776 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
3777
3778 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
3779 wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
3780
3781 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
3782 cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
3783
3784 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
3785 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
3786 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
3787
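	/* Only flag a hang after the same stall condition has been observed
	 * on three consecutive monitor passes.
	 */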
3788 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
3789 if (++eth->reset.wdma_hang_count > 2) {
3790 eth->reset.wdma_hang_count = 0;
3791 ret = true;
3792 }
3793 goto out;
3794 }
3795
3796 /* QDMA sanity checks */
3797 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
3798 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
3799
3800 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
3801 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
3802 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
3803 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
3804 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
3805 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
3806
3807 if (qfsm_hang && qfwd_hang &&
3808 ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
3809 (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
3810 if (++eth->reset.qdma_hang_count > 2) {
3811 eth->reset.qdma_hang_count = 0;
3812 ret = true;
3813 }
3814 goto out;
3815 }
3816
3817 /* ADMA sanity checks */
3818 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
3819 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
3820 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
3821 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
3822
3823 if (oq_hang && cdm1_busy && adma_busy) {
3824 if (++eth->reset.adma_hang_count > 2) {
3825 eth->reset.adma_hang_count = 0;
3826 ret = true;
3827 }
3828 goto out;
3829 }
3830
3831 eth->reset.wdma_hang_count = 0;
3832 eth->reset.qdma_hang_count = 0;
3833 eth->reset.adma_hang_count = 0;
3834 out:
3835 eth->reset.wdidx = wdidx;
3836
3837 return ret;
3838 }
3839
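/* Periodic DMA monitor: skipped while a reset is already in flight,
 * otherwise it schedules the pending reset work when a hang is detected
 * and always re-arms itself after MTK_DMA_MONITOR_TIMEOUT.
 */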
3840 static void mtk_hw_reset_monitor_work(struct work_struct *work)
3841 {
3842 struct delayed_work *del_work = to_delayed_work(work);
3843 struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
3844 reset.monitor_work);
3845
3846 if (test_bit(MTK_RESETTING, &eth->state))
3847 goto out;
3848
3849 /* DMA stuck checks */
3850 if (mtk_hw_check_dma_hang(eth))
3851 schedule_work(&eth->pending_work);
3852
3853 out:
3854 schedule_delayed_work(&eth->reset.monitor_work,
3855 MTK_DMA_MONITOR_TIMEOUT);
3856 }
3857
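/* Bring up the frame engine. Called with reset == false at probe time,
 * where runtime PM and the clocks are enabled, and with reset == true from
 * the recovery path, where only a warm reset of the hardware is performed.
 */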
3858 static int mtk_hw_init(struct mtk_eth *eth, bool reset)
3859 {
3860 u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
3861 ETHSYS_DMA_AG_MAP_PPE;
3862 const struct mtk_reg_map *reg_map = eth->soc->reg_map;
3863 int i, val, ret;
3864
3865 if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state))
3866 return 0;
3867
3868 if (!reset) {
3869 pm_runtime_enable(eth->dev);
3870 pm_runtime_get_sync(eth->dev);
3871
3872 ret = mtk_clk_enable(eth);
3873 if (ret)
3874 goto err_disable_pm;
3875 }
3876
3877 if (eth->ethsys)
3878 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
3879 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
3880
3881 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3882 ret = device_reset(eth->dev);
3883 if (ret) {
3884 dev_err(eth->dev, "MAC reset failed!\n");
3885 goto err_disable_pm;
3886 }
3887
3888 /* set interrupt delays based on current Net DIM sample */
3889 mtk_dim_rx(&eth->rx_dim.work);
3890 mtk_dim_tx(&eth->tx_dim.work);
3891
3892 /* disable delay and normal interrupt */
3893 mtk_tx_irq_disable(eth, ~0);
3894 mtk_rx_irq_disable(eth, ~0);
3895
3896 return 0;
3897 }
3898
3899 msleep(100);
3900
3901 if (reset)
3902 mtk_hw_warm_reset(eth);
3903 else
3904 mtk_hw_reset(eth);
3905
3906 if (mtk_is_netsys_v3_or_greater(eth)) {
3907 /* Set FE to PDMAv2 if necessary */
3908 val = mtk_r32(eth, MTK_FE_GLO_MISC);
3909 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
3910 }
3911
3912 if (eth->pctl) {
3913 /* Set GE2 driving and slew rate */
3914 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
3915
3916 /* set GE2 TDSEL */
3917 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
3918
3919 /* set GE2 TUNE */
3920 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
3921 }
3922
3923 /* Set link down as the default for each GMAC. Each GMAC's MCR is set
3924 * up with the appropriate value when the mtk_mac_config() callback is
3925 * invoked.
3926 */
3927 for (i = 0; i < MTK_MAX_DEVS; i++) {
3928 struct net_device *dev = eth->netdev[i];
3929
3930 if (!dev)
3931 continue;
3932
3933 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
3934 mtk_set_mcr_max_rx(netdev_priv(dev),
3935 dev->mtu + MTK_RX_ETH_HLEN);
3936 }
3937
3938 /* Instruct the CDM to parse the MTK special tag on packets coming from
3939 * the CPU; this also works for untagged packets.
3940 */
3941 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
3942 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
3943 if (mtk_is_netsys_v1(eth)) {
3944 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
3945 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
3946
3947 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
3948 }
3949
3950 /* set interrupt delays based on current Net DIM sample */
3951 mtk_dim_rx(&eth->rx_dim.work);
3952 mtk_dim_tx(&eth->tx_dim.work);
3953
3954 /* disable delay and normal interrupt */
3955 mtk_tx_irq_disable(eth, ~0);
3956 mtk_rx_irq_disable(eth, ~0);
3957
3958 /* FE int grouping */
3959 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
3960 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
3961 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
3962 mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
3963 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
3964
3965 if (mtk_is_netsys_v3_or_greater(eth)) {
3966 /* PSE should not drop port1, port8 and port9 packets */
3967 mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
3968
3969 /* GDM and CDM Threshold */
3970 mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
3971 mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
3972
3973 /* Disable GDM1 RX CRC stripping */
3974 mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
3975
3976 /* The PSE GDM3 MIB counters have incorrect hardware default values,
3977 * so read-clear them here to prevent ethtool from reporting bogus
3978 * MIB values later on.
3979 */
3980 for (i = 0; i < 0x80; i += 0x4)
3981 mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
3982 } else if (!mtk_is_netsys_v1(eth)) {
3983 /* PSE should not drop port8 and port9 packets from WDMA Tx */
3984 mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
3985
3986 /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
3987 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
3988
3989 /* PSE Free Queue Flow Control */
3990 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
3991
3992 /* PSE config input queue threshold */
3993 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
3994 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
3995 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
3996 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
3997 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
3998 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
3999 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
4000 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
4001
4002 /* PSE config output queue threshold */
4003 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
4004 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
4005 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
4006 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
4007 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
4008 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
4009 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
4010 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
4011
4012 /* GDM and CDM Threshold */
4013 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
4014 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
4015 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
4016 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
4017 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
4018 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
4019 }
4020
4021 return 0;
4022
4023 err_disable_pm:
4024 if (!reset) {
4025 pm_runtime_put_sync(eth->dev);
4026 pm_runtime_disable(eth->dev);
4027 }
4028
4029 return ret;
4030 }
4031
4032 static int mtk_hw_deinit(struct mtk_eth *eth)
4033 {
4034 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
4035 return 0;
4036
4037 mtk_clk_disable(eth);
4038
4039 pm_runtime_put_sync(eth->dev);
4040 pm_runtime_disable(eth->dev);
4041
4042 return 0;
4043 }
4044
4045 static void mtk_uninit(struct net_device *dev)
4046 {
4047 struct mtk_mac *mac = netdev_priv(dev);
4048 struct mtk_eth *eth = mac->hw;
4049
4050 phylink_disconnect_phy(mac->phylink);
4051 mtk_tx_irq_disable(eth, ~0);
4052 mtk_rx_irq_disable(eth, ~0);
4053 }
4054
4055 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
4056 {
4057 int length = new_mtu + MTK_RX_ETH_HLEN;
4058 struct mtk_mac *mac = netdev_priv(dev);
4059 struct mtk_eth *eth = mac->hw;
4060
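/* With an XDP program attached, the frame must fit into a single
 * page_pool buffer, so reject MTUs that would exceed it.
 */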
4061 if (rcu_access_pointer(eth->prog) &&
4062 length > MTK_PP_MAX_BUF_SIZE) {
4063 netdev_err(dev, "Invalid MTU for XDP mode\n");
4064 return -EINVAL;
4065 }
4066
4067 mtk_set_mcr_max_rx(mac, length);
4068 WRITE_ONCE(dev->mtu, new_mtu);
4069
4070 return 0;
4071 }
4072
4073 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4074 {
4075 struct mtk_mac *mac = netdev_priv(dev);
4076
4077 switch (cmd) {
4078 case SIOCGMIIPHY:
4079 case SIOCGMIIREG:
4080 case SIOCSMIIREG:
4081 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
4082 default:
4083 break;
4084 }
4085
4086 return -EOPNOTSUPP;
4087 }
4088
4089 static void mtk_prepare_for_reset(struct mtk_eth *eth)
4090 {
4091 u32 val;
4092 int i;
4093
4094 /* set FE PPE ports link down */
4095 for (i = MTK_GMAC1_ID;
4096 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4097 i += 2) {
4098 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4099 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4100 val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4101 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4102 val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4103 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4104 }
4105
4106 /* adjust PPE configurations to prepare for reset */
4107 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4108 mtk_ppe_prepare_reset(eth->ppe[i]);
4109
4110 /* disable NETSYS interrupts */
4111 mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
4112
4113 /* force link down GMAC */
4114 for (i = 0; i < 2; i++) {
4115 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
4116 mtk_w32(eth, val, MTK_MAC_MCR(i));
4117 }
4118 }
4119
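/* Top-level FE recovery: runs under RTNL, stops every running netdev so
 * DMA is quiesced, performs a warm reset via mtk_hw_init(eth, true) and
 * then reopens the devices that were up before the reset.
 */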
4120 static void mtk_pending_work(struct work_struct *work)
4121 {
4122 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
4123 unsigned long restart = 0;
4124 u32 val;
4125 int i;
4126
4127 rtnl_lock();
4128 set_bit(MTK_RESETTING, &eth->state);
4129
4130 mtk_prepare_for_reset(eth);
4131 mtk_wed_fe_reset();
4132 /* Run the reset preliminary configuration again to avoid any possible
4133 * race during the FE reset, since it can run with the RTNL lock released.
4134 */
4135 mtk_prepare_for_reset(eth);
4136
4137 /* stop all devices to make sure that dma is properly shut down */
4138 for (i = 0; i < MTK_MAX_DEVS; i++) {
4139 if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
4140 continue;
4141
4142 mtk_stop(eth->netdev[i]);
4143 __set_bit(i, &restart);
4144 }
4145
4146 usleep_range(15000, 16000);
4147
4148 if (eth->dev->pins)
4149 pinctrl_select_state(eth->dev->pins->p,
4150 eth->dev->pins->default_state);
4151 mtk_hw_init(eth, true);
4152
4153 /* restart DMA and enable IRQs */
4154 for (i = 0; i < MTK_MAX_DEVS; i++) {
4155 if (!eth->netdev[i] || !test_bit(i, &restart))
4156 continue;
4157
4158 if (mtk_open(eth->netdev[i])) {
4159 netif_alert(eth, ifup, eth->netdev[i],
4160 "Driver up/down cycle failed\n");
4161 dev_close(eth->netdev[i]);
4162 }
4163 }
4164
4165 /* set FE PPE ports link up */
4166 for (i = MTK_GMAC1_ID;
4167 i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
4168 i += 2) {
4169 val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
4170 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
4171 val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
4172 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
4173 val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
4174
4175 mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
4176 }
4177
4178 clear_bit(MTK_RESETTING, &eth->state);
4179
4180 mtk_wed_fe_reset_complete();
4181
4182 rtnl_unlock();
4183 }
4184
4185 static int mtk_free_dev(struct mtk_eth *eth)
4186 {
4187 int i;
4188
4189 for (i = 0; i < MTK_MAX_DEVS; i++) {
4190 if (!eth->netdev[i])
4191 continue;
4192 free_netdev(eth->netdev[i]);
4193 }
4194
4195 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
4196 if (!eth->dsa_meta[i])
4197 break;
4198 metadata_dst_free(eth->dsa_meta[i]);
4199 }
4200
4201 free_netdev(eth->dummy_dev);
4202
4203 return 0;
4204 }
4205
4206 static int mtk_unreg_dev(struct mtk_eth *eth)
4207 {
4208 int i;
4209
4210 for (i = 0; i < MTK_MAX_DEVS; i++) {
4211 struct mtk_mac *mac;
4212 if (!eth->netdev[i])
4213 continue;
4214 mac = netdev_priv(eth->netdev[i]);
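/* The device notifier is only registered for QDMA-capable SoCs in
 * mtk_add_mac(), so only unregister it in that case.
 */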
4215 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4216 unregister_netdevice_notifier(&mac->device_notifier);
4217 unregister_netdev(eth->netdev[i]);
4218 }
4219
4220 return 0;
4221 }
4222
4223 static void mtk_sgmii_destroy(struct mtk_eth *eth)
4224 {
4225 int i;
4226
4227 for (i = 0; i < MTK_MAX_DEVS; i++)
4228 mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
4229 }
4230
4231 static int mtk_cleanup(struct mtk_eth *eth)
4232 {
4233 mtk_sgmii_destroy(eth);
4234 mtk_unreg_dev(eth);
4235 mtk_free_dev(eth);
4236 cancel_work_sync(&eth->pending_work);
4237 cancel_delayed_work_sync(&eth->reset.monitor_work);
4238
4239 return 0;
4240 }
4241
4242 static int mtk_get_link_ksettings(struct net_device *ndev,
4243 struct ethtool_link_ksettings *cmd)
4244 {
4245 struct mtk_mac *mac = netdev_priv(ndev);
4246
4247 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4248 return -EBUSY;
4249
4250 return phylink_ethtool_ksettings_get(mac->phylink, cmd);
4251 }
4252
4253 static int mtk_set_link_ksettings(struct net_device *ndev,
4254 const struct ethtool_link_ksettings *cmd)
4255 {
4256 struct mtk_mac *mac = netdev_priv(ndev);
4257
4258 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4259 return -EBUSY;
4260
4261 return phylink_ethtool_ksettings_set(mac->phylink, cmd);
4262 }
4263
4264 static void mtk_get_drvinfo(struct net_device *dev,
4265 struct ethtool_drvinfo *info)
4266 {
4267 struct mtk_mac *mac = netdev_priv(dev);
4268
4269 strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
4270 strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
4271 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
4272 }
4273
4274 static u32 mtk_get_msglevel(struct net_device *dev)
4275 {
4276 struct mtk_mac *mac = netdev_priv(dev);
4277
4278 return mac->hw->msg_enable;
4279 }
4280
4281 static void mtk_set_msglevel(struct net_device *dev, u32 value)
4282 {
4283 struct mtk_mac *mac = netdev_priv(dev);
4284
4285 mac->hw->msg_enable = value;
4286 }
4287
4288 static int mtk_nway_reset(struct net_device *dev)
4289 {
4290 struct mtk_mac *mac = netdev_priv(dev);
4291
4292 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4293 return -EBUSY;
4294
4295 if (!mac->phylink)
4296 return -ENOTSUPP;
4297
4298 return phylink_ethtool_nway_reset(mac->phylink);
4299 }
4300
4301 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4302 {
4303 int i;
4304
4305 switch (stringset) {
4306 case ETH_SS_STATS: {
4307 struct mtk_mac *mac = netdev_priv(dev);
4308
4309 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
4310 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
4311 data += ETH_GSTRING_LEN;
4312 }
4313 if (mtk_page_pool_enabled(mac->hw))
4314 page_pool_ethtool_stats_get_strings(data);
4315 break;
4316 }
4317 default:
4318 break;
4319 }
4320 }
4321
4322 static int mtk_get_sset_count(struct net_device *dev, int sset)
4323 {
4324 switch (sset) {
4325 case ETH_SS_STATS: {
4326 int count = ARRAY_SIZE(mtk_ethtool_stats);
4327 struct mtk_mac *mac = netdev_priv(dev);
4328
4329 if (mtk_page_pool_enabled(mac->hw))
4330 count += page_pool_ethtool_stats_get_count();
4331 return count;
4332 }
4333 default:
4334 return -EOPNOTSUPP;
4335 }
4336 }
4337
4338 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
4339 {
4340 struct page_pool_stats stats = {};
4341 int i;
4342
4343 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
4344 struct mtk_rx_ring *ring = &eth->rx_ring[i];
4345
4346 if (!ring->page_pool)
4347 continue;
4348
4349 page_pool_get_stats(ring->page_pool, &stats);
4350 }
4351 page_pool_ethtool_stats_get(data, &stats);
4352 }
4353
4354 static void mtk_get_ethtool_stats(struct net_device *dev,
4355 struct ethtool_stats *stats, u64 *data)
4356 {
4357 struct mtk_mac *mac = netdev_priv(dev);
4358 struct mtk_hw_stats *hwstats = mac->hw_stats;
4359 u64 *data_src, *data_dst;
4360 unsigned int start;
4361 int i;
4362
4363 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
4364 return;
4365
4366 if (netif_running(dev) && netif_device_present(dev)) {
4367 if (spin_trylock_bh(&hwstats->stats_lock)) {
4368 mtk_stats_update_mac(mac);
4369 spin_unlock_bh(&hwstats->stats_lock);
4370 }
4371 }
4372
4373 data_src = (u64 *)hwstats;
4374
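/* Re-read the whole snapshot if the writer updated the counters while we
 * were copying them (u64_stats seqcount retry loop).
 */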
4375 do {
4376 data_dst = data;
4377 start = u64_stats_fetch_begin(&hwstats->syncp);
4378
4379 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
4380 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
4381 if (mtk_page_pool_enabled(mac->hw))
4382 mtk_ethtool_pp_stats(mac->hw, data_dst);
4383 } while (u64_stats_fetch_retry(&hwstats->syncp, start));
4384 }
4385
4386 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
4387 u32 *rule_locs)
4388 {
4389 int ret = -EOPNOTSUPP;
4390
4391 switch (cmd->cmd) {
4392 case ETHTOOL_GRXRINGS:
4393 if (dev->hw_features & NETIF_F_LRO) {
4394 cmd->data = MTK_MAX_RX_RING_NUM;
4395 ret = 0;
4396 }
4397 break;
4398 case ETHTOOL_GRXCLSRLCNT:
4399 if (dev->hw_features & NETIF_F_LRO) {
4400 struct mtk_mac *mac = netdev_priv(dev);
4401
4402 cmd->rule_cnt = mac->hwlro_ip_cnt;
4403 ret = 0;
4404 }
4405 break;
4406 case ETHTOOL_GRXCLSRULE:
4407 if (dev->hw_features & NETIF_F_LRO)
4408 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
4409 break;
4410 case ETHTOOL_GRXCLSRLALL:
4411 if (dev->hw_features & NETIF_F_LRO)
4412 ret = mtk_hwlro_get_fdir_all(dev, cmd,
4413 rule_locs);
4414 break;
4415 default:
4416 break;
4417 }
4418
4419 return ret;
4420 }
4421
4422 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
4423 {
4424 int ret = -EOPNOTSUPP;
4425
4426 switch (cmd->cmd) {
4427 case ETHTOOL_SRXCLSRLINS:
4428 if (dev->hw_features & NETIF_F_LRO)
4429 ret = mtk_hwlro_add_ipaddr(dev, cmd);
4430 break;
4431 case ETHTOOL_SRXCLSRLDEL:
4432 if (dev->hw_features & NETIF_F_LRO)
4433 ret = mtk_hwlro_del_ipaddr(dev, cmd);
4434 break;
4435 default:
4436 break;
4437 }
4438
4439 return ret;
4440 }
4441
4442 static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
4443 struct net_device *sb_dev)
4444 {
4445 struct mtk_mac *mac = netdev_priv(dev);
4446 unsigned int queue = 0;
4447
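/* Assumed queue layout: the first queues carry traffic sent directly from
 * a GMAC (queue == mac->id), while DSA switch ports get dedicated queues
 * starting at a fixed offset of 3.
 */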
4448 if (netdev_uses_dsa(dev))
4449 queue = skb_get_queue_mapping(skb) + 3;
4450 else
4451 queue = mac->id;
4452
4453 if (queue >= dev->num_tx_queues)
4454 queue = 0;
4455
4456 return queue;
4457 }
4458
4459 static const struct ethtool_ops mtk_ethtool_ops = {
4460 .get_link_ksettings = mtk_get_link_ksettings,
4461 .set_link_ksettings = mtk_set_link_ksettings,
4462 .get_drvinfo = mtk_get_drvinfo,
4463 .get_msglevel = mtk_get_msglevel,
4464 .set_msglevel = mtk_set_msglevel,
4465 .nway_reset = mtk_nway_reset,
4466 .get_link = ethtool_op_get_link,
4467 .get_strings = mtk_get_strings,
4468 .get_sset_count = mtk_get_sset_count,
4469 .get_ethtool_stats = mtk_get_ethtool_stats,
4470 .get_rxnfc = mtk_get_rxnfc,
4471 .set_rxnfc = mtk_set_rxnfc,
4472 };
4473
4474 static const struct net_device_ops mtk_netdev_ops = {
4475 .ndo_uninit = mtk_uninit,
4476 .ndo_open = mtk_open,
4477 .ndo_stop = mtk_stop,
4478 .ndo_start_xmit = mtk_start_xmit,
4479 .ndo_set_mac_address = mtk_set_mac_address,
4480 .ndo_validate_addr = eth_validate_addr,
4481 .ndo_eth_ioctl = mtk_do_ioctl,
4482 .ndo_change_mtu = mtk_change_mtu,
4483 .ndo_tx_timeout = mtk_tx_timeout,
4484 .ndo_get_stats64 = mtk_get_stats64,
4485 .ndo_fix_features = mtk_fix_features,
4486 .ndo_set_features = mtk_set_features,
4487 #ifdef CONFIG_NET_POLL_CONTROLLER
4488 .ndo_poll_controller = mtk_poll_controller,
4489 #endif
4490 .ndo_setup_tc = mtk_eth_setup_tc,
4491 .ndo_bpf = mtk_xdp,
4492 .ndo_xdp_xmit = mtk_xdp_xmit,
4493 .ndo_select_queue = mtk_select_queue,
4494 };
4495
4496 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
4497 {
4498 const __be32 *_id = of_get_property(np, "reg", NULL);
4499 phy_interface_t phy_mode;
4500 struct phylink *phylink;
4501 struct mtk_mac *mac;
4502 int id, err;
4503 int txqs = 1;
4504 u32 val;
4505
4506 if (!_id) {
4507 dev_err(eth->dev, "missing mac id\n");
4508 return -EINVAL;
4509 }
4510
4511 id = be32_to_cpup(_id);
4512 if (id >= MTK_MAX_DEVS) {
4513 dev_err(eth->dev, "%d is not a valid mac id\n", id);
4514 return -EINVAL;
4515 }
4516
4517 if (eth->netdev[id]) {
4518 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
4519 return -EINVAL;
4520 }
4521
4522 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
4523 txqs = MTK_QDMA_NUM_QUEUES;
4524
4525 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
4526 if (!eth->netdev[id]) {
4527 dev_err(eth->dev, "alloc_etherdev failed\n");
4528 return -ENOMEM;
4529 }
4530 mac = netdev_priv(eth->netdev[id]);
4531 eth->mac[id] = mac;
4532 mac->id = id;
4533 mac->hw = eth;
4534 mac->of_node = np;
4535
4536 err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
4537 if (err == -EPROBE_DEFER)
4538 return err;
4539
4540 if (err) {
4541 /* If the mac address is invalid, use random mac address */
4542 eth_hw_addr_random(eth->netdev[id]);
4543 dev_err(eth->dev, "generated random MAC address %pM\n",
4544 eth->netdev[id]->dev_addr);
4545 }
4546
4547 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
4548 mac->hwlro_ip_cnt = 0;
4549
4550 mac->hw_stats = devm_kzalloc(eth->dev,
4551 sizeof(*mac->hw_stats),
4552 GFP_KERNEL);
4553 if (!mac->hw_stats) {
4554 dev_err(eth->dev, "failed to allocate counter memory\n");
4555 err = -ENOMEM;
4556 goto free_netdev;
4557 }
4558 spin_lock_init(&mac->hw_stats->stats_lock);
4559 u64_stats_init(&mac->hw_stats->syncp);
4560
4561 if (mtk_is_netsys_v3_or_greater(eth))
4562 mac->hw_stats->reg_offset = id * 0x80;
4563 else
4564 mac->hw_stats->reg_offset = id * 0x40;
4565
4566 /* phylink create */
4567 err = of_get_phy_mode(np, &phy_mode);
4568 if (err) {
4569 dev_err(eth->dev, "incorrect phy-mode\n");
4570 goto free_netdev;
4571 }
4572
4573 /* mac config is not set */
4574 mac->interface = PHY_INTERFACE_MODE_NA;
4575 mac->speed = SPEED_UNKNOWN;
4576
4577 mac->phylink_config.dev = &eth->netdev[id]->dev;
4578 mac->phylink_config.type = PHYLINK_NETDEV;
4579 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
4580 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
4581
4582 /* MT7623 gmac0 is now missing its speed-specific PLL configuration
4583 * in its .mac_config method (since state->speed is not valid there).
4584 * Disable support for MII, GMII and RGMII.
4585 */
4586 if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
4587 __set_bit(PHY_INTERFACE_MODE_MII,
4588 mac->phylink_config.supported_interfaces);
4589 __set_bit(PHY_INTERFACE_MODE_GMII,
4590 mac->phylink_config.supported_interfaces);
4591
4592 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
4593 phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
4594 }
4595
4596 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
4597 __set_bit(PHY_INTERFACE_MODE_TRGMII,
4598 mac->phylink_config.supported_interfaces);
4599
4600 /* TRGMII is not permitted on MT7621 if using DDR2 */
4601 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
4602 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
4603 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
4604 if (val & SYSCFG_DRAM_TYPE_DDR2)
4605 __clear_bit(PHY_INTERFACE_MODE_TRGMII,
4606 mac->phylink_config.supported_interfaces);
4607 }
4608
4609 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
4610 __set_bit(PHY_INTERFACE_MODE_SGMII,
4611 mac->phylink_config.supported_interfaces);
4612 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
4613 mac->phylink_config.supported_interfaces);
4614 __set_bit(PHY_INTERFACE_MODE_2500BASEX,
4615 mac->phylink_config.supported_interfaces);
4616 }
4617
4618 if (mtk_is_netsys_v3_or_greater(mac->hw) &&
4619 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
4620 id == MTK_GMAC1_ID) {
4621 mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
4622 MAC_SYM_PAUSE |
4623 MAC_10000FD;
4624 phy_interface_zero(mac->phylink_config.supported_interfaces);
4625 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
4626 mac->phylink_config.supported_interfaces);
4627 }
4628
4629 phylink = phylink_create(&mac->phylink_config,
4630 of_fwnode_handle(mac->of_node),
4631 phy_mode, &mtk_phylink_ops);
4632 if (IS_ERR(phylink)) {
4633 err = PTR_ERR(phylink);
4634 goto free_netdev;
4635 }
4636
4637 mac->phylink = phylink;
4638
4639 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
4640 eth->netdev[id]->watchdog_timeo = 5 * HZ;
4641 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
4642 eth->netdev[id]->base_addr = (unsigned long)eth->base;
4643
4644 eth->netdev[id]->hw_features = eth->soc->hw_features;
4645 if (eth->hwlro)
4646 eth->netdev[id]->hw_features |= NETIF_F_LRO;
4647
4648 eth->netdev[id]->vlan_features = eth->soc->hw_features &
4649 ~NETIF_F_HW_VLAN_CTAG_TX;
4650 eth->netdev[id]->features |= eth->soc->hw_features;
4651 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
4652
4653 eth->netdev[id]->irq = eth->irq[0];
4654 eth->netdev[id]->dev.of_node = np;
4655
4656 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4657 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
4658 else
4659 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
4660
4661 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
4662 mac->device_notifier.notifier_call = mtk_device_event;
4663 register_netdevice_notifier(&mac->device_notifier);
4664 }
4665
4666 if (mtk_page_pool_enabled(eth))
4667 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
4668 NETDEV_XDP_ACT_REDIRECT |
4669 NETDEV_XDP_ACT_NDO_XMIT |
4670 NETDEV_XDP_ACT_NDO_XMIT_SG;
4671
4672 return 0;
4673
4674 free_netdev:
4675 free_netdev(eth->netdev[id]);
4676 return err;
4677 }
4678
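/* Switch the device used for DMA mappings: close every running netdev,
 * update eth->dma_dev and then reopen them so their DMA resources are
 * reallocated against the new device.
 */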
4679 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
4680 {
4681 struct net_device *dev, *tmp;
4682 LIST_HEAD(dev_list);
4683 int i;
4684
4685 rtnl_lock();
4686
4687 for (i = 0; i < MTK_MAX_DEVS; i++) {
4688 dev = eth->netdev[i];
4689
4690 if (!dev || !(dev->flags & IFF_UP))
4691 continue;
4692
4693 list_add_tail(&dev->close_list, &dev_list);
4694 }
4695
4696 dev_close_many(&dev_list, false);
4697
4698 eth->dma_dev = dma_dev;
4699
4700 list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
4701 list_del_init(&dev->close_list);
4702 dev_open(dev, NULL);
4703 }
4704
4705 rtnl_unlock();
4706 }
4707
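/* Create one LynxI PCS instance per "mediatek,sgmiisys" phandle; a missing
 * phandle simply terminates the loop.
 */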
4708 static int mtk_sgmii_init(struct mtk_eth *eth)
4709 {
4710 struct device_node *np;
4711 struct regmap *regmap;
4712 u32 flags;
4713 int i;
4714
4715 for (i = 0; i < MTK_MAX_DEVS; i++) {
4716 np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
4717 if (!np)
4718 break;
4719
4720 regmap = syscon_node_to_regmap(np);
4721 flags = 0;
4722 if (of_property_read_bool(np, "mediatek,pnswap"))
4723 flags |= MTK_SGMII_FLAG_PN_SWAP;
4724
4725 of_node_put(np);
4726
4727 if (IS_ERR(regmap))
4728 return PTR_ERR(regmap);
4729
4730 eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
4731 eth->soc->ana_rgc3,
4732 flags);
4733 }
4734
4735 return 0;
4736 }
4737
4738 static int mtk_probe(struct platform_device *pdev)
4739 {
4740 struct resource *res = NULL, *res_sram;
4741 struct device_node *mac_np;
4742 struct mtk_eth *eth;
4743 int err, i;
4744
4745 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
4746 if (!eth)
4747 return -ENOMEM;
4748
4749 eth->soc = of_device_get_match_data(&pdev->dev);
4750
4751 eth->dev = &pdev->dev;
4752 eth->dma_dev = &pdev->dev;
4753 eth->base = devm_platform_ioremap_resource(pdev, 0);
4754 if (IS_ERR(eth->base))
4755 return PTR_ERR(eth->base);
4756
4757 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
4758 eth->ip_align = NET_IP_ALIGN;
4759
4760 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4761 /* SRAM is actual memory and supports transparent access just like DRAM.
4762 * Hence we don't need the __iomem annotation and don't need to use
4763 * accessor functions to read from or write to SRAM.
4764 */
4765 if (mtk_is_netsys_v3_or_greater(eth)) {
4766 eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
4767 if (IS_ERR(eth->sram_base))
4768 return PTR_ERR(eth->sram_base);
4769 } else {
4770 eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
4771 }
4772 }
4773
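/* SoCs with 36-bit DMA allow 36-bit addressing for streaming mappings
 * while keeping coherent allocations within 32 bits.
 */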
4774 if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
4775 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
4776 if (!err)
4777 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
4778
4779 if (err) {
4780 dev_err(&pdev->dev, "Wrong DMA config\n");
4781 return -EINVAL;
4782 }
4783 }
4784
4785 spin_lock_init(&eth->page_lock);
4786 spin_lock_init(&eth->tx_irq_lock);
4787 spin_lock_init(&eth->rx_irq_lock);
4788 spin_lock_init(&eth->dim_lock);
4789
4790 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4791 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
4792 INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
4793
4794 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4795 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
4796
4797 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4798 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4799 "mediatek,ethsys");
4800 if (IS_ERR(eth->ethsys)) {
4801 dev_err(&pdev->dev, "no ethsys regmap found\n");
4802 return PTR_ERR(eth->ethsys);
4803 }
4804 }
4805
4806 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
4807 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4808 "mediatek,infracfg");
4809 if (IS_ERR(eth->infra)) {
4810 dev_err(&pdev->dev, "no infracfg regmap found\n");
4811 return PTR_ERR(eth->infra);
4812 }
4813 }
4814
4815 if (of_dma_is_coherent(pdev->dev.of_node)) {
4816 struct regmap *cci;
4817
4818 cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4819 "cci-control-port");
4820 /* enable CPU/bus coherency */
4821 if (!IS_ERR(cci))
4822 regmap_write(cci, 0, 3);
4823 }
4824
4825 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
4826 err = mtk_sgmii_init(eth);
4827
4828 if (err)
4829 return err;
4830 }
4831
4832 if (eth->soc->required_pctl) {
4833 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
4834 "mediatek,pctl");
4835 if (IS_ERR(eth->pctl)) {
4836 dev_err(&pdev->dev, "no pctl regmap found\n");
4837 err = PTR_ERR(eth->pctl);
4838 goto err_destroy_sgmii;
4839 }
4840 }
4841
4842 if (mtk_is_netsys_v2_or_greater(eth)) {
4843 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4844 if (!res) {
4845 err = -EINVAL;
4846 goto err_destroy_sgmii;
4847 }
4848 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
4849 if (mtk_is_netsys_v3_or_greater(eth)) {
4850 res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4851 if (!res_sram) {
4852 err = -EINVAL;
4853 goto err_destroy_sgmii;
4854 }
4855 eth->phy_scratch_ring = res_sram->start;
4856 } else {
4857 eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
4858 }
4859 }
4860 }
4861
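/* Register one WED instance per "mediatek,wed" phandle, handing over the
 * matching WDMA register window and its physical address.
 */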
4862 if (eth->soc->offload_version) {
4863 for (i = 0;; i++) {
4864 struct device_node *np;
4865 phys_addr_t wdma_phy;
4866 u32 wdma_base;
4867
4868 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
4869 break;
4870
4871 np = of_parse_phandle(pdev->dev.of_node,
4872 "mediatek,wed", i);
4873 if (!np)
4874 break;
4875
4876 wdma_base = eth->soc->reg_map->wdma_base[i];
4877 wdma_phy = res ? res->start + wdma_base : 0;
4878 mtk_wed_add_hw(np, eth, eth->base + wdma_base,
4879 wdma_phy, i);
4880 }
4881 }
4882
4883 for (i = 0; i < 3; i++) {
4884 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
4885 eth->irq[i] = eth->irq[0];
4886 else
4887 eth->irq[i] = platform_get_irq(pdev, i);
4888 if (eth->irq[i] < 0) {
4889 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
4890 err = -ENXIO;
4891 goto err_wed_exit;
4892 }
4893 }
4894 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
4895 eth->clks[i] = devm_clk_get(eth->dev,
4896 mtk_clks_source_name[i]);
4897 if (IS_ERR(eth->clks[i])) {
4898 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
4899 err = -EPROBE_DEFER;
4900 goto err_wed_exit;
4901 }
4902 if (eth->soc->required_clks & BIT(i)) {
4903 dev_err(&pdev->dev, "clock %s not found\n",
4904 mtk_clks_source_name[i]);
4905 err = -EINVAL;
4906 goto err_wed_exit;
4907 }
4908 eth->clks[i] = NULL;
4909 }
4910 }
4911
4912 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
4913 INIT_WORK(&eth->pending_work, mtk_pending_work);
4914
4915 err = mtk_hw_init(eth, false);
4916 if (err)
4917 goto err_wed_exit;
4918
4919 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
4920
4921 for_each_child_of_node(pdev->dev.of_node, mac_np) {
4922 if (!of_device_is_compatible(mac_np,
4923 "mediatek,eth-mac"))
4924 continue;
4925
4926 if (!of_device_is_available(mac_np))
4927 continue;
4928
4929 err = mtk_add_mac(eth, mac_np);
4930 if (err) {
4931 of_node_put(mac_np);
4932 goto err_deinit_hw;
4933 }
4934 }
4935
4936 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
4937 err = devm_request_irq(eth->dev, eth->irq[0],
4938 mtk_handle_irq, 0,
4939 dev_name(eth->dev), eth);
4940 } else {
4941 err = devm_request_irq(eth->dev, eth->irq[1],
4942 mtk_handle_irq_tx, 0,
4943 dev_name(eth->dev), eth);
4944 if (err)
4945 goto err_free_dev;
4946
4947 err = devm_request_irq(eth->dev, eth->irq[2],
4948 mtk_handle_irq_rx, 0,
4949 dev_name(eth->dev), eth);
4950 }
4951 if (err)
4952 goto err_free_dev;
4953
4954 /* No MT7628/88 support yet */
4955 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
4956 err = mtk_mdio_init(eth);
4957 if (err)
4958 goto err_free_dev;
4959 }
4960
4961 if (eth->soc->offload_version) {
4962 u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
4963
4964 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
4965 for (i = 0; i < num_ppe; i++) {
4966 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
4967
4968 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
4969
4970 if (!eth->ppe[i]) {
4971 err = -ENOMEM;
4972 goto err_deinit_ppe;
4973 }
4974 }
4975
4976 err = mtk_eth_offload_init(eth);
4977 if (err)
4978 goto err_deinit_ppe;
4979 }
4980
4981 for (i = 0; i < MTK_MAX_DEVS; i++) {
4982 if (!eth->netdev[i])
4983 continue;
4984
4985 err = register_netdev(eth->netdev[i]);
4986 if (err) {
4987 dev_err(eth->dev, "error bringing up device\n");
4988 goto err_deinit_ppe;
4989 } else
4990 netif_info(eth, probe, eth->netdev[i],
4991 "mediatek frame engine at 0x%08lx, irq %d\n",
4992 eth->netdev[i]->base_addr, eth->irq[0]);
4993 }
4994
4995 /* we run 2 devices on the same DMA ring so we need a dummy device
4996 * for NAPI to work
4997 */
4998 eth->dummy_dev = alloc_netdev_dummy(0);
4999 if (!eth->dummy_dev) {
5000 err = -ENOMEM;
5001 dev_err(eth->dev, "failed to allocated dummy device\n");
5002 goto err_unreg_netdev;
5003 }
5004 netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
5005 netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
5006
5007 platform_set_drvdata(pdev, eth);
5008 schedule_delayed_work(&eth->reset.monitor_work,
5009 MTK_DMA_MONITOR_TIMEOUT);
5010
5011 return 0;
5012
5013 err_unreg_netdev:
5014 mtk_unreg_dev(eth);
5015 err_deinit_ppe:
5016 mtk_ppe_deinit(eth);
5017 mtk_mdio_cleanup(eth);
5018 err_free_dev:
5019 mtk_free_dev(eth);
5020 err_deinit_hw:
5021 mtk_hw_deinit(eth);
5022 err_wed_exit:
5023 mtk_wed_exit();
5024 err_destroy_sgmii:
5025 mtk_sgmii_destroy(eth);
5026
5027 return err;
5028 }
5029
5030 static void mtk_remove(struct platform_device *pdev)
5031 {
5032 struct mtk_eth *eth = platform_get_drvdata(pdev);
5033 struct mtk_mac *mac;
5034 int i;
5035
5036 /* stop all devices to make sure that dma is properly shut down */
5037 for (i = 0; i < MTK_MAX_DEVS; i++) {
5038 if (!eth->netdev[i])
5039 continue;
5040 mtk_stop(eth->netdev[i]);
5041 mac = netdev_priv(eth->netdev[i]);
5042 phylink_disconnect_phy(mac->phylink);
5043 }
5044
5045 mtk_wed_exit();
5046 mtk_hw_deinit(eth);
5047
5048 netif_napi_del(&eth->tx_napi);
5049 netif_napi_del(&eth->rx_napi);
5050 mtk_cleanup(eth);
5051 mtk_mdio_cleanup(eth);
5052 }
5053
5054 static const struct mtk_soc_data mt2701_data = {
5055 .reg_map = &mtk_reg_map,
5056 .caps = MT7623_CAPS | MTK_HWLRO,
5057 .hw_features = MTK_HW_FEATURES,
5058 .required_clks = MT7623_CLKS_BITMAP,
5059 .required_pctl = true,
5060 .version = 1,
5061 .tx = {
5062 .desc_size = sizeof(struct mtk_tx_dma),
5063 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5064 .dma_len_offset = 16,
5065 .dma_size = MTK_DMA_SIZE(2K),
5066 .fq_dma_size = MTK_DMA_SIZE(2K),
5067 },
5068 .rx = {
5069 .desc_size = sizeof(struct mtk_rx_dma),
5070 .irq_done_mask = MTK_RX_DONE_INT,
5071 .dma_l4_valid = RX_DMA_L4_VALID,
5072 .dma_size = MTK_DMA_SIZE(2K),
5073 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5074 .dma_len_offset = 16,
5075 },
5076 };
5077
5078 static const struct mtk_soc_data mt7621_data = {
5079 .reg_map = &mtk_reg_map,
5080 .caps = MT7621_CAPS,
5081 .hw_features = MTK_HW_FEATURES,
5082 .required_clks = MT7621_CLKS_BITMAP,
5083 .required_pctl = false,
5084 .version = 1,
5085 .offload_version = 1,
5086 .hash_offset = 2,
5087 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5088 .tx = {
5089 .desc_size = sizeof(struct mtk_tx_dma),
5090 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5091 .dma_len_offset = 16,
5092 .dma_size = MTK_DMA_SIZE(2K),
5093 .fq_dma_size = MTK_DMA_SIZE(2K),
5094 },
5095 .rx = {
5096 .desc_size = sizeof(struct mtk_rx_dma),
5097 .irq_done_mask = MTK_RX_DONE_INT,
5098 .dma_l4_valid = RX_DMA_L4_VALID,
5099 .dma_size = MTK_DMA_SIZE(2K),
5100 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5101 .dma_len_offset = 16,
5102 },
5103 };
5104
5105 static const struct mtk_soc_data mt7622_data = {
5106 .reg_map = &mtk_reg_map,
5107 .ana_rgc3 = 0x2028,
5108 .caps = MT7622_CAPS | MTK_HWLRO,
5109 .hw_features = MTK_HW_FEATURES,
5110 .required_clks = MT7622_CLKS_BITMAP,
5111 .required_pctl = false,
5112 .version = 1,
5113 .offload_version = 2,
5114 .hash_offset = 2,
5115 .has_accounting = true,
5116 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5117 .tx = {
5118 .desc_size = sizeof(struct mtk_tx_dma),
5119 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5120 .dma_len_offset = 16,
5121 .dma_size = MTK_DMA_SIZE(2K),
5122 .fq_dma_size = MTK_DMA_SIZE(2K),
5123 },
5124 .rx = {
5125 .desc_size = sizeof(struct mtk_rx_dma),
5126 .irq_done_mask = MTK_RX_DONE_INT,
5127 .dma_l4_valid = RX_DMA_L4_VALID,
5128 .dma_size = MTK_DMA_SIZE(2K),
5129 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5130 .dma_len_offset = 16,
5131 },
5132 };
5133
5134 static const struct mtk_soc_data mt7623_data = {
5135 .reg_map = &mtk_reg_map,
5136 .caps = MT7623_CAPS | MTK_HWLRO,
5137 .hw_features = MTK_HW_FEATURES,
5138 .required_clks = MT7623_CLKS_BITMAP,
5139 .required_pctl = true,
5140 .version = 1,
5141 .offload_version = 1,
5142 .hash_offset = 2,
5143 .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
5144 .disable_pll_modes = true,
5145 .tx = {
5146 .desc_size = sizeof(struct mtk_tx_dma),
5147 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5148 .dma_len_offset = 16,
5149 .dma_size = MTK_DMA_SIZE(2K),
5150 .fq_dma_size = MTK_DMA_SIZE(2K),
5151 },
5152 .rx = {
5153 .desc_size = sizeof(struct mtk_rx_dma),
5154 .irq_done_mask = MTK_RX_DONE_INT,
5155 .dma_l4_valid = RX_DMA_L4_VALID,
5156 .dma_size = MTK_DMA_SIZE(2K),
5157 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5158 .dma_len_offset = 16,
5159 },
5160 };
5161
5162 static const struct mtk_soc_data mt7629_data = {
5163 .reg_map = &mtk_reg_map,
5164 .ana_rgc3 = 0x128,
5165 .caps = MT7629_CAPS | MTK_HWLRO,
5166 .hw_features = MTK_HW_FEATURES,
5167 .required_clks = MT7629_CLKS_BITMAP,
5168 .required_pctl = false,
5169 .has_accounting = true,
5170 .version = 1,
5171 .tx = {
5172 .desc_size = sizeof(struct mtk_tx_dma),
5173 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5174 .dma_len_offset = 16,
5175 .dma_size = MTK_DMA_SIZE(2K),
5176 .fq_dma_size = MTK_DMA_SIZE(2K),
5177 },
5178 .rx = {
5179 .desc_size = sizeof(struct mtk_rx_dma),
5180 .irq_done_mask = MTK_RX_DONE_INT,
5181 .dma_l4_valid = RX_DMA_L4_VALID,
5182 .dma_size = MTK_DMA_SIZE(2K),
5183 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5184 .dma_len_offset = 16,
5185 },
5186 };
5187
5188 static const struct mtk_soc_data mt7981_data = {
5189 .reg_map = &mt7986_reg_map,
5190 .ana_rgc3 = 0x128,
5191 .caps = MT7981_CAPS,
5192 .hw_features = MTK_HW_FEATURES,
5193 .required_clks = MT7981_CLKS_BITMAP,
5194 .required_pctl = false,
5195 .version = 2,
5196 .offload_version = 2,
5197 .hash_offset = 4,
5198 .has_accounting = true,
5199 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5200 .tx = {
5201 .desc_size = sizeof(struct mtk_tx_dma_v2),
5202 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5203 .dma_len_offset = 8,
5204 .dma_size = MTK_DMA_SIZE(2K),
5205 .fq_dma_size = MTK_DMA_SIZE(2K),
5206 },
5207 .rx = {
5208 .desc_size = sizeof(struct mtk_rx_dma),
5209 .irq_done_mask = MTK_RX_DONE_INT,
5210 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5211 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5212 .dma_len_offset = 16,
5213 .dma_size = MTK_DMA_SIZE(2K),
5214 },
5215 };
5216
5217 static const struct mtk_soc_data mt7986_data = {
5218 .reg_map = &mt7986_reg_map,
5219 .ana_rgc3 = 0x128,
5220 .caps = MT7986_CAPS,
5221 .hw_features = MTK_HW_FEATURES,
5222 .required_clks = MT7986_CLKS_BITMAP,
5223 .required_pctl = false,
5224 .version = 2,
5225 .offload_version = 2,
5226 .hash_offset = 4,
5227 .has_accounting = true,
5228 .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
5229 .tx = {
5230 .desc_size = sizeof(struct mtk_tx_dma_v2),
5231 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5232 .dma_len_offset = 8,
5233 .dma_size = MTK_DMA_SIZE(2K),
5234 .fq_dma_size = MTK_DMA_SIZE(2K),
5235 },
5236 .rx = {
5237 .desc_size = sizeof(struct mtk_rx_dma),
5238 .irq_done_mask = MTK_RX_DONE_INT,
5239 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5240 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5241 .dma_len_offset = 16,
5242 .dma_size = MTK_DMA_SIZE(2K),
5243 },
5244 };
5245
5246 static const struct mtk_soc_data mt7988_data = {
5247 .reg_map = &mt7988_reg_map,
5248 .ana_rgc3 = 0x128,
5249 .caps = MT7988_CAPS,
5250 .hw_features = MTK_HW_FEATURES,
5251 .required_clks = MT7988_CLKS_BITMAP,
5252 .required_pctl = false,
5253 .version = 3,
5254 .offload_version = 2,
5255 .hash_offset = 4,
5256 .has_accounting = true,
5257 .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
5258 .tx = {
5259 .desc_size = sizeof(struct mtk_tx_dma_v2),
5260 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5261 .dma_len_offset = 8,
5262 .dma_size = MTK_DMA_SIZE(2K),
5263 .fq_dma_size = MTK_DMA_SIZE(4K),
5264 },
5265 .rx = {
5266 .desc_size = sizeof(struct mtk_rx_dma_v2),
5267 .irq_done_mask = MTK_RX_DONE_INT_V2,
5268 .dma_l4_valid = RX_DMA_L4_VALID_V2,
5269 .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
5270 .dma_len_offset = 8,
5271 .dma_size = MTK_DMA_SIZE(2K),
5272 },
5273 };
5274
5275 static const struct mtk_soc_data rt5350_data = {
5276 .reg_map = &mt7628_reg_map,
5277 .caps = MT7628_CAPS,
5278 .hw_features = MTK_HW_FEATURES_MT7628,
5279 .required_clks = MT7628_CLKS_BITMAP,
5280 .required_pctl = false,
5281 .version = 1,
5282 .tx = {
5283 .desc_size = sizeof(struct mtk_tx_dma),
5284 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5285 .dma_len_offset = 16,
5286 .dma_size = MTK_DMA_SIZE(2K),
5287 },
5288 .rx = {
5289 .desc_size = sizeof(struct mtk_rx_dma),
5290 .irq_done_mask = MTK_RX_DONE_INT,
5291 .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
5292 .dma_max_len = MTK_TX_DMA_BUF_LEN,
5293 .dma_len_offset = 16,
5294 .dma_size = MTK_DMA_SIZE(2K),
5295 },
5296 };
5297
5298 const struct of_device_id of_mtk_match[] = {
5299 { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
5300 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
5301 { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
5302 { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
5303 { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
5304 { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
5305 { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
5306 { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
5307 { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
5308 {},
5309 };
5310 MODULE_DEVICE_TABLE(of, of_mtk_match);
5311
5312 static struct platform_driver mtk_driver = {
5313 .probe = mtk_probe,
5314 .remove_new = mtk_remove,
5315 .driver = {
5316 .name = "mtk_soc_eth",
5317 .of_match_table = of_mtk_match,
5318 },
5319 };
5320
5321 module_platform_driver(mtk_driver);
5322
5323 MODULE_LICENSE("GPL");
5324 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
5325 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
5326