1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6 
7 #include <linux/bitrev.h>
8 #include <linux/crc32.h>
9 #include <linux/iopoll.h>
10 #include "stmmac.h"
11 #include "stmmac_ptp.h"
12 #include "dwxlgmac2.h"
13 #include "dwxgmac2.h"
14 
15 static void dwxgmac2_core_init(struct mac_device_info *hw,
16 			       struct net_device *dev)
17 {
18 	void __iomem *ioaddr = hw->pcsr;
19 	u32 tx, rx;
20 
21 	tx = readl(ioaddr + XGMAC_TX_CONFIG);
22 	rx = readl(ioaddr + XGMAC_RX_CONFIG);
23 
24 	tx |= XGMAC_CORE_INIT_TX;
25 	rx |= XGMAC_CORE_INIT_RX;
26 
27 	if (hw->ps) {
28 		tx |= XGMAC_CONFIG_TE;
29 		tx &= ~hw->link.speed_mask;
30 
31 		switch (hw->ps) {
32 		case SPEED_10000:
33 			tx |= hw->link.xgmii.speed10000;
34 			break;
35 		case SPEED_2500:
36 			tx |= hw->link.speed2500;
37 			break;
38 		case SPEED_1000:
39 		default:
40 			tx |= hw->link.speed1000;
41 			break;
42 		}
43 	}
44 
45 	writel(tx, ioaddr + XGMAC_TX_CONFIG);
46 	writel(rx, ioaddr + XGMAC_RX_CONFIG);
47 	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
48 }
49 
50 static void xgmac_phylink_get_caps(struct stmmac_priv *priv)
51 {
52 	priv->phylink_config.mac_capabilities |= MAC_2500FD | MAC_5000FD |
53 						 MAC_10000FD | MAC_25000FD |
54 						 MAC_40000FD | MAC_50000FD |
55 						 MAC_100000FD;
56 }
57 
58 static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
59 {
60 	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
61 	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
62 
63 	if (enable) {
64 		tx |= XGMAC_CONFIG_TE;
65 		rx |= XGMAC_CONFIG_RE;
66 	} else {
67 		tx &= ~XGMAC_CONFIG_TE;
68 		rx &= ~XGMAC_CONFIG_RE;
69 	}
70 
71 	writel(tx, ioaddr + XGMAC_TX_CONFIG);
72 	writel(rx, ioaddr + XGMAC_RX_CONFIG);
73 }
74 
75 static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
76 {
77 	void __iomem *ioaddr = hw->pcsr;
78 	u32 value;
79 
80 	value = readl(ioaddr + XGMAC_RX_CONFIG);
81 	if (hw->rx_csum)
82 		value |= XGMAC_CONFIG_IPC;
83 	else
84 		value &= ~XGMAC_CONFIG_IPC;
85 	writel(value, ioaddr + XGMAC_RX_CONFIG);
86 
87 	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
88 }
89 
90 static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
91 				     u32 queue)
92 {
93 	void __iomem *ioaddr = hw->pcsr;
94 	u32 value;
95 
96 	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
97 	if (mode == MTL_QUEUE_AVB)
98 		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
99 	else if (mode == MTL_QUEUE_DCB)
100 		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
101 	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
102 }
103 
104 static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
105 				   u32 queue)
106 {
107 	void __iomem *ioaddr = hw->pcsr;
108 	u32 value, reg;
109 
110 	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
111 	if (queue >= 4)
112 		queue -= 4;
113 
114 	value = readl(ioaddr + reg);
115 	value &= ~XGMAC_PSRQ(queue);
116 	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
117 
118 	writel(value, ioaddr + reg);
119 }
120 
121 static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
122 				   u32 queue)
123 {
124 	void __iomem *ioaddr = hw->pcsr;
125 	u32 value, reg;
126 
127 	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
128 	if (queue >= 4)
129 		queue -= 4;
130 
131 	value = readl(ioaddr + reg);
132 	value &= ~XGMAC_PSTC(queue);
133 	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
134 
135 	writel(value, ioaddr + reg);
136 }
137 
static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw,
				      u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Mask/shift pairs for each routable packet type.  Indexed by
	 * (packet - 1), so the PACKET_* values are assumed to be 1-based
	 * and within this table's range — TODO confirm at the call sites.
	 */
	static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = {
		{ XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT },
		{ XGMAC_PTPQ, XGMAC_PTPQ_SHIFT },
		{ XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT },
		{ XGMAC_UPQ, XGMAC_UPQ_SHIFT },
		{ XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + XGMAC_RXQ_CTRL1);

	/* routing configuration: steer this packet type to @queue */
	value &= ~dwxgmac2_route_possibilities[packet - 1].reg_mask;
	value |= (queue << dwxgmac2_route_possibilities[packet - 1].reg_shift) &
		 dwxgmac2_route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops: a dedicated routing-enable bit */
	if (packet == PACKET_AVCPQ)
		value |= FIELD_PREP(XGMAC_TACPQE, 1);
	else if (packet == PACKET_MCBCQ)
		value |= FIELD_PREP(XGMAC_MCBCQEN, 1);

	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
}
167 
168 static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
169 					    u32 rx_alg)
170 {
171 	void __iomem *ioaddr = hw->pcsr;
172 	u32 value;
173 
174 	value = readl(ioaddr + XGMAC_MTL_OPMODE);
175 	value &= ~XGMAC_RAA;
176 
177 	switch (rx_alg) {
178 	case MTL_RX_ALGORITHM_SP:
179 		break;
180 	case MTL_RX_ALGORITHM_WSP:
181 		value |= XGMAC_RAA;
182 		break;
183 	default:
184 		break;
185 	}
186 
187 	writel(value, ioaddr + XGMAC_MTL_OPMODE);
188 }
189 
190 static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
191 					    u32 tx_alg)
192 {
193 	void __iomem *ioaddr = hw->pcsr;
194 	bool ets = true;
195 	u32 value;
196 	int i;
197 
198 	value = readl(ioaddr + XGMAC_MTL_OPMODE);
199 	value &= ~XGMAC_ETSALG;
200 
201 	switch (tx_alg) {
202 	case MTL_TX_ALGORITHM_WRR:
203 		value |= XGMAC_WRR;
204 		break;
205 	case MTL_TX_ALGORITHM_WFQ:
206 		value |= XGMAC_WFQ;
207 		break;
208 	case MTL_TX_ALGORITHM_DWRR:
209 		value |= XGMAC_DWRR;
210 		break;
211 	default:
212 		ets = false;
213 		break;
214 	}
215 
216 	writel(value, ioaddr + XGMAC_MTL_OPMODE);
217 
218 	/* Set ETS if desired */
219 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
220 		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
221 		value &= ~XGMAC_TSA;
222 		if (ets)
223 			value |= XGMAC_ETS;
224 		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
225 	}
226 }
227 
228 static void dwxgmac2_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
229 					     struct mac_device_info *hw,
230 					     u32 weight, u32 queue)
231 {
232 	void __iomem *ioaddr = hw->pcsr;
233 
234 	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
235 }
236 
237 static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
238 				    u32 chan)
239 {
240 	void __iomem *ioaddr = hw->pcsr;
241 	u32 value, reg;
242 
243 	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
244 	if (queue >= 4)
245 		queue -= 4;
246 
247 	value = readl(ioaddr + reg);
248 	value &= ~XGMAC_QxMDMACH(queue);
249 	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
250 
251 	writel(value, ioaddr + reg);
252 }
253 
254 static void dwxgmac2_config_cbs(struct stmmac_priv *priv,
255 				struct mac_device_info *hw,
256 				u32 send_slope, u32 idle_slope,
257 				u32 high_credit, u32 low_credit, u32 queue)
258 {
259 	void __iomem *ioaddr = hw->pcsr;
260 	u32 value;
261 
262 	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
263 	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
264 	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
265 	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
266 
267 	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
268 	value &= ~XGMAC_TSA;
269 	value |= XGMAC_CC | XGMAC_CBS;
270 	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
271 }
272 
273 static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
274 {
275 	void __iomem *ioaddr = hw->pcsr;
276 	int i;
277 
278 	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
279 		reg_space[i] = readl(ioaddr + i * 4);
280 }
281 
/* Read and dispatch the top-level MAC interrupt sources.
 *
 * Returns a CORE_IRQ_* bitmask for the LPI events the caller must act
 * on, and bumps the extra-stats counters for everything handled here.
 */
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	/* Only look at sources that are actually enabled */
	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		/* PMT is read purely for its side effect (value discarded);
		 * presumably read-to-clear — confirm against the databook.
		 */
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		/* LPI_CTRL reports which LPI transition(s) happened */
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		/* RX-side LPI transitions are only counted, not returned */
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}
318 
319 static int dwxgmac2_host_mtl_irq_status(struct stmmac_priv *priv,
320 					struct mac_device_info *hw, u32 chan)
321 {
322 	void __iomem *ioaddr = hw->pcsr;
323 	int ret = 0;
324 	u32 status;
325 
326 	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
327 	if (status & BIT(chan)) {
328 		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
329 
330 		if (chan_status & XGMAC_RXOVFIS)
331 			ret |= CORE_IRQ_MTL_RX_OVERFLOW;
332 
333 		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
334 	}
335 
336 	return ret;
337 }
338 
339 static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
340 			       unsigned int fc, unsigned int pause_time,
341 			       u32 tx_cnt)
342 {
343 	void __iomem *ioaddr = hw->pcsr;
344 	u32 i;
345 
346 	if (fc & FLOW_RX)
347 		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
348 	if (fc & FLOW_TX) {
349 		for (i = 0; i < tx_cnt; i++) {
350 			u32 value = XGMAC_TFE;
351 
352 			if (duplex)
353 				value |= pause_time << XGMAC_PT_SHIFT;
354 
355 			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
356 		}
357 	}
358 }
359 
/* Program the PMT (remote wake-up) block.
 * @mode: WAKE_* flags (Wake-on-LAN configuration).
 */
static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 val = 0x0;

	if (mode & WAKE_MAGIC)
		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
	if (mode & WAKE_UCAST)
		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
	if (val) {
		/* Keep the receiver enabled so wake-up frames can be seen
		 * while the MAC is powered down.
		 */
		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
		cfg |= XGMAC_CONFIG_RE;
		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
	}

	/* A zero value (no wake sources) disables power-down mode */
	writel(val, ioaddr + XGMAC_PMT);
}
377 
378 static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
379 				   const unsigned char *addr,
380 				   unsigned int reg_n)
381 {
382 	void __iomem *ioaddr = hw->pcsr;
383 	u32 value;
384 
385 	value = (addr[5] << 8) | addr[4];
386 	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
387 
388 	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
389 	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
390 }
391 
392 static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
393 				   unsigned char *addr, unsigned int reg_n)
394 {
395 	void __iomem *ioaddr = hw->pcsr;
396 	u32 hi_addr, lo_addr;
397 
398 	/* Read the MAC address from the hardware */
399 	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
400 	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
401 
402 	/* Extract the MAC address from the high and low words */
403 	addr[0] = lo_addr & 0xff;
404 	addr[1] = (lo_addr >> 8) & 0xff;
405 	addr[2] = (lo_addr >> 16) & 0xff;
406 	addr[3] = (lo_addr >> 24) & 0xff;
407 	addr[4] = hi_addr & 0xff;
408 	addr[5] = (hi_addr >> 8) & 0xff;
409 }
410 
411 static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
412 				  bool en_tx_lpi_clockgating)
413 {
414 	void __iomem *ioaddr = hw->pcsr;
415 	u32 value;
416 
417 	value = readl(ioaddr + XGMAC_LPI_CTRL);
418 
419 	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
420 	if (en_tx_lpi_clockgating)
421 		value |= XGMAC_TXCGE;
422 
423 	writel(value, ioaddr + XGMAC_LPI_CTRL);
424 }
425 
426 static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
427 {
428 	void __iomem *ioaddr = hw->pcsr;
429 	u32 value;
430 
431 	value = readl(ioaddr + XGMAC_LPI_CTRL);
432 	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
433 	writel(value, ioaddr + XGMAC_LPI_CTRL);
434 }
435 
436 static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
437 {
438 	void __iomem *ioaddr = hw->pcsr;
439 	u32 value;
440 
441 	value = readl(ioaddr + XGMAC_LPI_CTRL);
442 	if (link)
443 		value |= XGMAC_PLS;
444 	else
445 		value &= ~XGMAC_PLS;
446 	writel(value, ioaddr + XGMAC_LPI_CTRL);
447 }
448 
449 static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
450 {
451 	void __iomem *ioaddr = hw->pcsr;
452 	u32 value;
453 
454 	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
455 	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
456 }
457 
458 static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
459 				int mcbitslog2)
460 {
461 	int numhashregs, regs;
462 
463 	switch (mcbitslog2) {
464 	case 6:
465 		numhashregs = 2;
466 		break;
467 	case 7:
468 		numhashregs = 4;
469 		break;
470 	case 8:
471 		numhashregs = 8;
472 		break;
473 	default:
474 		return;
475 	}
476 
477 	for (regs = 0; regs < numhashregs; regs++)
478 		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
479 }
480 
/* Program the RX packet filter from the net_device state:
 * promiscuous mode, multicast hash filtering and unicast perfect
 * filtering, in that order of precedence.
 */
static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	/* Start clean: no promiscuous, no hash multicast, no pass-all-mc */
	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Too many addresses for the hash: accept all multicast */
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* Hash bin = top mcbitslog2 bits of the bit-reversed
			 * CRC32 of the MAC address
			 */
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	/* mc_filter is all-zero unless the hash branch ran above */
	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Too many for perfect filtering: fall back to promiscuous */
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;	/* entry 0 presumably holds the primary MAC
				 * address — it is left untouched here
				 */

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Zero out (invalidate) the remaining address entries */
		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}
538 
539 static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
540 {
541 	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
542 
543 	if (enable)
544 		value |= XGMAC_CONFIG_LM;
545 	else
546 		value &= ~XGMAC_CONFIG_LM;
547 
548 	writel(value, ioaddr + XGMAC_RX_CONFIG);
549 }
550 
551 static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
552 				  u32 val)
553 {
554 	u32 ctrl = 0;
555 
556 	writel(val, ioaddr + XGMAC_RSS_DATA);
557 	ctrl |= idx << XGMAC_RSSIA_SHIFT;
558 	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
559 	ctrl |= XGMAC_OB;
560 	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
561 
562 	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
563 				  !(ctrl & XGMAC_OB), 100, 10000);
564 }
565 
/* Configure RSS: program the hash key and indirection table through
 * the indirect access registers, route all RX queues through the
 * RSS-selected DMA channel and enable IPv4/TCP/UDP hashing.
 * A NULL or disabled @cfg turns RSS off instead.
 *
 * Returns 0 on success or the error from the indirect register write.
 */
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	/* The key is written one 32-bit word at a time */
	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	/* Then the indirection table, one entry per write */
	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	/* Let the RSS result pick the DMA channel for every RX queue */
	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}
600 
/* Configure VLAN filtering: hash matching when @hash is non-zero,
 * single perfect match when only @perfect_match is set, otherwise
 * disable VLAN filtering.  @is_double selects double-VLAN handling
 * in the first two modes.
 */
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		/* Hash mode: VLAN filtering with hash-table matching */
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		/* Perfect match mode: compare against one VID, no hashing */
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~XGMAC_VLAN_VTHM;
		value |= XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		/* Replace the VID field with the perfect-match value */
		value &= ~XGMAC_VLAN_VID;
		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		/* Filtering disabled: clear every VLAN match control */
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}
670 
/* Human-readable description of one bit in a safety interrupt
 * status register.
 */
struct dwxgmac3_error_desc {
	bool valid;			/* false for reserved/unknown bits */
	const char *desc;		/* short mnemonic */
	const char *detailed_desc;	/* full description for logging */
};

/* Byte offset of a per-module counter array in stmmac_safety_stats */
#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)
678 
/* Log and count the error bits set in @value.
 *
 * @corr: true when the reported errors are correctable (only changes
 *	  the message text).
 * @field_offset: byte offset of this module's counter array inside
 *	  struct stmmac_safety_stats (obtained via STAT_OFF()).
 *
 * Each set bit in @value indexes both @desc and the counter array, so
 * the two must have matching 32-entry layouts.
 */
static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	/* View the stats struct as a flat array of unsigned long counters
	 * starting at field_offset.
	 */
	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}
701 
/* One entry per bit of XGMAC_MAC_DPP_FSM_INT_STATUS (see
 * dwxgmac3_handle_mac_err()); invalid entries are reserved bits.
 */
static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};
736 
737 static void dwxgmac3_handle_mac_err(struct net_device *ndev,
738 				    void __iomem *ioaddr, bool correctable,
739 				    struct stmmac_safety_stats *stats)
740 {
741 	u32 value;
742 
743 	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
744 	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
745 
746 	dwxgmac3_log_error(ndev, value, correctable, "MAC",
747 			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
748 }
749 
/* One entry per bit of XGMAC_MTL_ECC_INT_STATUS (see
 * dwxgmac3_handle_mtl_err()); invalid entries are reserved bits.
 */
static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
784 
785 static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
786 				    void __iomem *ioaddr, bool correctable,
787 				    struct stmmac_safety_stats *stats)
788 {
789 	u32 value;
790 
791 	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
792 	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
793 
794 	dwxgmac3_log_error(ndev, value, correctable, "MTL",
795 			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
796 }
797 
/* One entry per bit of XGMAC_DMA_ECC_INT_STATUS (see
 * dwxgmac3_handle_dma_err()); invalid entries are reserved bits.
 */
static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
832 
833 static void dwxgmac3_handle_dma_err(struct net_device *ndev,
834 				    void __iomem *ioaddr, bool correctable,
835 				    struct stmmac_safety_stats *stats)
836 {
837 	u32 value;
838 
839 	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
840 	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
841 
842 	dwxgmac3_log_error(ndev, value, correctable, "DMA",
843 			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
844 }
845 
/* Enable the ECC/parity safety features according to the ASP
 * (safety package) capability level.
 *
 * Returns 0 on success, -EINVAL when the hardware has no safety
 * support (@asp == 0).  @safety_cfg is unused in this implementation.
 */
static int
dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
			    struct stmmac_safety_feature_cfg *safety_cfg)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features (writing 0 clears all disable bits) */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* 0x2: Without ECC or Parity Ports on External Application Interface
	 * 0x4: Only ECC Protection for External Memory feature is selected
	 */
	if (asp == 0x2 || asp == 0x4)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}
886 
/* Demultiplex and handle the safety (ECC/parity/FSM) interrupts.
 *
 * Returns non-zero when at least one uncorrectable error was handled,
 * 0 for correctable-only events, -EINVAL without safety support.
 */
static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	/* MAC-level (MCSIS) errors are always treated as uncorrectable */
	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* MTL memory errors: correctable (MECIS/MSCIS) or not (MEUIS/MSUIS) */
	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* DMA memory errors: correctable (DECIS) or uncorrectable (DEUIS) */
	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}
926 
/* Module order must match the per-module counter layout used by
 * dwxgmac3_safety_feat_dump(): mac_errors, mtl_errors, dma_errors.
 */
static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};
934 
935 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
936 				     int index, unsigned long *count,
937 				     const char **desc)
938 {
939 	int module = index / 32, offset = index % 32;
940 	unsigned long *ptr = (unsigned long *)stats;
941 
942 	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
943 		return -EINVAL;
944 	if (!dwxgmac3_all_errors[module].desc[offset].valid)
945 		return -EINVAL;
946 	if (count)
947 		*count = *(ptr + index);
948 	if (desc)
949 		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
950 	return 0;
951 }
952 
953 static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
954 {
955 	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
956 
957 	val &= ~XGMAC_FRPE;
958 	writel(val, ioaddr + XGMAC_MTL_OPMODE);
959 
960 	return 0;
961 }
962 
963 static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
964 {
965 	u32 val;
966 
967 	val = readl(ioaddr + XGMAC_MTL_OPMODE);
968 	val |= XGMAC_FRPE;
969 	writel(val, ioaddr + XGMAC_MTL_OPMODE);
970 }
971 
/* Write one parser table entry into the Flexible RX Parser instruction
 * memory at table position @pos, one 32-bit word at a time, through the
 * indirect data/control register pair.
 *
 * Returns 0 on success or the (negative) timeout error from a poll.
 */
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		/* Each table position spans several 32-bit words */
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP: select write (vs read) access */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done (STARTBUSY clears again) */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}
1013 
1014 static struct stmmac_tc_entry *
1015 dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
1016 			    unsigned int count, u32 curr_prio)
1017 {
1018 	struct stmmac_tc_entry *entry;
1019 	u32 min_prio = ~0x0;
1020 	int i, min_prio_idx;
1021 	bool found = false;
1022 
1023 	for (i = count - 1; i >= 0; i--) {
1024 		entry = &entries[i];
1025 
1026 		/* Do not update unused entries */
1027 		if (!entry->in_use)
1028 			continue;
1029 		/* Do not update already updated entries (i.e. fragments) */
1030 		if (entry->in_hw)
1031 			continue;
1032 		/* Let last entry be updated last */
1033 		if (entry->is_last)
1034 			continue;
1035 		/* Do not return fragments */
1036 		if (entry->is_frag)
1037 			continue;
1038 		/* Check if we already checked this prio */
1039 		if (entry->prio < curr_prio)
1040 			continue;
1041 		/* Check if this is the minimum prio */
1042 		if (entry->prio < min_prio) {
1043 			min_prio = entry->prio;
1044 			min_prio_idx = i;
1045 			found = true;
1046 		}
1047 	}
1048 
1049 	if (found)
1050 		return &entries[min_prio_idx];
1051 	return NULL;
1052 }
1053 
/* (Re)program the whole Rx Parser instruction table from @entries.
 * Rx and the parser are disabled while the table is rewritten; the
 * previous Rx enable state is restored on every exit path.
 */
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;	/* nve: number of entries flashed so far */
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			/* +2: skip this entry and its fragment */
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		/* Flash the fragment right behind its parent entry */
		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	/* Nothing flashed: leave the parser disabled (ret is 0 here) */
	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}
1141 
/* Read the MAC-captured Tx timestamp into @ts (nanoseconds).
 * Returns -EBUSY if no capture is signalled within the poll timeout.
 */
static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Wait for the Tx timestamp captured (TXTSC) status bit */
	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
				      value, value & XGMAC_TXTSC, 100, 10000))
		return -EBUSY;

	/* Combine the nanoseconds and seconds snapshot registers */
	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
	return 0;
}
1155 
/* Configure flexible PPS output @index: program start time, interval and
 * width (50% duty cycle), then issue the START command. With @enable false
 * only a STOP command is issued.
 *
 * Returns 0 on success, -EINVAL on bad arguments or an unrepresentable
 * period, -EBUSY while a previous target time is still pending in HW.
 */
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	/* Previous target time not yet latched by the hardware */
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	/* Clear this output's command/mode/enable field before rebuilding */
	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);

	/* XGMAC Core has 4 PPS outputs at most.
	 *
	 * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for
	 * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
	 * and can not be switched to Fixed mode, since PPSEN{1,2,3} are
	 * read-only reserved to 0.
	 * But we always set PPSEN{1,2,3} do not make things worse ;-)
	 *
	 * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
	 * be set, or the PPS outputs stay in Fixed PPS mode by default.
	 */
	val |= XGMAC_PPSENx(index);

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	/* Without digital rollover (TSCTRLSSR clear) the subsecond register
	 * counts in ~0.465ns units, hence the *1000/465 scaling —
	 * NOTE(review): confirm granularity against the systime programming.
	 */
	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	/* Express the period in units of the subsecond increment */
	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	/* Pulse width = half the period (50% duty cycle) */
	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}
1221 
1222 static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
1223 {
1224 	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
1225 
1226 	value &= ~XGMAC_CONFIG_SARC;
1227 	value |= val << XGMAC_CONFIG_SARC_SHIFT;
1228 
1229 	writel(value, ioaddr + XGMAC_TX_CONFIG);
1230 }
1231 
1232 static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
1233 {
1234 	void __iomem *ioaddr = hw->pcsr;
1235 	u32 value;
1236 
1237 	value = readl(ioaddr + XGMAC_VLAN_INCL);
1238 	value |= XGMAC_VLAN_VLTI;
1239 	value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
1240 	value &= ~XGMAC_VLAN_VLC;
1241 	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
1242 	writel(value, ioaddr + XGMAC_VLAN_INCL);
1243 }
1244 
1245 static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1246 {
1247 	void __iomem *ioaddr = hw->pcsr;
1248 	u32 value;
1249 
1250 	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1251 			       !(value & XGMAC_XB), 100, 10000))
1252 		return -EBUSY;
1253 	return 0;
1254 }
1255 
/* Indirect read of register @reg of L3/L4 filter @filter_no into @data:
 * wait for the engine to be idle, kick a transfer with TT (read direction)
 * and XB set, wait for completion, then fetch the data register.
 * Returns 0 or -EBUSY on poll timeout.
 */
static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Engine must be idle before starting a new transfer */
	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_TT | XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	/* Wait for the read to complete before sampling the data register */
	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);
	return 0;
}
1278 
/* Indirect write of @data to register @reg of L3/L4 filter @filter_no:
 * wait for the engine to be idle, stage the data, kick the transfer with
 * XB set (TT clear = write direction), then wait for completion.
 * Returns 0 or -EBUSY on poll timeout.
 */
static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Engine must be idle before starting a new transfer */
	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	/* Stage the payload first, then trigger the transfer */
	writel(data, ioaddr + XGMAC_L3L4_DATA);

	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	value |= XGMAC_XB;
	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}
1298 
/* Configure L3 (IP address) filter slot @filter_no.
 * @en:    when false, the control word is cleared again at the end
 * @ipv6:  select IPv6 matching instead of IPv4
 * @sa:    match on source address, otherwise destination address
 * @inv:   enable inverse matching
 * @match: address value to program
 * Returns 0 or a negative error from the indirect filter accessors.
 */
static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Globally enable the IP filtering engine */
	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= XGMAC_L3PEN0;
		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	} else {
		/* NOTE(review): unlike the IPv6 branch above, stale
		 * SAM/SAIM/DAM/DAIM bits from a previous configuration are
		 * not cleared here — confirm whether IPv4 SA and DA matching
		 * may legitimately coexist before changing this.
		 */
		value &= ~XGMAC_L3PEN0;
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	/* Program the address: ADDR0 for source, ADDR1 for destination */
	if (sa) {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
		if (ret)
			return ret;
	} else {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
		if (ret)
			return ret;
	}

	/* Tearing down: clear the whole control word */
	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}
1361 
1362 static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1363 				     bool en, bool udp, bool sa, bool inv,
1364 				     u32 match)
1365 {
1366 	void __iomem *ioaddr = hw->pcsr;
1367 	u32 value;
1368 	int ret;
1369 
1370 	value = readl(ioaddr + XGMAC_PACKET_FILTER);
1371 	value |= XGMAC_FILTER_IPFE;
1372 	writel(value, ioaddr + XGMAC_PACKET_FILTER);
1373 
1374 	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1375 	if (ret)
1376 		return ret;
1377 
1378 	if (udp) {
1379 		value |= XGMAC_L4PEN0;
1380 	} else {
1381 		value &= ~XGMAC_L4PEN0;
1382 	}
1383 
1384 	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
1385 	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
1386 	if (sa) {
1387 		value |= XGMAC_L4SPM0;
1388 		if (inv)
1389 			value |= XGMAC_L4SPIM0;
1390 	} else {
1391 		value |= XGMAC_L4DPM0;
1392 		if (inv)
1393 			value |= XGMAC_L4DPIM0;
1394 	}
1395 
1396 	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1397 	if (ret)
1398 		return ret;
1399 
1400 	if (sa) {
1401 		value = match & XGMAC_L4SP0;
1402 
1403 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1404 		if (ret)
1405 			return ret;
1406 	} else {
1407 		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
1408 
1409 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1410 		if (ret)
1411 			return ret;
1412 	}
1413 
1414 	if (!en)
1415 		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1416 
1417 	return 0;
1418 }
1419 
1420 static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
1421 				     u32 addr)
1422 {
1423 	void __iomem *ioaddr = hw->pcsr;
1424 	u32 value;
1425 
1426 	writel(addr, ioaddr + XGMAC_ARP_ADDR);
1427 
1428 	value = readl(ioaddr + XGMAC_RX_CONFIG);
1429 	if (en)
1430 		value |= XGMAC_CONFIG_ARPEN;
1431 	else
1432 		value &= ~XGMAC_CONFIG_ARPEN;
1433 	writel(value, ioaddr + XGMAC_RX_CONFIG);
1434 }
1435 
/* Write @val into the EST block through the indirect GCL access registers:
 * with @gcl true the target is the gate control list entry @reg, otherwise
 * (GCRR set) one of the EST configuration registers addressed by @reg.
 * Returns 0 once the SRWO start bit self-clears, or the poll-timeout error.
 */
static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
{
	u32 ctrl;

	/* Stage the payload first */
	writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);

	ctrl = (reg << XGMAC_ADDR_SHIFT);
	ctrl |= gcl ? 0 : XGMAC_GCRR;

	/* Program the address, then kick the transfer with SRWO */
	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);

	ctrl |= XGMAC_SRWO;
	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);

	return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
					 ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
}
1453 
1454 static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
1455 				  unsigned int ptp_rate)
1456 {
1457 	int i, ret = 0x0;
1458 	u32 ctrl;
1459 
1460 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
1461 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
1462 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
1463 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
1464 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
1465 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
1466 	if (ret)
1467 		return ret;
1468 
1469 	for (i = 0; i < cfg->gcl_size; i++) {
1470 		ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
1471 		if (ret)
1472 			return ret;
1473 	}
1474 
1475 	ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
1476 	ctrl &= ~XGMAC_PTOV;
1477 	ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
1478 	if (cfg->enable)
1479 		ctrl |= XGMAC_EEST | XGMAC_SSWL;
1480 	else
1481 		ctrl &= ~XGMAC_EEST;
1482 
1483 	writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
1484 	return 0;
1485 }
1486 
/* Enable/disable Frame Preemption (FPE) via EFPE. When enabling, the
 * number of Rx queues is also programmed into RXQ_CTRL1.
 * @cfg and @num_txq are currently unused by this implementation.
 */
static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
				   u32 num_txq,
				   u32 num_rxq, bool enable)
{
	u32 value;

	if (!enable) {
		value = readl(ioaddr + XGMAC_FPE_CTRL_STS);

		value &= ~XGMAC_EFPE;

		writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
		return;
	}

	/* Program the Rx queue count field (register holds count - 1) */
	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
	value &= ~XGMAC_RQ;
	value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
	writel(value, ioaddr + XGMAC_RXQ_CTRL1);

	value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
	value |= XGMAC_EFPE;
	writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
}
1511 
/* MAC-level callbacks for XGMAC2 cores (see dwxgmac2_setup()).
 * PCS and debug hooks are intentionally not implemented (NULL).
 */
const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.phylink_get_caps = xgmac_phylink_get_caps,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1559 
1560 static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
1561 				      u32 queue)
1562 {
1563 	void __iomem *ioaddr = hw->pcsr;
1564 	u32 value;
1565 
1566 	value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
1567 	if (mode == MTL_QUEUE_AVB)
1568 		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
1569 	else if (mode == MTL_QUEUE_DCB)
1570 		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
1571 	writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
1572 }
1573 
/* MAC-level callbacks for XLGMAC cores (see dwxlgmac2_setup()). Identical
 * to dwxgmac210_ops except .rx_queue_enable, which uses the XLGMAC
 * register layout.
 */
const struct stmmac_ops dwxlgmac2_ops = {
	.core_init = dwxgmac2_core_init,
	.phylink_get_caps = xgmac_phylink_get_caps,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1621 
/* Fill the mac_device_info link-speed and MDIO parameters for an XGMAC2
 * core. Always returns 0.
 */
int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	/* Per-speed MAC configuration speed-select values */
	mac->link.duplex = 0;
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	/* MDIO register field layout */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}
1658 
/* Fill the mac_device_info link-speed and MDIO parameters for an XLGMAC
 * core (adds the 25G/40G/50G/100G xlgmii speeds). Always returns 0.
 */
int dwxlgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXLGMAC\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	/* Per-speed MAC configuration speed-select values */
	mac->link.duplex = 0;
	mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
	mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
	mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
	mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
	mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
	mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
	mac->link.speed_mask = XLGMAC_CONFIG_SS;

	/* MDIO register field layout (same as XGMAC2) */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}
1695