1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * (C) Copyright 2009
4  * Marvell Semiconductor <www.marvell.com>
5  * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
6  *
7  * (C) Copyright 2003
8  * Ingo Assmus <ingo.assmus@keymile.com>
9  *
10  * based on - Driver for MV64360X ethernet ports
11  * Copyright (C) 2002 rabeeh@galileo.co.il
12  */
13 
14 #include <common.h>
15 #include <dm.h>
16 #include <log.h>
17 #include <net.h>
18 #include <malloc.h>
19 #include <miiphy.h>
20 #include <wait_bit.h>
21 #include <asm/global_data.h>
22 #include <asm/io.h>
23 #include <linux/delay.h>
24 #include <linux/errno.h>
25 #include <asm/types.h>
26 #include <asm/system.h>
27 #include <asm/byteorder.h>
28 #include <asm/arch/cpu.h>
29 
30 #if defined(CONFIG_ARCH_KIRKWOOD)
31 #include <asm/arch/soc.h>
32 #elif defined(CONFIG_ARCH_ORION5X)
33 #include <asm/arch/orion5x.h>
34 #endif
35 
36 #include "mvgbe.h"
37 
38 DECLARE_GLOBAL_DATA_PTR;
39 
40 #ifndef CONFIG_MVGBE_PORTS
41 # define CONFIG_MVGBE_PORTS {0, 0}
42 #endif
43 
44 #define MV_PHY_ADR_REQUEST 0xee
45 #define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
46 
47 #if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
smi_wait_ready(struct mvgbe_device * dmvgbe)48 static int smi_wait_ready(struct mvgbe_device *dmvgbe)
49 {
50 	int ret;
51 
52 	ret = wait_for_bit_le32(&MVGBE_SMI_REG, MVGBE_PHY_SMI_BUSY_MASK, false,
53 				MVGBE_PHY_SMI_TIMEOUT_MS, false);
54 	if (ret) {
55 		printf("Error: SMI busy timeout\n");
56 		return ret;
57 	}
58 
59 	return 0;
60 }
61 
/*
 * __mvgbe_mdio_read - Read a 16-bit value over the SMI/MDIO interface.
 *
 * The magic address pair (MV_PHY_ADR_REQUEST, MV_PHY_ADR_REQUEST) is a
 * driver-internal convention: it returns the PHY address currently
 * programmed in the port's phyadr register instead of accessing a PHY.
 *
 * @dmvgbe:	driver private data (provides the register base)
 * @phy_adr:	PHY address on the SMI bus
 * @devad:	MMD device address (unused by this controller)
 * @reg_ofs:	PHY register offset
 *
 * Returns the 16-bit register value (non-negative) or -EFAULT on error.
 */
static int __mvgbe_mdio_read(struct mvgbe_device *dmvgbe, int phy_adr,
			     int devad, int reg_ofs)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;
	u16 data = 0;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		/* Return the PHY address stored in the controller */
		data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return data;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy address and regiser offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/*wait till read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/*
	 * Wait for the data to update in the SMI register.
	 * NOTE(review): this is an empty counting loop with no side
	 * effects; an optimizing compiler may remove it entirely —
	 * confirm whether a real delay (udelay) is required here.
	 */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
	      data);

	return data;
}
125 
126 /*
127  * smi_reg_read - miiphy_read callback function.
128  *
129  * Returns 16bit phy register value, or -EFAULT on error
130  */
smi_reg_read(struct mii_dev * bus,int phy_adr,int devad,int reg_ofs)131 static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
132 			int reg_ofs)
133 {
134 #ifdef CONFIG_DM_ETH
135 	struct mvgbe_device *dmvgbe = bus->priv;
136 #else
137 	struct eth_device *dev = eth_get_dev_by_name(bus->name);
138 	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
139 #endif
140 
141 	return __mvgbe_mdio_read(dmvgbe, phy_adr, devad, reg_ofs);
142 }
143 
/*
 * __mvgbe_mdio_write - Write a 16-bit value over the SMI/MDIO interface.
 *
 * The magic pair (MV_PHY_ADR_REQUEST, MV_PHY_ADR_REQUEST) programs the
 * port phyadr register with @data instead of writing to a PHY register.
 *
 * @dmvgbe:	driver private data
 * @phy_adr:	PHY address on the SMI bus
 * @devad:	MMD device address (unused by this controller)
 * @reg_ofs:	PHY register offset
 * @data:	value to write
 *
 * Returns 0 on success, -EINVAL for a bad PHY address, -EFAULT for a
 * bad register offset or SMI busy timeout.
 */
static int __mvgbe_mdio_write(struct mvgbe_device *dmvgbe, int phy_adr,
			      int devad, int reg_ofs, u16 data)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;

	/* Phyadr write request*/
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	/* clearing the read opcode bit selects the write opcode */
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}
182 
183 /*
184  * smi_reg_write - miiphy_write callback function.
185  *
186  * Returns 0 if write succeed, -EFAULT on error
187  */
static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
			 int reg_ofs, u16 data)
{
	struct mvgbe_device *dmvgbe;

#ifdef CONFIG_DM_ETH
	dmvgbe = bus->priv;
#else
	dmvgbe = to_mvgbe(eth_get_dev_by_name(bus->name));
#endif

	return __mvgbe_mdio_write(dmvgbe, phy_adr, devad, reg_ofs, data);
}
200 #endif
201 
202 /* Stop and checks all queues */
/* Stop all active channels of a queue and wait until they report idle. */
static void stop_queue(u32 *qreg)
{
	u32 active = readl(qreg);

	if (!(active & 0xFF))
		return;

	/* Issue stop command for active channels only */
	writel(active << 8, qreg);

	/* Wait until the queue register reports all queues stopped */
	while (readl(qreg) & 0xFF)
		;
}
224 
225 /*
226  * set_access_control - Config address decode parameters for Ethernet unit
227  *
228  * This function configures the address decode parameters for the Gigabit
229  * Ethernet Controller according the given parameters struct.
230  *
231  * @regs	Register struct pointer.
232  * @param	Address decode parameter struct.
233  */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register: 2 permission bits per window */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR): size is encoded in 64 KiB units - 1 */
	MVGBE_REG_WR(regs->barsz[param->win].size,
			(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
			(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR): only windows 0-3 have one */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/*
	 * Base address enable reg (BARER).
	 * NOTE(review): clearing the bit enables the window here, which
	 * suggests 'bare' holds disable bits — confirm with the datasheet.
	 */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}
263 
set_dram_access(struct mvgbe_registers * regs)264 static void set_dram_access(struct mvgbe_registers *regs)
265 {
266 	struct mvgbe_winparam win_param;
267 	int i;
268 
269 	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
270 		/* Set access parameters for DRAM bank i */
271 		win_param.win = i;	/* Use Ethernet window i */
272 		/* Window target - DDR */
273 		win_param.target = MVGBE_TARGET_DRAM;
274 		/* Enable full access */
275 		win_param.access_ctrl = EWIN_ACCESS_FULL;
276 		win_param.high_addr = 0;
277 		/* Get bank base and size */
278 		win_param.base_addr = gd->bd->bi_dram[i].start;
279 		win_param.size = gd->bd->bi_dram[i].size;
280 		if (win_param.size == 0)
281 			win_param.enable = 0;
282 		else
283 			win_param.enable = 1;	/* Enable the access */
284 
285 		/* Enable DRAM bank */
286 		switch (i) {
287 		case 0:
288 			win_param.attrib = EBAR_DRAM_CS0;
289 			break;
290 		case 1:
291 			win_param.attrib = EBAR_DRAM_CS1;
292 			break;
293 		case 2:
294 			win_param.attrib = EBAR_DRAM_CS2;
295 			break;
296 		case 3:
297 			win_param.attrib = EBAR_DRAM_CS3;
298 			break;
299 		default:
300 			/* invalid bank, disable access */
301 			win_param.enable = 0;
302 			win_param.attrib = 0;
303 			break;
304 		}
305 		/* Set the access control for address window(EPAPR) RD/WR */
306 		set_access_control(regs, &win_param);
307 	}
308 }
309 
310 /*
311  * port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
312  *
313  * Go through all the DA filter tables (Unicast, Special Multicast & Other
314  * Multicast) and set each entry to 0.
315  */
port_init_mac_tables(struct mvgbe_registers * regs)316 static void port_init_mac_tables(struct mvgbe_registers *regs)
317 {
318 	int table_index;
319 
320 	/* Clear DA filter unicast table (Ex_dFUT) */
321 	for (table_index = 0; table_index < 4; ++table_index)
322 		MVGBE_REG_WR(regs->dfut[table_index], 0);
323 
324 	for (table_index = 0; table_index < 64; ++table_index) {
325 		/* Clear DA filter special multicast table (Ex_dFSMT) */
326 		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
327 		/* Clear DA filter other multicast table (Ex_dFOMT) */
328 		MVGBE_REG_WR(regs->dfomt[table_index], 0);
329 	}
330 }
331 
332 /*
333  * port_uc_addr - This function Set the port unicast address table
334  *
335  * This function locates the proper entry in the Unicast table for the
336  * specified MAC nibble and sets its properties according to function
337  * parameters.
338  * This function add/removes MAC addresses from the port unicast address
339  * table.
340  *
341  * @uc_nibble	Unicast MAC Address last nibble.
342  * @option      0 = Add, 1 = remove address.
343  *
344  * RETURN: 1 if output succeeded. 0 if option parameter is invalid.
345  */
/*
 * port_uc_addr - Add or remove a MAC address in the port unicast table.
 *
 * Each 32-bit dfut register holds four 8-bit entries; bit 0 of an entry
 * accepts frames and bits 3:1 select the receive queue (RXUQ).
 *
 * @regs:	controller register block
 * @uc_nibble:	last nibble of the unicast MAC address
 * @option:	ACCEPT_MAC_ADDR to add, REJECT_MAC_ADDR to remove
 *
 * RETURN: 1 if output succeeded. 0 if option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear the accept-frame bit at the specified unicast DA
		 * table entry only.  The mask must be inverted: without
		 * the ~ the old code kept the targeted entry and wiped
		 * the other three entries in the register instead.
		 * The u32 cast avoids a signed overflow for entry 3.
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~((u32)0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/*
		 * Replace the entry: clear its byte, then set the accept
		 * bit and the receive-queue number.
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= ~((u32)0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}
382 
383 /*
384  * port_uc_addr_set - This function Set the port Unicast address.
385  */
static void port_uc_addr_set(struct mvgbe_device *dmvgbe, u8 *p_addr)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 hi;
	u32 lo;

	/* Split the 6-byte MAC into the high/low MAC address registers */
	hi = (p_addr[0] << 24) | (p_addr[1] << 16) |
	     (p_addr[2] << 8) | p_addr[3];
	lo = (p_addr[4] << 8) | p_addr[5];

	MVGBE_REG_WR(regs->macal, lo);
	MVGBE_REG_WR(regs->macah, hi);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}
402 
403 /*
404  * mvgbe_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
405  */
/*
 * mvgbe_init_rx_desc_ring - Build the circular RX descriptor ring.
 *
 * Hands every descriptor to DMA, attaches one PKTSIZE_ALIGN buffer to
 * each, and links the last descriptor back to the first.  Descriptors
 * are laid out contiguously at MV_RXQ_DESC_ALIGNED_SIZE strides.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1))
			/* close the ring */
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		else {
			/* link to the next contiguous descriptor */
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}
429 
/*
 * __mvgbe_init - Bring the port up: RX ring, address windows, MAC filter,
 * serial/SDMA configuration and RX DMA enable.
 *
 * @dmvgbe:	driver private data
 * @enetaddr:	6-byte MAC address to program
 * @name:	device name (used only by the legacy MII link poll)
 *
 * Returns 0 on success, or -1 if the legacy link poll times out.
 */
static int __mvgbe_init(struct mvgbe_device *dmvgbe, u8 *enetaddr,
			const char *name)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) &&  \
	!defined(CONFIG_PHYLIB) &&			 \
	!defined(CONFIG_DM_ETH) &&			 \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(dmvgbe, enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
			| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism .
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	!defined(CONFIG_DM_ETH) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		/* ask the controller which PHY address it is wired to */
		miiphy_read(name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(name, phyadr))
			return 0;
		/* 1 s between link polls */
		udelay(1000000);
	}

	printf("No link on %s\n", name);
	return -1;
#endif
	return 0;
}
509 
510 #ifndef CONFIG_DM_ETH
mvgbe_init(struct eth_device * dev)511 static int mvgbe_init(struct eth_device *dev)
512 {
513 	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
514 
515 	return __mvgbe_init(dmvgbe, dmvgbe->dev.enetaddr, dmvgbe->dev.name);
516 }
517 #endif
518 
/*
 * __mvgbe_halt - Stop the port: address windows, TX/RX queues, serial
 * port and all interrupts.
 */
static void __mvgbe_halt(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoder */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Set port is not reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MMI interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);
}
543 
544 #ifndef CONFIG_DM_ETH
/* Legacy net-core halt hook: delegate to the common halt. */
static int mvgbe_halt(struct eth_device *dev)
{
	__mvgbe_halt(to_mvgbe(dev));

	return 0;
}
553 #endif
554 
555 #ifdef CONFIG_DM_ETH
mvgbe_write_hwaddr(struct udevice * dev)556 static int mvgbe_write_hwaddr(struct udevice *dev)
557 {
558 	struct eth_pdata *pdata = dev_get_plat(dev);
559 
560 	port_uc_addr_set(dev_get_priv(dev), pdata->enetaddr);
561 
562 	return 0;
563 }
564 #else
mvgbe_write_hwaddr(struct eth_device * dev)565 static int mvgbe_write_hwaddr(struct eth_device *dev)
566 {
567 	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
568 
569 	/* Programs net device MAC address after initialization */
570 	port_uc_addr_set(dmvgbe, dmvgbe->dev.enetaddr);
571 	return 0;
572 }
573 #endif
574 
/*
 * __mvgbe_send - Transmit one frame via the single TX descriptor.
 *
 * Data that is not 8-byte aligned is bounced through p_aligned_txbuf.
 * The descriptor is handed to DMA and the function busy-waits for
 * completion.
 *
 * NOTE(review): the completion poll below has no timeout; a stuck DMA
 * would hang here indefinitely.
 *
 * Returns 0 on success, -1 on oversized unaligned data or TX error.
 */
static int __mvgbe_send(struct mvgbe_device *dmvgbe, void *dataptr,
			int datasize)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
					datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	/* single-descriptor frame, CRC generated by hardware */
	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tc desc as zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	};
	return 0;
}
629 
630 #ifndef CONFIG_DM_ETH
/* Legacy net-core transmit hook. */
static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
{
	return __mvgbe_send(to_mvgbe(dev), dataptr, datasize);
}
637 #endif
638 
/*
 * __mvgbe_recv - Poll the current RX descriptor for one frame.
 *
 * On success *packetp points into the RX buffer (past RX_BUF_OFFSET)
 * and the payload length is returned.  Fragmented or errored frames
 * are dropped (return 0, *packetp stays NULL).  In all cases the
 * descriptor is recycled back to DMA and the ring pointer advances.
 *
 * NOTE(review): the poll bound reuses the SMI timeout constant
 * (MVGBE_PHY_SMI_TIMEOUT) as an iteration count, not a time unit.
 *
 * Returns the received byte count, 0 for a dropped frame, -1 on timeout.
 */
static int __mvgbe_recv(struct mvgbe_device *dmvgbe, uchar **packetp)
{
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;
	unsigned char *data;
	int rx_bytes = 0;

	*packetp = NULL;

	/* wait untill rx packet available or timeout */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * In case received a packet without first/last bits on
	 * OR the error summary bit is on,
	 * the packets needs to be dropeed.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__func__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
		      " upper layer (net_process_received_packet)\n",
		      __func__);

		/* skip the hardware header at the start of the buffer */
		data = (p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET);
		rx_bytes = (int)(p_rxdesc_curr->byte_cnt -
						  RX_BUF_OFFSET);

		*packetp = data;
	}
	/*
	 * free these descriptors and point next in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return rx_bytes;
}
711 
712 #ifndef CONFIG_DM_ETH
mvgbe_recv(struct eth_device * dev)713 static int mvgbe_recv(struct eth_device *dev)
714 {
715 	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
716 	uchar *packet;
717 	int ret;
718 
719 	ret = __mvgbe_recv(dmvgbe, &packet);
720 	if (ret < 0)
721 		return ret;
722 
723 	net_process_received_packet(packet, ret);
724 
725 	return 0;
726 }
727 #endif
728 
729 #if defined(CONFIG_PHYLIB) || defined(CONFIG_DM_ETH)
730 #if defined(CONFIG_DM_ETH)
/*
 * __mvgbe_phy_init - Program the port's PHY address and attach a phylib
 * PHY device on @bus.
 *
 * Returns the connected phy_device, or NULL if phy_connect() fails.
 *
 * NOTE(review): the return values of phy_config() and phy_startup() are
 * ignored here, so PHY configuration/link failures go unreported —
 * consider checking them.
 */
static struct phy_device *__mvgbe_phy_init(struct udevice *dev,
					   struct mii_dev *bus,
					   phy_interface_t phy_interface,
					   int phyid)
#else
static struct phy_device *__mvgbe_phy_init(struct eth_device *dev,
					   struct mii_dev *bus,
					   phy_interface_t phy_interface,
					   int phyid)
#endif
{
	struct phy_device *phydev;

	/* Set phy address of the port */
	miiphy_write(dev->name, MV_PHY_ADR_REQUEST, MV_PHY_ADR_REQUEST,
		     phyid);

	phydev = phy_connect(bus, phyid, dev, phy_interface);
	if (!phydev) {
		printf("phy_connect failed\n");
		return NULL;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return phydev;
}
759 #endif /* CONFIG_PHYLIB || CONFIG_DM_ETH */
760 
761 #if defined(CONFIG_PHYLIB) && !defined(CONFIG_DM_ETH)
mvgbe_phylib_init(struct eth_device * dev,int phyid)762 int mvgbe_phylib_init(struct eth_device *dev, int phyid)
763 {
764 	struct mii_dev *bus;
765 	struct phy_device *phydev;
766 	int ret;
767 
768 	bus = mdio_alloc();
769 	if (!bus) {
770 		printf("mdio_alloc failed\n");
771 		return -ENOMEM;
772 	}
773 	bus->read = smi_reg_read;
774 	bus->write = smi_reg_write;
775 	strcpy(bus->name, dev->name);
776 
777 	ret = mdio_register(bus);
778 	if (ret) {
779 		printf("mdio_register failed\n");
780 		free(bus);
781 		return -ENOMEM;
782 	}
783 
784 	phydev = __mvgbe_phy_init(dev, bus, PHY_INTERFACE_MODE_RGMII, phyid);
785 	if (!phydev)
786 		return -ENODEV;
787 
788 	return 0;
789 }
790 #endif
791 
/*
 * mvgbe_alloc_buffers - Allocate the RX descriptor ring, RX buffers,
 * aligned TX bounce buffer and the single TX descriptor.
 *
 * NOTE(review): the "+ 1" on some sizes looks like historical slack
 * rather than a documented requirement — confirm before changing.
 *
 * Returns 0 on success, -ENOMEM on any failure (earlier allocations
 * are released via the goto-cleanup chain).
 */
static int mvgbe_alloc_buffers(struct mvgbe_device *dmvgbe)
{
	dmvgbe->p_rxdesc = memalign(PKTALIGN,
				    MV_RXQ_DESC_ALIGNED_SIZE * RINGSZ + 1);
	if (!dmvgbe->p_rxdesc)
		goto error1;

	dmvgbe->p_rxbuf = memalign(PKTALIGN,
				   RINGSZ * PKTSIZE_ALIGN + 1);
	if (!dmvgbe->p_rxbuf)
		goto error2;

	dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
	if (!dmvgbe->p_aligned_txbuf)
		goto error3;

	dmvgbe->p_txdesc = memalign(PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);
	if (!dmvgbe->p_txdesc)
		goto error4;

	return 0;

error4:
	free(dmvgbe->p_aligned_txbuf);
error3:
	free(dmvgbe->p_rxbuf);
error2:
	free(dmvgbe->p_rxdesc);
error1:
	return -ENOMEM;
}
823 
824 #ifndef CONFIG_DM_ETH
/*
 * mvgbe_initialize - Legacy (non-DM) probe: register one eth_device per
 * port enabled in CONFIG_MVGBE_PORTS and hook up its MDIO access.
 *
 * Returns 0 on success, a negative error code or -1 on failure.
 *
 * NOTE(review): on the invalid-devnum path below, dmvgbe and the
 * buffers allocated by mvgbe_alloc_buffers() are leaked.
 */
int mvgbe_initialize(struct bd_info *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	int ret;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/*skip if port is configured not to use */
		if (used_ports[devnum] == 0)
			continue;

		dmvgbe = malloc(sizeof(struct mvgbe_device));
		if (!dmvgbe)
			return -ENOMEM;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));
		ret = mvgbe_alloc_buffers(dmvgbe);
		if (ret) {
			printf("Err.. %s Failed to allocate memory\n",
				__func__);
			free(dmvgbe);
			return ret;
		}

		dev = &dmvgbe->dev;

		/* must be less than sizeof(dev->name) */
		sprintf(dev->name, "egiga%d", devnum);

		/* map the controller register block for this port */
		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__func__, devnum);
			return -1;
		}

		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

#if defined(CONFIG_PHYLIB)
		mvgbe_phylib_init(dev, PHY_BASE_ADR + devnum);
#elif defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		/* no phylib: register a raw MDIO bus for the mii command */
		int retval;
		struct mii_dev *mdiodev = mdio_alloc();
		if (!mdiodev)
			return -ENOMEM;
		strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
		mdiodev->read = smi_reg_read;
		mdiodev->write = smi_reg_write;

		retval = mdio_register(mdiodev);
		if (retval < 0)
			return retval;
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}
900 #endif
901 
902 #ifdef CONFIG_DM_ETH
mvgbe_port_is_fixed_link(struct mvgbe_device * dmvgbe)903 static int mvgbe_port_is_fixed_link(struct mvgbe_device *dmvgbe)
904 {
905 	return dmvgbe->phyaddr > PHY_MAX_ADDR;
906 }
907 
mvgbe_start(struct udevice * dev)908 static int mvgbe_start(struct udevice *dev)
909 {
910 	struct eth_pdata *pdata = dev_get_plat(dev);
911 	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
912 	int ret;
913 
914 	ret = __mvgbe_init(dmvgbe, pdata->enetaddr, dev->name);
915 	if (ret)
916 		return ret;
917 
918 	if (!mvgbe_port_is_fixed_link(dmvgbe)) {
919 		dmvgbe->phydev = __mvgbe_phy_init(dev, dmvgbe->bus,
920 						  dmvgbe->phy_interface,
921 						  dmvgbe->phyaddr);
922 		if (!dmvgbe->phydev)
923 			return -ENODEV;
924 	}
925 
926 	return 0;
927 }
928 
/* DM transmit hook. */
static int mvgbe_send(struct udevice *dev, void *packet, int length)
{
	return __mvgbe_send(dev_get_priv(dev), packet, length);
}
935 
/* DM receive hook. */
static int mvgbe_recv(struct udevice *dev, int flags, uchar **packetp)
{
	return __mvgbe_recv(dev_get_priv(dev), packetp);
}
942 
/* DM stop hook: delegate to the common halt. */
static void mvgbe_stop(struct udevice *dev)
{
	__mvgbe_halt(dev_get_priv(dev));
}
949 
mvgbe_probe(struct udevice * dev)950 static int mvgbe_probe(struct udevice *dev)
951 {
952 	struct eth_pdata *pdata = dev_get_plat(dev);
953 	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
954 	struct mii_dev *bus;
955 	int ret;
956 
957 	ret = mvgbe_alloc_buffers(dmvgbe);
958 	if (ret)
959 		return ret;
960 
961 	dmvgbe->regs = (void __iomem *)pdata->iobase;
962 
963 	bus  = mdio_alloc();
964 	if (!bus) {
965 		printf("Failed to allocate MDIO bus\n");
966 		return -ENOMEM;
967 	}
968 
969 	bus->read = smi_reg_read;
970 	bus->write = smi_reg_write;
971 	snprintf(bus->name, sizeof(bus->name), dev->name);
972 	bus->priv = dmvgbe;
973 	dmvgbe->bus = bus;
974 
975 	ret = mdio_register(bus);
976 	if (ret < 0)
977 		return ret;
978 
979 	return 0;
980 }
981 
/* Driver-model ethernet operations for this controller */
static const struct eth_ops mvgbe_ops = {
	.start		= mvgbe_start,
	.send		= mvgbe_send,
	.recv		= mvgbe_recv,
	.stop		= mvgbe_stop,
	.write_hwaddr	= mvgbe_write_hwaddr,
};
989 
mvgbe_of_to_plat(struct udevice * dev)990 static int mvgbe_of_to_plat(struct udevice *dev)
991 {
992 	struct eth_pdata *pdata = dev_get_plat(dev);
993 	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
994 	void *blob = (void *)gd->fdt_blob;
995 	int node = dev_of_offset(dev);
996 	const char *phy_mode;
997 	int fl_node;
998 	int pnode;
999 	unsigned long addr;
1000 
1001 	pdata->iobase = dev_read_addr(dev);
1002 	pdata->phy_interface = -1;
1003 
1004 	pnode = fdt_node_offset_by_compatible(blob, node,
1005 					      "marvell,kirkwood-eth-port");
1006 
1007 	/* Get phy-mode / phy_interface from DT */
1008 	phy_mode = fdt_getprop(gd->fdt_blob, pnode, "phy-mode", NULL);
1009 	if (phy_mode)
1010 		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
1011 	else
1012 		pdata->phy_interface = PHY_INTERFACE_MODE_GMII;
1013 
1014 	dmvgbe->phy_interface = pdata->phy_interface;
1015 
1016 	/* fetch 'fixed-link' property */
1017 	fl_node = fdt_subnode_offset(blob, pnode, "fixed-link");
1018 	if (fl_node != -FDT_ERR_NOTFOUND) {
1019 		/* set phy_addr to invalid value for fixed link */
1020 		dmvgbe->phyaddr = PHY_MAX_ADDR + 1;
1021 		dmvgbe->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
1022 		dmvgbe->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
1023 	} else {
1024 		/* Now read phyaddr from DT */
1025 		addr = fdtdec_lookup_phandle(blob, pnode, "phy-handle");
1026 		if (addr > 0)
1027 			dmvgbe->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
1028 	}
1029 
1030 	return 0;
1031 }
1032 
/* Device-tree compatible strings matched by this driver */
static const struct udevice_id mvgbe_ids[] = {
	{ .compatible = "marvell,kirkwood-eth" },
	{ }
};
1037 
/* Driver-model registration for the Marvell GbE controller */
U_BOOT_DRIVER(mvgbe) = {
	.name	= "mvgbe",
	.id	= UCLASS_ETH,
	.of_match = mvgbe_ids,
	.of_to_plat = mvgbe_of_to_plat,
	.probe	= mvgbe_probe,
	.ops	= &mvgbe_ops,
	.priv_auto	= sizeof(struct mvgbe_device),
	.plat_auto	= sizeof(struct eth_pdata),
};
1048 #endif /* CONFIG_DM_ETH */
1049