1 /*
2  * SPDX-License-Identifier:	GPL-2.0	IBM-pibs
3  */
4 /*-----------------------------------------------------------------------------+
5  *
6  *  File Name:	enetemac.c
7  *
8  *  Function:	Device driver for the ethernet EMAC3 macro on the 405GP.
9  *
10  *  Author:	Mark Wisner
11  *
12  *  Change Activity-
13  *
14  *  Date	Description of Change					    BY
15  *  ---------	---------------------					    ---
16  *  05-May-99	Created							    MKW
17  *  27-Jun-99	Clean up						    JWB
18  *  16-Jul-99	Added MAL error recovery and better IP packet handling	    MKW
19  *  29-Jul-99	Added Full duplex support				    MKW
20  *  06-Aug-99	Changed names for Mal CR reg				    MKW
21  *  23-Aug-99	Turned off SYE when running at 10Mbs			    MKW
22  *  24-Aug-99	Marked descriptor empty after call_xlc			    MKW
23  *  07-Sep-99	Set MAL RX buffer size reg to ENET_MAX_MTU_ALIGNED / 16	    MCG
24  *		to avoid chaining maximum sized packets. Push starting
25  *		RX descriptor address up to the next cache line boundary.
26  *  16-Jan-00	Added support for booting with IP of 0x0		    MKW
27  *  15-Mar-00	Updated enetInit() to enable broadcast addresses in the
28  *		EMAC0_RXM register.					    JWB
29  *  12-Mar-01	anne-sophie.harnois@nextream.fr
30  *		 - Variables are compatible with those already defined in
31  *		  include/net.h
32  *		- Receive buffer descriptor ring is used to send buffers
33  *		  to the user
34  *		- Info print about send/received/handled packet number if
35  *		  INFO_405_ENET is set
36  *  17-Apr-01	stefan.roese@esd-electronics.com
37  *		- MAL reset in "eth_halt" included
38  *		- Enet speed and duplex output now in one line
39  *  08-May-01	stefan.roese@esd-electronics.com
40  *		- MAL error handling added (eth_init called again)
41  *  13-Nov-01	stefan.roese@esd-electronics.com
42  *		- Set IST bit in EMAC0_MR1 reg upon 100MBit or full duplex
43  *  04-Jan-02	stefan.roese@esd-electronics.com
44  *		- Wait for PHY auto negotiation to complete added
45  *  06-Feb-02	stefan.roese@esd-electronics.com
46  *		- Bug fixed in waiting for auto negotiation to complete
47  *  26-Feb-02	stefan.roese@esd-electronics.com
48  *		- rx and tx buffer descriptors now allocated (no fixed address
49  *		  used anymore)
50  *  17-Jun-02	stefan.roese@esd-electronics.com
51  *		- MAL error debug printf 'M' removed (rx de interrupt may
52  *		  occur upon many incoming packets with only 4 rx buffers).
53  *-----------------------------------------------------------------------------*
54  *  17-Nov-03	travis.sawyer@sandburst.com
55  *		- ported from 405gp_enet.c to utilize up to 4 EMAC ports
56  *		  in the 440GX.	 This port should work with the 440GP
57  *		  (2 EMACs) also
58  *  15-Aug-05	sr@denx.de
59  *		- merged 405gp_enet.c and 440gx_enet.c to generic 4xx_enet.c
60  *		  now handling all 4xx CPUs.
61  *-----------------------------------------------------------------------------*/
62 
63 #include <config.h>
64 #include <common.h>
65 #include <net.h>
66 #include <asm/processor.h>
67 #include <asm/io.h>
68 #include <asm/cache.h>
69 #include <asm/mmu.h>
70 #include <commproc.h>
71 #include <asm/ppc4xx.h>
72 #include <asm/ppc4xx-emac.h>
73 #include <asm/ppc4xx-mal.h>
74 #include <miiphy.h>
75 #include <malloc.h>
76 #include <linux/compiler.h>
77 
78 #if !(defined(CONFIG_MII) || defined(CONFIG_CMD_MII))
79 #error "CONFIG_MII has to be defined!"
80 #endif
81 
82 #define EMAC_RESET_TIMEOUT 1000 /* 1000 ms reset timeout */
83 #define PHY_AUTONEGOTIATE_TIMEOUT 5000	/* 5000 ms autonegotiate timeout */
84 
85 /* Ethernet Transmit and Receive Buffers */
86 /* AS.HARNOIS
87  * In the same way ENET_MAX_MTU and ENET_MAX_MTU_ALIGNED are set from
88  * PKTSIZE and PKTSIZE_ALIGN (include/net.h)
89  */
90 #define ENET_MAX_MTU	       PKTSIZE
91 #define ENET_MAX_MTU_ALIGNED   PKTSIZE_ALIGN
92 
93 /*-----------------------------------------------------------------------------+
94  * Defines for MAL/EMAC interrupt conditions as reported in the UIC (Universal
95  * Interrupt Controller).
96  *-----------------------------------------------------------------------------*/
97 #define ETH_IRQ_NUM(dev)	(VECNUM_ETH0 + ((dev) * VECNUM_ETH1_OFFS))
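/*
 * Illustration only -- the real VECNUM_ETH0/VECNUM_ETH1_OFFS values are
 * SoC-specific and come from the ppc4xx headers: assuming VECNUM_ETH0 = 28
 * and VECNUM_ETH1_OFFS = 2, ETH_IRQ_NUM(0) evaluates to 28 and
 * ETH_IRQ_NUM(1) to 30.
 */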
98 
99 #if defined(CONFIG_HAS_ETH3)
100 #if !defined(CONFIG_440GX)
101 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)) | \
102 			 UIC_MASK(ETH_IRQ_NUM(2)) | UIC_MASK(ETH_IRQ_NUM(3)))
103 #else
104 /* Unfortunately 440GX spreads EMAC interrupts on multiple UIC's */
105 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)))
106 #define UIC_ETHxB	(UIC_MASK(ETH_IRQ_NUM(2)) | UIC_MASK(ETH_IRQ_NUM(3)))
107 #endif /* !defined(CONFIG_440GX) */
108 #elif defined(CONFIG_HAS_ETH2)
109 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)) | \
110 			 UIC_MASK(ETH_IRQ_NUM(2)))
111 #elif defined(CONFIG_HAS_ETH1)
112 #define UIC_ETHx	(UIC_MASK(ETH_IRQ_NUM(0)) | UIC_MASK(ETH_IRQ_NUM(1)))
113 #else
114 #define UIC_ETHx	UIC_MASK(ETH_IRQ_NUM(0))
115 #endif
116 
117 /*
118  * Define a default value for UIC_ETHxB for non-440GX chips so that we can
119  * use common code for all 4xx variants
120  */
121 #if !defined(UIC_ETHxB)
122 #define UIC_ETHxB	0
123 #endif
124 
125 #define UIC_MAL_SERR	UIC_MASK(VECNUM_MAL_SERR)
126 #define UIC_MAL_TXDE	UIC_MASK(VECNUM_MAL_TXDE)
127 #define UIC_MAL_RXDE	UIC_MASK(VECNUM_MAL_RXDE)
128 #define UIC_MAL_TXEOB	UIC_MASK(VECNUM_MAL_TXEOB)
129 #define UIC_MAL_RXEOB	UIC_MASK(VECNUM_MAL_RXEOB)
130 
131 #define MAL_UIC_ERR	(UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)
132 #define MAL_UIC_DEF	(UIC_MAL_RXEOB | MAL_UIC_ERR)
133 
134 /*
135  * We have 3 different interrupt types:
136  * - MAL interrupts indicating successful transfer
137  * - MAL error interrupts indicating MAL related errors
138  * - EMAC interrupts indicating EMAC related errors
139  *
140  * All of these interrupts can be on different UICs, but for now at
141  * least all interrupts of one type are on the same UIC. The only
142  * exception is the 440GX, where the EMAC interrupts are spread over
143  * two UICs!
144  */
145 #if defined(CONFIG_440GX)
146 #define UIC_BASE_MAL	UIC1_DCR_BASE
147 #define UIC_BASE_MAL_ERR UIC2_DCR_BASE
148 #define UIC_BASE_EMAC	UIC2_DCR_BASE
149 #define UIC_BASE_EMAC_B	UIC3_DCR_BASE
150 #else
151 #define UIC_BASE_MAL	(UIC0_DCR_BASE + (UIC_NR(VECNUM_MAL_TXEOB) * 0x10))
152 #define UIC_BASE_MAL_ERR (UIC0_DCR_BASE + (UIC_NR(VECNUM_MAL_SERR) * 0x10))
153 #define UIC_BASE_EMAC	(UIC0_DCR_BASE + (UIC_NR(ETH_IRQ_NUM(0)) * 0x10))
154 #define UIC_BASE_EMAC_B	(UIC0_DCR_BASE + (UIC_NR(ETH_IRQ_NUM(0)) * 0x10))
155 #endif
156 
157 #undef INFO_4XX_ENET
158 
159 #define BI_PHYMODE_NONE	 0
160 #define BI_PHYMODE_ZMII	 1
161 #define BI_PHYMODE_RGMII 2
162 #define BI_PHYMODE_GMII  3
163 #define BI_PHYMODE_RTBI  4
164 #define BI_PHYMODE_TBI   5
165 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
166     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
167     defined(CONFIG_405EX)
168 #define BI_PHYMODE_SMII  6
169 #define BI_PHYMODE_MII   7
170 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
171 #define BI_PHYMODE_RMII  8
172 #endif
173 #endif
174 #define BI_PHYMODE_SGMII 9
175 
176 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
177     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
178     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
179     defined(CONFIG_405EX)
180 #define SDR0_MFR_ETH_CLK_SEL_V(n)	((0x01<<27) / (n+1))
181 #endif
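/*
 * The CPUs using this macro have at most two EMACs, so in practice n is
 * 0 or 1, i.e. (assuming that range):
 *	SDR0_MFR_ETH_CLK_SEL_V(0) = 0x08000000 / 1 = 0x08000000
 *	SDR0_MFR_ETH_CLK_SEL_V(1) = 0x08000000 / 2 = 0x04000000
 */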
182 
183 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
184 #define SDR0_ETH_CFG_CLK_SEL_V(n)	(0x01 << (8 + n))
185 #endif
186 
187 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
188 #define MAL_RX_CHAN_MUL	8	/* 460EX/GT uses MAL channel 8 for EMAC1 */
189 #else
190 #define MAL_RX_CHAN_MUL	1
191 #endif
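/*
 * Example: enetInt() below tests the RX EOB bit for a device as
 * (0x80000000 >> (devnum * MAL_RX_CHAN_MUL)), so on 460EX/GT EMAC1 maps
 * to MAL RX channel 8 (see the MAL0_RXCTP8R/MAL0_RCBS8 setup in
 * ppc_4xx_eth_init()), while on other 4xx CPUs EMAC1 uses RX channel 1.
 */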
192 
193 /*--------------------------------------------------------------------+
194  * Fixed PHY (PHY-less) support for Ethernet Ports.
195  *--------------------------------------------------------------------*/
196 
197 /*
198  * Some boards do not have a PHY for each ethernet port. These ports
199  * are known as Fixed PHY (or PHY-less) ports. For such ports, set
200  * the appropriate CONFIG_PHY_ADDR equal to CONFIG_FIXED_PHY and
201  * then define CONFIG_SYS_FIXED_PHY_PORTS to define what the speed and
202  * duplex should be for these ports in the board configuration
203  * file.
204  *
205  * For Example:
206  *     #define CONFIG_FIXED_PHY   0xFFFFFFFF
207  *
208  *     #define CONFIG_PHY_ADDR    CONFIG_FIXED_PHY
209  *     #define CONFIG_PHY1_ADDR   1
210  *     #define CONFIG_PHY2_ADDR   CONFIG_FIXED_PHY
211  *     #define CONFIG_PHY3_ADDR   3
212  *
213  *     #define CONFIG_SYS_FIXED_PHY_PORT(devnum,speed,duplex) \
214  *                     {devnum, speed, duplex},
215  *
216  *     #define CONFIG_SYS_FIXED_PHY_PORTS \
217  *                     CONFIG_SYS_FIXED_PHY_PORT(0,1000,FULL) \
218  *                     CONFIG_SYS_FIXED_PHY_PORT(2,100,HALF)
219  */
220 
221 #ifndef CONFIG_FIXED_PHY
222 #define CONFIG_FIXED_PHY	0xFFFFFFFF /* Fixed PHY (PHY-less) */
223 #endif
224 
225 #ifndef CONFIG_SYS_FIXED_PHY_PORTS
226 #define CONFIG_SYS_FIXED_PHY_PORTS	/* default is an empty array */
227 #endif
228 
229 struct fixed_phy_port {
230 	unsigned int devnum;	/* ethernet port */
231 	unsigned int speed;	/* specified speed 10,100 or 1000 */
232 	unsigned int duplex;	/* specified duplex FULL or HALF */
233 };
234 
235 static const struct fixed_phy_port fixed_phy_port[] = {
236 	CONFIG_SYS_FIXED_PHY_PORTS	/* defined in board configuration file */
237 };
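/*
 * With the example configuration shown above, fixed_phy_port[] expands to
 * { { 0, 1000, FULL }, { 2, 100, HALF } }, i.e. EMAC0 is forced to
 * 1000/FULL and EMAC2 to 100/HALF without any MII access.
 */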
238 
239 /*-----------------------------------------------------------------------------+
240  * Global variables. TX and RX descriptors and buffers.
241  *-----------------------------------------------------------------------------*/
242 
243 /*
244  * Get the count of EMAC devices (doesn't have to be the maximum number
245  * supported by the CPU)
246  *
247  * CONFIG_BOARD_EMAC_COUNT provides a "dynamic" way to configure the EMAC
248  * count. This is needed for the Kilauea/Haleakala 405EX/405EXr eval
249  * boards, which use the same binary.
250  */
251 #if defined(CONFIG_BOARD_EMAC_COUNT)
252 #define LAST_EMAC_NUM	board_emac_count()
253 #else /* CONFIG_BOARD_EMAC_COUNT */
254 #if defined(CONFIG_HAS_ETH3)
255 #define LAST_EMAC_NUM	4
256 #elif defined(CONFIG_HAS_ETH2)
257 #define LAST_EMAC_NUM	3
258 #elif defined(CONFIG_HAS_ETH1)
259 #define LAST_EMAC_NUM	2
260 #else
261 #define LAST_EMAC_NUM	1
262 #endif
263 #endif /* CONFIG_BOARD_EMAC_COUNT */
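/*
 * A board selecting CONFIG_BOARD_EMAC_COUNT has to provide
 * board_emac_count() (declared further below). A minimal sketch, assuming
 * a board that always exposes two usable EMACs, would be:
 *
 *	int board_emac_count(void)
 *	{
 *		return 2;
 *	}
 */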
264 
265 /* normal boards start with EMAC0 */
266 #if !defined(CONFIG_EMAC_NR_START)
267 #define CONFIG_EMAC_NR_START	0
268 #endif
269 
270 #define MAL_RX_DESC_SIZE	2048
271 #define MAL_TX_DESC_SIZE	2048
272 #define MAL_ALLOC_SIZE		(MAL_TX_DESC_SIZE + MAL_RX_DESC_SIZE)
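/*
 * This single MAL_ALLOC_SIZE block is later split in ppc_4xx_eth_init():
 * tx_phys = base and rx_phys = base + MAL_TX_DESC_SIZE, so the TX
 * descriptors occupy the first half and the RX descriptors the second.
 */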
273 
274 /*-----------------------------------------------------------------------------+
275  * Prototypes and externals.
276  *-----------------------------------------------------------------------------*/
277 static void enet_rcv (struct eth_device *dev, unsigned long malisr);
278 
279 int enetInt (struct eth_device *dev);
280 static void mal_err (struct eth_device *dev, unsigned long isr,
281 		     unsigned long uic, unsigned long maldef,
282 		     unsigned long mal_errr);
283 static void emac_err (struct eth_device *dev, unsigned long isr);
284 
285 extern int phy_setup_aneg (char *devname, unsigned char addr);
286 extern int emac4xx_miiphy_read (const char *devname, unsigned char addr,
287 		unsigned char reg, unsigned short *value);
288 extern int emac4xx_miiphy_write (const char *devname, unsigned char addr,
289 		unsigned char reg, unsigned short value);
290 
291 int board_emac_count(void);
292 
293 static void emac_loopback_enable(EMAC_4XX_HW_PST hw_p)
294 {
295 #if defined(CONFIG_440SPE) || \
296     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
297     defined(CONFIG_405EX)
298 	u32 val;
299 
300 	mfsdr(SDR0_MFR, val);
301 	val |= SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
302 	mtsdr(SDR0_MFR, val);
303 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
304 	u32 val;
305 
306 	mfsdr(SDR0_ETH_CFG, val);
307 	val |= SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
308 	mtsdr(SDR0_ETH_CFG, val);
309 #endif
310 }
311 
312 static void emac_loopback_disable(EMAC_4XX_HW_PST hw_p)
313 {
314 #if defined(CONFIG_440SPE) || \
315     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
316     defined(CONFIG_405EX)
317 	u32 val;
318 
319 	mfsdr(SDR0_MFR, val);
320 	val &= ~SDR0_MFR_ETH_CLK_SEL_V(hw_p->devnum);
321 	mtsdr(SDR0_MFR, val);
322 #elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
323 	u32 val;
324 
325 	mfsdr(SDR0_ETH_CFG, val);
326 	val &= ~SDR0_ETH_CFG_CLK_SEL_V(hw_p->devnum);
327 	mtsdr(SDR0_ETH_CFG, val);
328 #endif
329 }
330 
331 /*-----------------------------------------------------------------------------+
332 | ppc_4xx_eth_halt
333 | Disable MAL channel, and EMACn
334 +-----------------------------------------------------------------------------*/
335 static void ppc_4xx_eth_halt (struct eth_device *dev)
336 {
337 	EMAC_4XX_HW_PST hw_p = dev->priv;
338 	u32 val = 10000;
339 
340 	out_be32((void *)EMAC0_IER + hw_p->hw_addr, 0x00000000);	/* disable emac interrupts */
341 
342 	/* 1st reset MAL channel */
343 	/* Note: writing a 0 to a channel has no effect */
344 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
345 	mtdcr (MAL0_TXCARR, (MAL_CR_MMSR >> (hw_p->devnum * 2)));
346 #else
347 	mtdcr (MAL0_TXCARR, (MAL_CR_MMSR >> hw_p->devnum));
348 #endif
349 	mtdcr (MAL0_RXCARR, (MAL_CR_MMSR >> hw_p->devnum));
350 
351 	/* wait for reset */
352 	while (mfdcr (MAL0_RXCASR) & (MAL_CR_MMSR >> hw_p->devnum)) {
353 		udelay (1000);	/* Delay 1 MS so as not to hammer the register */
354 		val--;
355 		if (val == 0)
356 			break;
357 	}
358 
359 	/* provide clocks for EMAC internal loopback  */
360 	emac_loopback_enable(hw_p);
361 
362 	/* EMAC RESET */
363 	out_be32((void *)EMAC0_MR0 + hw_p->hw_addr, EMAC_MR0_SRST);
364 
365 	/* remove clocks for EMAC internal loopback  */
366 	emac_loopback_disable(hw_p);
367 
368 #ifndef CONFIG_NETCONSOLE
369 	hw_p->print_speed = 1;	/* print speed message again next time */
370 #endif
371 
372 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
373 	/* don't bypass the TAHOE0/TAHOE1 cores for Linux */
374 	mfsdr(SDR0_ETH_CFG, val);
375 	val &= ~(SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
376 	mtsdr(SDR0_ETH_CFG, val);
377 #endif
378 
379 	return;
380 }
381 
382 #if defined (CONFIG_440GX)
383 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
384 {
385 	unsigned long pfc1;
386 	unsigned long zmiifer;
387 	unsigned long rmiifer;
388 
389 	mfsdr(SDR0_PFC1, pfc1);
390 	pfc1 = SDR0_PFC1_EPS_DECODE(pfc1);
391 
392 	zmiifer = 0;
393 	rmiifer = 0;
394 
395 	switch (pfc1) {
396 	case 1:
397 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
398 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
399 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
400 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
401 		bis->bi_phymode[0] = BI_PHYMODE_ZMII;
402 		bis->bi_phymode[1] = BI_PHYMODE_ZMII;
403 		bis->bi_phymode[2] = BI_PHYMODE_ZMII;
404 		bis->bi_phymode[3] = BI_PHYMODE_ZMII;
405 		break;
406 	case 2:
407 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
408 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
409 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
410 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
411 		bis->bi_phymode[0] = BI_PHYMODE_ZMII;
412 		bis->bi_phymode[1] = BI_PHYMODE_ZMII;
413 		bis->bi_phymode[2] = BI_PHYMODE_ZMII;
414 		bis->bi_phymode[3] = BI_PHYMODE_ZMII;
415 		break;
416 	case 3:
417 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
418 		rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
419 		bis->bi_phymode[0] = BI_PHYMODE_ZMII;
420 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
421 		bis->bi_phymode[2] = BI_PHYMODE_RGMII;
422 		bis->bi_phymode[3] = BI_PHYMODE_NONE;
423 		break;
424 	case 4:
425 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
426 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
427 		rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (2);
428 		rmiifer |= RGMII_FER_RGMII << RGMII_FER_V (3);
429 		bis->bi_phymode[0] = BI_PHYMODE_ZMII;
430 		bis->bi_phymode[1] = BI_PHYMODE_ZMII;
431 		bis->bi_phymode[2] = BI_PHYMODE_RGMII;
432 		bis->bi_phymode[3] = BI_PHYMODE_RGMII;
433 		break;
434 	case 5:
435 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
436 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
437 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V (2);
438 		rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
439 		bis->bi_phymode[0] = BI_PHYMODE_ZMII;
440 		bis->bi_phymode[1] = BI_PHYMODE_ZMII;
441 		bis->bi_phymode[2] = BI_PHYMODE_ZMII;
442 		bis->bi_phymode[3] = BI_PHYMODE_RGMII;
443 		break;
444 	case 6:
445 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V (0);
446 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V (1);
447 		rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
448 		bis->bi_phymode[0] = BI_PHYMODE_ZMII;
449 		bis->bi_phymode[1] = BI_PHYMODE_ZMII;
450 		bis->bi_phymode[2] = BI_PHYMODE_RGMII;
451 		break;
452 	case 0:
453 	default:
454 		zmiifer = ZMII_FER_MII << ZMII_FER_V(devnum);
455 		rmiifer = 0x0;
456 		bis->bi_phymode[0] = BI_PHYMODE_ZMII;
457 		bis->bi_phymode[1] = BI_PHYMODE_ZMII;
458 		bis->bi_phymode[2] = BI_PHYMODE_ZMII;
459 		bis->bi_phymode[3] = BI_PHYMODE_ZMII;
460 		break;
461 	}
462 
463 	/* Ensure we setup mdio for this devnum and ONLY this devnum */
464 	zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
465 
466 	out_be32((void *)ZMII0_FER, zmiifer);
467 	out_be32((void *)RGMII_FER, rmiifer);
468 
469 	return ((int)pfc1);
470 }
471 #endif	/* CONFIG_440GX */
472 
473 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX)
474 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
475 {
476 	unsigned long zmiifer=0x0;
477 	unsigned long pfc1;
478 
479 	mfsdr(SDR0_PFC1, pfc1);
480 	pfc1 &= SDR0_PFC1_SELECT_MASK;
481 
482 	switch (pfc1) {
483 	case SDR0_PFC1_SELECT_CONFIG_2:
484 		/* 1 x GMII port */
485 		out_be32((void *)ZMII0_FER, 0x00);
486 		out_be32((void *)RGMII_FER, 0x00000037);
487 		bis->bi_phymode[0] = BI_PHYMODE_GMII;
488 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
489 		break;
490 	case SDR0_PFC1_SELECT_CONFIG_4:
491 		/* 2 x RGMII ports */
492 		out_be32((void *)ZMII0_FER, 0x00);
493 		out_be32((void *)RGMII_FER, 0x00000055);
494 		bis->bi_phymode[0] = BI_PHYMODE_RGMII;
495 		bis->bi_phymode[1] = BI_PHYMODE_RGMII;
496 		break;
497 	case SDR0_PFC1_SELECT_CONFIG_6:
498 		/* 2 x SMII ports */
499 		out_be32((void *)ZMII0_FER,
500 			 ((ZMII_FER_SMII) << ZMII_FER_V(0)) |
501 			 ((ZMII_FER_SMII) << ZMII_FER_V(1)));
502 		out_be32((void *)RGMII_FER, 0x00000000);
503 		bis->bi_phymode[0] = BI_PHYMODE_SMII;
504 		bis->bi_phymode[1] = BI_PHYMODE_SMII;
505 		break;
506 	case SDR0_PFC1_SELECT_CONFIG_1_2:
507 		/* only 1 x MII supported */
508 		out_be32((void *)ZMII0_FER, (ZMII_FER_MII) << ZMII_FER_V(0));
509 		out_be32((void *)RGMII_FER, 0x00000000);
510 		bis->bi_phymode[0] = BI_PHYMODE_MII;
511 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
512 		break;
513 	default:
514 		break;
515 	}
516 
517 	/* Ensure we setup mdio for this devnum and ONLY this devnum */
518 	zmiifer = in_be32((void *)ZMII0_FER);
519 	zmiifer |= (ZMII_FER_MDI) << ZMII_FER_V(devnum);
520 	out_be32((void *)ZMII0_FER, zmiifer);
521 
522 	return ((int)0x0);
523 }
524 #endif	/* CONFIG_440EPX || CONFIG_440GRX */
525 
526 #if defined(CONFIG_405EX)
527 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
528 {
529 	u32 rgmiifer = 0;
530 
531 	/*
532 	 * The 405EX(r)'s RGMII bridge can operate in one of several
533 	 * modes, only one of which (2 x RGMII) allows the
534 	 * simultaneous use of both EMACs on the 405EX.
535 	 */
536 
537 	switch (CONFIG_EMAC_PHY_MODE) {
538 
539 	case EMAC_PHY_MODE_NONE:
540 		/* No ports */
541 		rgmiifer |= RGMII_FER_DIS	<< 0;
542 		rgmiifer |= RGMII_FER_DIS	<< 4;
543 		out_be32((void *)RGMII_FER, rgmiifer);
544 		bis->bi_phymode[0] = BI_PHYMODE_NONE;
545 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
546 		break;
547 	case EMAC_PHY_MODE_NONE_RGMII:
548 		/* 1 x RGMII port on channel 0 */
549 		rgmiifer |= RGMII_FER_RGMII	<< 0;
550 		rgmiifer |= RGMII_FER_DIS	<< 4;
551 		out_be32((void *)RGMII_FER, rgmiifer);
552 		bis->bi_phymode[0] = BI_PHYMODE_RGMII;
553 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
554 		break;
555 	case EMAC_PHY_MODE_RGMII_NONE:
556 		/* 1 x RGMII port on channel 1 */
557 		rgmiifer |= RGMII_FER_DIS	<< 0;
558 		rgmiifer |= RGMII_FER_RGMII	<< 4;
559 		out_be32((void *)RGMII_FER, rgmiifer);
560 		bis->bi_phymode[0] = BI_PHYMODE_NONE;
561 		bis->bi_phymode[1] = BI_PHYMODE_RGMII;
562 		break;
563 	case EMAC_PHY_MODE_RGMII_RGMII:
564 		/* 2 x RGMII ports */
565 		rgmiifer |= RGMII_FER_RGMII	<< 0;
566 		rgmiifer |= RGMII_FER_RGMII	<< 4;
567 		out_be32((void *)RGMII_FER, rgmiifer);
568 		bis->bi_phymode[0] = BI_PHYMODE_RGMII;
569 		bis->bi_phymode[1] = BI_PHYMODE_RGMII;
570 		break;
571 	case EMAC_PHY_MODE_NONE_GMII:
572 		/* 1 x GMII port on channel 0 */
573 		rgmiifer |= RGMII_FER_GMII	<< 0;
574 		rgmiifer |= RGMII_FER_DIS	<< 4;
575 		out_be32((void *)RGMII_FER, rgmiifer);
576 		bis->bi_phymode[0] = BI_PHYMODE_GMII;
577 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
578 		break;
579 	case EMAC_PHY_MODE_NONE_MII:
580 		/* 1 x MII port on channel 0 */
581 		rgmiifer |= RGMII_FER_MII	<< 0;
582 		rgmiifer |= RGMII_FER_DIS	<< 4;
583 		out_be32((void *)RGMII_FER, rgmiifer);
584 		bis->bi_phymode[0] = BI_PHYMODE_MII;
585 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
586 		break;
587 	case EMAC_PHY_MODE_GMII_NONE:
588 		/* 1 x GMII port on channel 1 */
589 		rgmiifer |= RGMII_FER_DIS	<< 0;
590 		rgmiifer |= RGMII_FER_GMII	<< 4;
591 		out_be32((void *)RGMII_FER, rgmiifer);
592 		bis->bi_phymode[0] = BI_PHYMODE_NONE;
593 		bis->bi_phymode[1] = BI_PHYMODE_GMII;
594 		break;
595 	case EMAC_PHY_MODE_MII_NONE:
596 		/* 1 x MII port on channel 1 */
597 		rgmiifer |= RGMII_FER_DIS	<< 0;
598 		rgmiifer |= RGMII_FER_MII	<< 4;
599 		out_be32((void *)RGMII_FER, rgmiifer);
600 		bis->bi_phymode[0] = BI_PHYMODE_NONE;
601 		bis->bi_phymode[1] = BI_PHYMODE_MII;
602 		break;
603 	default:
604 		break;
605 	}
606 
607 	/* Ensure we setup mdio for this devnum and ONLY this devnum */
608 	rgmiifer = in_be32((void *)RGMII_FER);
609 	rgmiifer |= (1 << (19-devnum));
610 	out_be32((void *)RGMII_FER, rgmiifer);
611 
612 	return ((int)0x0);
613 }
614 #endif  /* CONFIG_405EX */
615 
616 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
617 int ppc_4xx_eth_setup_bridge(int devnum, bd_t * bis)
618 {
619 	u32 eth_cfg;
620 	u32 zmiifer;		/* ZMII0_FER reg. */
621 	u32 rmiifer;		/* RGMII0_FER reg. Bridge 0 */
622 	u32 rmiifer1;		/* RGMII0_FER reg. Bridge 1 */
623 	int mode;
624 
625 	zmiifer  = 0;
626 	rmiifer  = 0;
627 	rmiifer1 = 0;
628 
629 #if defined(CONFIG_460EX)
630 	mode = 9;
631 	mfsdr(SDR0_ETH_CFG, eth_cfg);
632 	if (((eth_cfg & SDR0_ETH_CFG_SGMII0_ENABLE) > 0) &&
633 	    ((eth_cfg & SDR0_ETH_CFG_SGMII1_ENABLE) > 0))
634 		mode = 11; /* config SGMII */
635 #else
636 	mode = 10;
637 	mfsdr(SDR0_ETH_CFG, eth_cfg);
638 	if (((eth_cfg & SDR0_ETH_CFG_SGMII0_ENABLE) > 0) &&
639 	    ((eth_cfg & SDR0_ETH_CFG_SGMII1_ENABLE) > 0) &&
640 	    ((eth_cfg & SDR0_ETH_CFG_SGMII2_ENABLE) > 0))
641 		mode = 12; /* config SGMII */
642 #endif
643 
644 	/* TODO:
645 	 * NOTE: 460GT has 2 RGMII bridge cores:
646 	 *		emac0 ------ RGMII0_BASE
647 	 *		           |
648 	 *		emac1 -----+
649 	 *
650 	 *		emac2 ------ RGMII1_BASE
651 	 *		           |
652 	 *		emac3 -----+
653 	 *
654 	 *	460EX has 1 RGMII bridge core:
655 	 *	and RGMII1_BASE is disabled
656 	 *		emac0 ------ RGMII0_BASE
657 	 *		           |
658 	 *		emac1 -----+
659 	 */
660 
661 	/*
662 	 * Right now only 2*RGMII is supported. Please extend when needed.
663 	 * sr - 2008-02-19
664 	 * Add SGMII support.
665 	 * vg - 2008-07-28
666 	 */
667 	switch (mode) {
668 	case 1:
669 		/* 1 MII - 460EX */
670 		/* GMC0 EMAC4_0, ZMII Bridge */
671 		zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
672 		bis->bi_phymode[0] = BI_PHYMODE_MII;
673 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
674 		bis->bi_phymode[2] = BI_PHYMODE_NONE;
675 		bis->bi_phymode[3] = BI_PHYMODE_NONE;
676 		break;
677 	case 2:
678 		/* 2 MII - 460GT */
679 		/* GMC0 EMAC4_0, GMC1 EMAC4_2, ZMII Bridge */
680 		zmiifer |= ZMII_FER_MII << ZMII_FER_V(0);
681 		zmiifer |= ZMII_FER_MII << ZMII_FER_V(2);
682 		bis->bi_phymode[0] = BI_PHYMODE_MII;
683 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
684 		bis->bi_phymode[2] = BI_PHYMODE_MII;
685 		bis->bi_phymode[3] = BI_PHYMODE_NONE;
686 		break;
687 	case 3:
688 		/* 2 RMII - 460EX */
689 		/* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
690 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
691 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
692 		bis->bi_phymode[0] = BI_PHYMODE_RMII;
693 		bis->bi_phymode[1] = BI_PHYMODE_RMII;
694 		bis->bi_phymode[2] = BI_PHYMODE_NONE;
695 		bis->bi_phymode[3] = BI_PHYMODE_NONE;
696 		break;
697 	case 4:
698 		/* 4 RMII - 460GT */
699 		/* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC1 EMAC4_2, GMC1, EMAC4_3 */
700 		/* ZMII Bridge */
701 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(0);
702 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(1);
703 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(2);
704 		zmiifer |= ZMII_FER_RMII << ZMII_FER_V(3);
705 		bis->bi_phymode[0] = BI_PHYMODE_RMII;
706 		bis->bi_phymode[1] = BI_PHYMODE_RMII;
707 		bis->bi_phymode[2] = BI_PHYMODE_RMII;
708 		bis->bi_phymode[3] = BI_PHYMODE_RMII;
709 		break;
710 	case 5:
711 		/* 2 SMII - 460EX */
712 		/* GMC0 EMAC4_0, GMC0 EMAC4_1, ZMII Bridge */
713 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
714 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
715 		bis->bi_phymode[0] = BI_PHYMODE_SMII;
716 		bis->bi_phymode[1] = BI_PHYMODE_SMII;
717 		bis->bi_phymode[2] = BI_PHYMODE_NONE;
718 		bis->bi_phymode[3] = BI_PHYMODE_NONE;
719 		break;
720 	case 6:
721 		/* 4 SMII - 460GT */
722 		/* GMC0 EMAC4_0, GMC0 EMAC4_1, GMC0 EMAC4_3, GMC0 EMAC4_3 */
723 		/* ZMII Bridge */
724 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(0);
725 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(1);
726 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(2);
727 		zmiifer |= ZMII_FER_SMII << ZMII_FER_V(3);
728 		bis->bi_phymode[0] = BI_PHYMODE_SMII;
729 		bis->bi_phymode[1] = BI_PHYMODE_SMII;
730 		bis->bi_phymode[2] = BI_PHYMODE_SMII;
731 		bis->bi_phymode[3] = BI_PHYMODE_SMII;
732 		break;
733 	case 7:
734 		/* This is the default mode that we want for board bringup - Maple */
735 		/* 1 GMII - 460EX */
736 		/* GMC0 EMAC4_0, RGMII Bridge 0 */
737 		rmiifer |= RGMII_FER_MDIO(0);
738 
739 		if (devnum == 0) {
740 			rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2); /* CH0CFG - EMAC0 */
741 			bis->bi_phymode[0] = BI_PHYMODE_GMII;
742 			bis->bi_phymode[1] = BI_PHYMODE_NONE;
743 			bis->bi_phymode[2] = BI_PHYMODE_NONE;
744 			bis->bi_phymode[3] = BI_PHYMODE_NONE;
745 		} else {
746 			rmiifer |= RGMII_FER_GMII << RGMII_FER_V(3); /* CH1CFG - EMAC1 */
747 			bis->bi_phymode[0] = BI_PHYMODE_NONE;
748 			bis->bi_phymode[1] = BI_PHYMODE_GMII;
749 			bis->bi_phymode[2] = BI_PHYMODE_NONE;
750 			bis->bi_phymode[3] = BI_PHYMODE_NONE;
751 		}
752 		break;
753 	case 8:
754 		/* 2 GMII - 460GT */
755 		/* GMC0 EMAC4_0, RGMII Bridge 0 */
756 		/* GMC1 EMAC4_2, RGMII Bridge 1 */
757 		rmiifer |= RGMII_FER_GMII << RGMII_FER_V(2);	/* CH0CFG - EMAC0 */
758 		rmiifer1 |= RGMII_FER_GMII << RGMII_FER_V(2);	/* CH0CFG - EMAC2 */
759 		rmiifer |= RGMII_FER_MDIO(0);			/* enable MDIO - EMAC0 */
760 		rmiifer1 |= RGMII_FER_MDIO(0);			/* enable MDIO - EMAC2 */
761 
762 		bis->bi_phymode[0] = BI_PHYMODE_GMII;
763 		bis->bi_phymode[1] = BI_PHYMODE_NONE;
764 		bis->bi_phymode[2] = BI_PHYMODE_GMII;
765 		bis->bi_phymode[3] = BI_PHYMODE_NONE;
766 		break;
767 	case 9:
768 		/* 2 RGMII - 460EX */
769 		/* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
770 		rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
771 		rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
772 		rmiifer |= RGMII_FER_MDIO(0);			/* enable MDIO - EMAC0 */
773 
774 		bis->bi_phymode[0] = BI_PHYMODE_RGMII;
775 		bis->bi_phymode[1] = BI_PHYMODE_RGMII;
776 		bis->bi_phymode[2] = BI_PHYMODE_NONE;
777 		bis->bi_phymode[3] = BI_PHYMODE_NONE;
778 		break;
779 	case 10:
780 		/* 4 RGMII - 460GT */
781 		/* GMC0 EMAC4_0, GMC0 EMAC4_1, RGMII Bridge 0 */
782 		/* GMC1 EMAC4_2, GMC1 EMAC4_3, RGMII Bridge 1 */
783 		rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(2);
784 		rmiifer |= RGMII_FER_RGMII << RGMII_FER_V(3);
785 		rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(2);
786 		rmiifer1 |= RGMII_FER_RGMII << RGMII_FER_V(3);
787 		bis->bi_phymode[0] = BI_PHYMODE_RGMII;
788 		bis->bi_phymode[1] = BI_PHYMODE_RGMII;
789 		bis->bi_phymode[2] = BI_PHYMODE_RGMII;
790 		bis->bi_phymode[3] = BI_PHYMODE_RGMII;
791 		break;
792 	case 11:
793 		/* 2 SGMII - 460EX */
794 		bis->bi_phymode[0] = BI_PHYMODE_SGMII;
795 		bis->bi_phymode[1] = BI_PHYMODE_SGMII;
796 		bis->bi_phymode[2] = BI_PHYMODE_NONE;
797 		bis->bi_phymode[3] = BI_PHYMODE_NONE;
798 		break;
799 	case 12:
800 		/* 3 SGMII - 460GT */
801 		bis->bi_phymode[0] = BI_PHYMODE_SGMII;
802 		bis->bi_phymode[1] = BI_PHYMODE_SGMII;
803 		bis->bi_phymode[2] = BI_PHYMODE_SGMII;
804 		bis->bi_phymode[3] = BI_PHYMODE_NONE;
805 		break;
806 	default:
807 		break;
808 	}
809 
810 	/* Set EMAC for MDIO */
811 	mfsdr(SDR0_ETH_CFG, eth_cfg);
812 	eth_cfg |= SDR0_ETH_CFG_MDIO_SEL_EMAC0;
813 	mtsdr(SDR0_ETH_CFG, eth_cfg);
814 
815 	out_be32((void *)RGMII_FER, rmiifer);
816 #if defined(CONFIG_460GT)
817 	out_be32((void *)RGMII_FER + RGMII1_BASE_OFFSET, rmiifer1);
818 #endif
819 
820 	/* bypass the TAHOE0/TAHOE1 cores for U-Boot */
821 	mfsdr(SDR0_ETH_CFG, eth_cfg);
822 	eth_cfg |= (SDR0_ETH_CFG_TAHOE0_BYPASS | SDR0_ETH_CFG_TAHOE1_BYPASS);
823 	mtsdr(SDR0_ETH_CFG, eth_cfg);
824 
825 	return 0;
826 }
827 #endif /* CONFIG_460EX || CONFIG_460GT */
828 
829 static inline void *malloc_aligned(u32 size, u32 align)
830 {
831 	return (void *)(((u32)malloc(size + align) + align - 1) &
832 			~(align - 1));
833 }
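/*
 * Note that only the rounded-up pointer is returned, so the original
 * malloc() pointer is lost and these allocations can never be free()d.
 * That is acceptable here since the descriptor and buffer memory lives
 * for the lifetime of U-Boot, e.g.:
 *
 *	bd_cached = (u32)malloc_aligned(MAL_ALLOC_SIZE, 4096);
 */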
834 
835 static int ppc_4xx_eth_init (struct eth_device *dev, bd_t * bis)
836 {
837 	int i;
838 	unsigned long reg = 0;
839 	unsigned long msr;
840 	unsigned long speed;
841 	unsigned long duplex;
842 	unsigned long failsafe;
843 	unsigned mode_reg;
844 	unsigned short devnum;
845 	unsigned short reg_short;
846 #if defined(CONFIG_440GX) || \
847     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
848     defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
849     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
850     defined(CONFIG_405EX)
851 	u32 opbfreq;
852 	sys_info_t sysinfo;
853 #if defined(CONFIG_440GX) || \
854     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
855     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
856     defined(CONFIG_405EX)
857 	__maybe_unused int ethgroup = -1;
858 #endif
859 #endif
860 	u32 bd_cached;
861 	u32 bd_uncached = 0;
862 #ifdef CONFIG_4xx_DCACHE
863 	static u32 last_used_ea = 0;
864 #endif
865 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
866     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
867     defined(CONFIG_405EX)
868 	int rgmii_channel;
869 #endif
870 
871 	EMAC_4XX_HW_PST hw_p = dev->priv;
872 
873 	/* before doing anything, figure out if we have a MAC address */
874 	/* if not, bail */
875 	if (memcmp (dev->enetaddr, "\0\0\0\0\0\0", 6) == 0) {
876 		printf("ERROR: ethaddr not set!\n");
877 		return -1;
878 	}
879 
880 #if defined(CONFIG_440GX) || \
881     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
882     defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
883     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
884     defined(CONFIG_405EX)
885 	/* Need to get the OPB frequency so we can access the PHY */
886 	get_sys_info (&sysinfo);
887 #endif
888 
889 	msr = mfmsr ();
890 	mtmsr (msr & ~(MSR_EE));	/* disable interrupts */
891 
892 	devnum = hw_p->devnum;
893 
894 #ifdef INFO_4XX_ENET
895 	/* AS.HARNOIS
896 	 * We should have :
897 	 * hw_p->stats.pkts_handled <=	hw_p->stats.pkts_rx <= hw_p->stats.pkts_handled+PKTBUFSRX
898  * In most cases hw_p->stats.pkts_handled == hw_p->stats.pkts_rx, but it
899  * is possible that new packets (unrelated to the current transfer)
900  * had time to arrive before netloop called eth_halt.
902 	 */
903 	printf ("About preceding transfer (eth%d):\n"
904 		"- Sent packet number %d\n"
905 		"- Received packet number %d\n"
906 		"- Handled packet number %d\n",
907 		hw_p->devnum,
908 		hw_p->stats.pkts_tx,
909 		hw_p->stats.pkts_rx, hw_p->stats.pkts_handled);
910 
911 	hw_p->stats.pkts_tx = 0;
912 	hw_p->stats.pkts_rx = 0;
913 	hw_p->stats.pkts_handled = 0;
914 	hw_p->print_speed = 1;	/* print speed message again next time */
915 #endif
916 
917 	hw_p->tx_err_index = 0; /* Transmit Error Index for tx_err_log */
918 	hw_p->rx_err_index = 0; /* Receive Error Index for rx_err_log */
919 
920 	hw_p->rx_slot = 0;	/* MAL Receive Slot */
921 	hw_p->rx_i_index = 0;	/* Receive Interrupt Queue Index */
922 	hw_p->rx_u_index = 0;	/* Receive User Queue Index */
923 
924 	hw_p->tx_slot = 0;	/* MAL Transmit Slot */
925 	hw_p->tx_i_index = 0;	/* Transmit Interrupt Queue Index */
926 	hw_p->tx_u_index = 0;	/* Transmit User Queue Index */
927 
928 #if defined(CONFIG_440) && !defined(CONFIG_440SP) && !defined(CONFIG_440SPE)
929 	/* set RMII mode */
930 	/* NOTE: 440GX spec states that mode is mutually exclusive */
931 	/* NOTE: Therefore, disable all other EMACs, since we handle */
932 	/* NOTE: only one emac at a time */
933 	reg = 0;
934 	out_be32((void *)ZMII0_FER, 0);
935 	udelay (100);
936 
937 #if defined(CONFIG_440GP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
938 	out_be32((void *)ZMII0_FER, (ZMII_FER_RMII | ZMII_FER_MDI) << ZMII_FER_V (devnum));
939 #elif defined(CONFIG_440GX) || \
940     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
941     defined(CONFIG_460EX) || defined(CONFIG_460GT)
942 	ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
943 #endif
944 
945 	out_be32((void *)ZMII0_SSR, ZMII0_SSR_SP << ZMII0_SSR_V(devnum));
946 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
947 #if defined(CONFIG_405EX)
948 	ethgroup = ppc_4xx_eth_setup_bridge(devnum, bis);
949 #endif
950 
951 	sync();
952 
953 	/* provide clocks for EMAC internal loopback  */
954 	emac_loopback_enable(hw_p);
955 
956 	/* EMAC RESET */
957 	out_be32((void *)EMAC0_MR0 + hw_p->hw_addr, EMAC_MR0_SRST);
958 
959 	/* remove clocks for EMAC internal loopback  */
960 	emac_loopback_disable(hw_p);
961 
962 	failsafe = 1000;
963 	while ((in_be32((void *)EMAC0_MR0 + hw_p->hw_addr) & (EMAC_MR0_SRST)) && failsafe) {
964 		udelay (1000);
965 		failsafe--;
966 	}
967 	if (failsafe == 0)
968 		printf("\nProblem resetting EMAC!\n");
969 
970 #if defined(CONFIG_440GX) || \
971     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
972     defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
973     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
974     defined(CONFIG_405EX)
975 	/* Whack the M1 register */
976 	mode_reg = 0x0;
977 	mode_reg &= ~0x00000038;
978 	opbfreq = sysinfo.freqOPB / 1000000;
979 	if (opbfreq <= 50)
		;	/* <= 50 MHz: keep the default OBCI setting */
980 	else if (opbfreq <= 66)
981 		mode_reg |= EMAC_MR1_OBCI_66;
982 	else if (opbfreq <= 83)
983 		mode_reg |= EMAC_MR1_OBCI_83;
984 	else if (opbfreq <= 100)
985 		mode_reg |= EMAC_MR1_OBCI_100;
986 	else
987 		mode_reg |= EMAC_MR1_OBCI_GT100;
988 
989 	out_be32((void *)EMAC0_MR1 + hw_p->hw_addr, mode_reg);
990 #endif /* defined(CONFIG_440GX) || defined(CONFIG_440SP) */
991 
992 #if defined(CONFIG_GPCS_PHY_ADDR) || defined(CONFIG_GPCS_PHY1_ADDR) || \
993     defined(CONFIG_GPCS_PHY2_ADDR) || defined(CONFIG_GPCS_PHY3_ADDR)
994 	if (bis->bi_phymode[devnum] == BI_PHYMODE_SGMII) {
995 		/*
996 		 * In SGMII mode, GPCS access is needed for
997 		 * communication with the internal SGMII SerDes.
998 		 */
999 		switch (devnum) {
1000 #if defined(CONFIG_GPCS_PHY_ADDR)
1001 		case 0:
1002 			reg = CONFIG_GPCS_PHY_ADDR;
1003 			break;
1004 #endif
1005 #if defined(CONFIG_GPCS_PHY1_ADDR)
1006 		case 1:
1007 			reg = CONFIG_GPCS_PHY1_ADDR;
1008 			break;
1009 #endif
1010 #if defined(CONFIG_GPCS_PHY2_ADDR)
1011 		case 2:
1012 			reg = CONFIG_GPCS_PHY2_ADDR;
1013 			break;
1014 #endif
1015 #if defined(CONFIG_GPCS_PHY3_ADDR)
1016 		case 3:
1017 			reg = CONFIG_GPCS_PHY3_ADDR;
1018 			break;
1019 #endif
1020 		}
1021 
1022 		mode_reg = in_be32((void *)EMAC0_MR1 + hw_p->hw_addr);
1023 		mode_reg |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_IPPA_SET(reg);
1024 		out_be32((void *)EMAC0_MR1 + hw_p->hw_addr, mode_reg);
1025 
1026 		/* Configure GPCS interface to recommended setting for SGMII */
1027 		miiphy_reset(dev->name, reg);
1028 		miiphy_write(dev->name, reg, 0x04, 0x8120); /* AsymPause, FDX */
1029 		miiphy_write(dev->name, reg, 0x07, 0x2801); /* msg_pg, toggle */
1030 		miiphy_write(dev->name, reg, 0x00, 0x0140); /* 1Gbps, FDX     */
1031 	}
1032 #endif /* defined(CONFIG_GPCS_PHY_ADDR) */
1033 
1034 	/* wait for PHY to complete auto negotiation */
1035 	reg_short = 0;
1036 	switch (devnum) {
1037 	case 0:
1038 		reg = CONFIG_PHY_ADDR;
1039 		break;
1040 #if defined (CONFIG_PHY1_ADDR)
1041 	case 1:
1042 		reg = CONFIG_PHY1_ADDR;
1043 		break;
1044 #endif
1045 #if defined (CONFIG_PHY2_ADDR)
1046 	case 2:
1047 		reg = CONFIG_PHY2_ADDR;
1048 		break;
1049 #endif
1050 #if defined (CONFIG_PHY3_ADDR)
1051 	case 3:
1052 		reg = CONFIG_PHY3_ADDR;
1053 		break;
1054 #endif
1055 	default:
1056 		reg = CONFIG_PHY_ADDR;
1057 		break;
1058 	}
1059 
1060 	bis->bi_phynum[devnum] = reg;
1061 
1062 	if (reg == CONFIG_FIXED_PHY)
1063 		goto get_speed;
1064 
1065 #if defined(CONFIG_PHY_RESET)
1066 	/*
1067 	 * Reset the PHY, but only the first time through;
1068 	 * otherwise just check the speed and duplex.
1069 	 */
1070 	if (hw_p->first_init == 0) {
1071 #if defined(CONFIG_M88E1111_PHY)
1072 		miiphy_write (dev->name, reg, 0x14, 0x0ce3);
1073 		miiphy_write (dev->name, reg, 0x18, 0x4101);
1074 		miiphy_write (dev->name, reg, 0x09, 0x0e00);
1075 		miiphy_write (dev->name, reg, 0x04, 0x01e1);
1076 #if defined(CONFIG_M88E1111_DISABLE_FIBER)
1077 		miiphy_read(dev->name, reg, 0x1b, &reg_short);
1078 		reg_short |= 0x8000;
1079 		miiphy_write(dev->name, reg, 0x1b, reg_short);
1080 #endif
1081 #endif
1082 #if defined(CONFIG_M88E1112_PHY)
1083 		if (bis->bi_phymode[devnum] == BI_PHYMODE_SGMII) {
1084 			/*
1085 			 * Marvell 88E1112 PHY needs to have the SGMII MAC
1086 			 * interface (page 2) properly configured to
1087 			 * communicate with the 460EX/GT GPCS interface.
1088 			 */
1089 
1090 			/* Set access to Page 2 */
1091 			miiphy_write(dev->name, reg, 0x16, 0x0002);
1092 
1093 			miiphy_write(dev->name, reg, 0x00, 0x0040); /* 1Gbps */
1094 			miiphy_read(dev->name, reg, 0x1a, &reg_short);
1095 			reg_short |= 0x8000; /* bypass Auto-Negotiation */
1096 			miiphy_write(dev->name, reg, 0x1a, reg_short);
1097 			miiphy_reset(dev->name, reg); /* reset MAC interface */
1098 
1099 			/* Reset access to Page 0 */
1100 			miiphy_write(dev->name, reg, 0x16, 0x0000);
1101 		}
1102 #endif /* defined(CONFIG_M88E1112_PHY) */
1103 		miiphy_reset (dev->name, reg);
1104 
1105 #if defined(CONFIG_440GX) || \
1106     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1107     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1108     defined(CONFIG_405EX)
1109 
1110 #if defined(CONFIG_CIS8201_PHY)
1111 		/*
1112 		 * Cicada 8201 PHY needs to have an extended register whacked
1113 		 * for RGMII mode.
1114 		 */
1115 		if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
1116 #if defined(CONFIG_CIS8201_SHORT_ETCH)
1117 			miiphy_write (dev->name, reg, 23, 0x1300);
1118 #else
1119 			miiphy_write (dev->name, reg, 23, 0x1000);
1120 #endif
1121 			/*
1122 			 * Vitesse VSC8201/Cicada CIS8201 errata:
1123 			 * Interoperability problem with Intel 82547EI phys
1124 			 * This work around (provided by Vitesse) changes
1125 			 * the default timer convergence from 8ms to 12ms
1126 			 */
1127 			miiphy_write (dev->name, reg, 0x1f, 0x2a30);
1128 			miiphy_write (dev->name, reg, 0x08, 0x0200);
1129 			miiphy_write (dev->name, reg, 0x1f, 0x52b5);
1130 			miiphy_write (dev->name, reg, 0x02, 0x0004);
1131 			miiphy_write (dev->name, reg, 0x01, 0x0671);
1132 			miiphy_write (dev->name, reg, 0x00, 0x8fae);
1133 			miiphy_write (dev->name, reg, 0x1f, 0x2a30);
1134 			miiphy_write (dev->name, reg, 0x08, 0x0000);
1135 			miiphy_write (dev->name, reg, 0x1f, 0x0000);
1136 			/* end Vitesse/Cicada errata */
1137 		}
1138 #endif /* defined(CONFIG_CIS8201_PHY) */
1139 
1140 #if defined(CONFIG_ET1011C_PHY)
1141 		/*
1142 		 * Agere ET1011c PHY needs to have an extended register whacked
1143 		 * for RGMII mode.
1144 		 */
1145 		if (((devnum == 2) || (devnum == 3)) && (4 == ethgroup)) {
1146 			miiphy_read (dev->name, reg, 0x16, &reg_short);
1147 			reg_short &= ~(0x7);
1148 			reg_short |= 0x6;	/* RGMII DLL Delay*/
1149 			miiphy_write (dev->name, reg, 0x16, reg_short);
1150 
1151 			miiphy_read (dev->name, reg, 0x17, &reg_short);
1152 			reg_short &= ~(0x40);
1153 			miiphy_write (dev->name, reg, 0x17, reg_short);
1154 
1155 			miiphy_write(dev->name, reg, 0x1c, 0x74f0);
1156 		}
1157 #endif /* defined(CONFIG_ET1011C_PHY) */
1158 
1159 #endif /* defined(CONFIG_440GX) ... */
1160 		/* Start/Restart autonegotiation */
1161 		phy_setup_aneg (dev->name, reg);
1162 		udelay (1000);
1163 	}
1164 #endif /* defined(CONFIG_PHY_RESET) */
1165 
1166 	miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
1167 
1168 	/*
1169 	 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
1170 	 */
1171 	if ((reg_short & BMSR_ANEGCAPABLE)
1172 	    && !(reg_short & BMSR_ANEGCOMPLETE)) {
1173 		puts ("Waiting for PHY auto negotiation to complete");
1174 		i = 0;
1175 		while (!(reg_short & BMSR_ANEGCOMPLETE)) {
1176 			/*
1177 			 * Timeout reached ?
1178 			 */
1179 			if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
1180 				puts (" TIMEOUT !\n");
1181 				break;
1182 			}
1183 
1184 			if ((i++ % 1000) == 0) {
1185 				putc ('.');
1186 			}
1187 			udelay (1000);	/* 1 ms */
1188 			miiphy_read (dev->name, reg, MII_BMSR, &reg_short);
1189 		}
1190 		puts (" done\n");
1191 		udelay (500000);	/* another 500 ms (results in faster booting) */
1192 	}
1193 
1194 get_speed:
1195 	if (reg == CONFIG_FIXED_PHY) {
1196 		for (i = 0; i < ARRAY_SIZE(fixed_phy_port); i++) {
1197 			if (devnum == fixed_phy_port[i].devnum) {
1198 				speed = fixed_phy_port[i].speed;
1199 				duplex = fixed_phy_port[i].duplex;
1200 				break;
1201 			}
1202 		}
1203 
1204 		if (i == ARRAY_SIZE(fixed_phy_port)) {
1205 			printf("ERROR: PHY (%s) not configured correctly!\n",
1206 				dev->name);
1207 			return -1;
1208 		}
1209 	} else {
1210 		speed = miiphy_speed(dev->name, reg);
1211 		duplex = miiphy_duplex(dev->name, reg);
1212 	}
1213 
1214 	if (hw_p->print_speed) {
1215 		hw_p->print_speed = 0;
1216 		printf ("ENET Speed is %d Mbps - %s duplex connection (EMAC%d)\n",
1217 			(int) speed, (duplex == HALF) ? "HALF" : "FULL",
1218 			hw_p->devnum);
1219 	}
1220 
1221 #if defined(CONFIG_440) && \
1222     !defined(CONFIG_440SP) && !defined(CONFIG_440SPE) && \
1223     !defined(CONFIG_440EPX) && !defined(CONFIG_440GRX) && \
1224     !defined(CONFIG_460EX) && !defined(CONFIG_460GT)
1225 #if defined(CONFIG_440EP) || defined(CONFIG_440GR)
1226 	mfsdr(SDR0_MFR, reg);
1227 	if (speed == 100) {
1228 		reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_100M;
1229 	} else {
1230 		reg = (reg & ~SDR0_MFR_ZMII_MODE_MASK) | SDR0_MFR_ZMII_MODE_RMII_10M;
1231 	}
1232 	mtsdr(SDR0_MFR, reg);
1233 #endif
1234 
1235 	/* Set ZMII/RGMII speed according to the phy link speed */
1236 	reg = in_be32((void *)ZMII0_SSR);
1237 	if ( (speed == 100) || (speed == 1000) )
1238 		out_be32((void *)ZMII0_SSR, reg | (ZMII0_SSR_SP << ZMII0_SSR_V (devnum)));
1239 	else
1240 		out_be32((void *)ZMII0_SSR, reg & (~(ZMII0_SSR_SP << ZMII0_SSR_V (devnum))));
1241 
1242 	if ((devnum == 2) || (devnum == 3)) {
1243 		if (speed == 1000)
1244 			reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V (devnum));
1245 		else if (speed == 100)
1246 			reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V (devnum));
1247 		else if (speed == 10)
1248 			reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V (devnum));
1249 		else {
1250 			printf("Error in RGMII Speed\n");
1251 			return -1;
1252 		}
1253 		out_be32((void *)RGMII_SSR, reg);
1254 	}
1255 #endif /* defined(CONFIG_440) && !defined(CONFIG_440SP) */
1256 
1257 #if defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1258     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1259     defined(CONFIG_405EX)
1260 	if (devnum >= 2)
1261 		rgmii_channel = devnum - 2;
1262 	else
1263 		rgmii_channel = devnum;
1264 
1265 	if (speed == 1000)
1266 		reg = (RGMII_SSR_SP_1000MBPS << RGMII_SSR_V(rgmii_channel));
1267 	else if (speed == 100)
1268 		reg = (RGMII_SSR_SP_100MBPS << RGMII_SSR_V(rgmii_channel));
1269 	else if (speed == 10)
1270 		reg = (RGMII_SSR_SP_10MBPS << RGMII_SSR_V(rgmii_channel));
1271 	else {
1272 		printf("Error in RGMII Speed\n");
1273 		return -1;
1274 	}
1275 	out_be32((void *)RGMII_SSR, reg);
1276 #if defined(CONFIG_460GT)
1277 	if ((devnum == 2) || (devnum == 3))
1278 		out_be32((void *)RGMII_SSR + RGMII1_BASE_OFFSET, reg);
1279 #endif
1280 #endif
1281 
1282 	/* set the Mal configuration reg */
1283 #if defined(CONFIG_440GX) || \
1284     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1285     defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
1286     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
1287     defined(CONFIG_405EX)
1288 	mtdcr (MAL0_CFG, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA |
1289 	       MAL_CR_PLBLT_DEFAULT | MAL_CR_EOPIE | 0x00330000);
1290 #else
1291 	mtdcr (MAL0_CFG, MAL_CR_PLBB | MAL_CR_OPBBL | MAL_CR_LEA | MAL_CR_PLBLT_DEFAULT);
1292 	/* Errata 1.12: MAL_1 -- Disable MAL bursting */
1293 	if (get_pvr() == PVR_440GP_RB) {
1294 		mtdcr (MAL0_CFG, mfdcr(MAL0_CFG) & ~MAL_CR_PLBB);
1295 	}
1296 #endif
1297 
1298 	/*
1299 	 * Malloc MAL buffer descriptors and make sure they are
1300 	 * aligned on a cache line boundary
1301 	 * (401/403/IOP480 = 16 bytes, 405 = 32 bytes)
1302 	 * and don't cross cache block boundaries.
1303 	 */
1304 	if (hw_p->first_init == 0) {
1305 		debug("*** Allocating descriptor memory ***\n");
1306 
1307 		bd_cached = (u32)malloc_aligned(MAL_ALLOC_SIZE, 4096);
1308 		if (!bd_cached) {
1309 			printf("%s: Error allocating MAL descriptor buffers!\n", __func__);
1310 			return -1;
1311 		}
1312 
1313 #ifdef CONFIG_4xx_DCACHE
1314 		flush_dcache_range(bd_cached, bd_cached + MAL_ALLOC_SIZE);
1315 		if (!last_used_ea)
1316 #if defined(CONFIG_SYS_MEM_TOP_HIDE)
1317 			bd_uncached = bis->bi_memsize + CONFIG_SYS_MEM_TOP_HIDE;
1318 #else
1319 			bd_uncached = bis->bi_memsize;
1320 #endif
1321 		else
1322 			bd_uncached = last_used_ea + MAL_ALLOC_SIZE;
1323 
1324 		last_used_ea = bd_uncached;
1325 		program_tlb(bd_cached, bd_uncached, MAL_ALLOC_SIZE,
1326 			    TLB_WORD2_I_ENABLE);
1327 #else
1328 		bd_uncached = bd_cached;
1329 #endif
1330 		hw_p->tx_phys = bd_cached;
1331 		hw_p->rx_phys = bd_cached + MAL_TX_DESC_SIZE;
1332 		hw_p->tx = (mal_desc_t *)(bd_uncached);
1333 		hw_p->rx = (mal_desc_t *)(bd_uncached + MAL_TX_DESC_SIZE);
1334 		debug("hw_p->tx=%p, hw_p->rx=%p\n", hw_p->tx, hw_p->rx);
1335 	}
1336 
1337 	for (i = 0; i < NUM_TX_BUFF; i++) {
1338 		hw_p->tx[i].ctrl = 0;
1339 		hw_p->tx[i].data_len = 0;
1340 		if (hw_p->first_init == 0)
1341 			hw_p->txbuf_ptr = malloc_aligned(MAL_ALLOC_SIZE,
1342 							 L1_CACHE_BYTES);
1343 		hw_p->tx[i].data_ptr = hw_p->txbuf_ptr;
1344 		if ((NUM_TX_BUFF - 1) == i)
1345 			hw_p->tx[i].ctrl |= MAL_TX_CTRL_WRAP;
1346 		hw_p->tx_run[i] = -1;
1347 		debug("TX_BUFF %d @ 0x%08x\n", i, (u32)hw_p->tx[i].data_ptr);
1348 	}
1349 
1350 	for (i = 0; i < NUM_RX_BUFF; i++) {
1351 		hw_p->rx[i].ctrl = 0;
1352 		hw_p->rx[i].data_len = 0;
1353 		hw_p->rx[i].data_ptr = (char *)net_rx_packets[i];
1354 		if ((NUM_RX_BUFF - 1) == i)
1355 			hw_p->rx[i].ctrl |= MAL_RX_CTRL_WRAP;
1356 		hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR;
1357 		hw_p->rx_ready[i] = -1;
1358 		debug("RX_BUFF %d @ 0x%08x\n", i, (u32)hw_p->rx[i].data_ptr);
1359 	}
1360 
1361 	reg = 0x00000000;
1362 
1363 	reg |= dev->enetaddr[0];	/* set high address */
1364 	reg = reg << 8;
1365 	reg |= dev->enetaddr[1];
1366 
1367 	out_be32((void *)EMAC0_IAH + hw_p->hw_addr, reg);
1368 
1369 	reg = 0x00000000;
1370 	reg |= dev->enetaddr[2];	/* set low address  */
1371 	reg = reg << 8;
1372 	reg |= dev->enetaddr[3];
1373 	reg = reg << 8;
1374 	reg |= dev->enetaddr[4];
1375 	reg = reg << 8;
1376 	reg |= dev->enetaddr[5];
1377 
1378 	out_be32((void *)EMAC0_IAL + hw_p->hw_addr, reg);
1379 
1380 	switch (devnum) {
1381 	case 1:
1382 		/* setup MAL tx & rx channel pointers */
1383 #if defined (CONFIG_405EP) || defined (CONFIG_440EP) || defined (CONFIG_440GR)
1384 		mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1385 #else
1386 		mtdcr (MAL0_TXCTP1R, hw_p->tx_phys);
1387 #endif
1388 #if defined(CONFIG_440)
1389 		mtdcr (MAL0_TXBADDR, 0x0);
1390 		mtdcr (MAL0_RXBADDR, 0x0);
1391 #endif
1392 
1393 #if defined(CONFIG_460EX) || defined(CONFIG_460GT)
1394 		mtdcr (MAL0_RXCTP8R, hw_p->rx_phys);
1395 		/* set RX buffer size */
1396 		mtdcr (MAL0_RCBS8, ENET_MAX_MTU_ALIGNED / 16);
1397 #else
1398 		mtdcr (MAL0_RXCTP1R, hw_p->rx_phys);
1399 		/* set RX buffer size */
1400 		mtdcr (MAL0_RCBS1, ENET_MAX_MTU_ALIGNED / 16);
1401 #endif
1402 		break;
1403 #if defined (CONFIG_440GX)
1404 	case 2:
1405 		/* setup MAL tx & rx channel pointers */
1406 		mtdcr (MAL0_TXBADDR, 0x0);
1407 		mtdcr (MAL0_RXBADDR, 0x0);
1408 		mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1409 		mtdcr (MAL0_RXCTP2R, hw_p->rx_phys);
1410 		/* set RX buffer size */
1411 		mtdcr (MAL0_RCBS2, ENET_MAX_MTU_ALIGNED / 16);
1412 		break;
1413 	case 3:
1414 		/* setup MAL tx & rx channel pointers */
1415 		mtdcr (MAL0_TXBADDR, 0x0);
1416 		mtdcr (MAL0_TXCTP3R, hw_p->tx_phys);
1417 		mtdcr (MAL0_RXBADDR, 0x0);
1418 		mtdcr (MAL0_RXCTP3R, hw_p->rx_phys);
1419 		/* set RX buffer size */
1420 		mtdcr (MAL0_RCBS3, ENET_MAX_MTU_ALIGNED / 16);
1421 		break;
1422 #endif /* CONFIG_440GX */
1423 #if defined (CONFIG_460GT)
1424 	case 2:
1425 		/* setup MAL tx & rx channel pointers */
1426 		mtdcr (MAL0_TXBADDR, 0x0);
1427 		mtdcr (MAL0_RXBADDR, 0x0);
1428 		mtdcr (MAL0_TXCTP2R, hw_p->tx_phys);
1429 		mtdcr (MAL0_RXCTP16R, hw_p->rx_phys);
1430 		/* set RX buffer size */
1431 		mtdcr (MAL0_RCBS16, ENET_MAX_MTU_ALIGNED / 16);
1432 		break;
1433 	case 3:
1434 		/* setup MAL tx & rx channel pointers */
1435 		mtdcr (MAL0_TXBADDR, 0x0);
1436 		mtdcr (MAL0_RXBADDR, 0x0);
1437 		mtdcr (MAL0_TXCTP3R, hw_p->tx_phys);
1438 		mtdcr (MAL0_RXCTP24R, hw_p->rx_phys);
1439 		/* set RX buffer size */
1440 		mtdcr (MAL0_RCBS24, ENET_MAX_MTU_ALIGNED / 16);
1441 		break;
1442 #endif /* CONFIG_460GT */
1443 	case 0:
1444 	default:
1445 		/* setup MAL tx & rx channel pointers */
1446 #if defined(CONFIG_440)
1447 		mtdcr (MAL0_TXBADDR, 0x0);
1448 		mtdcr (MAL0_RXBADDR, 0x0);
1449 #endif
1450 		mtdcr (MAL0_TXCTP0R, hw_p->tx_phys);
1451 		mtdcr (MAL0_RXCTP0R, hw_p->rx_phys);
1452 		/* set RX buffer size */
1453 		mtdcr (MAL0_RCBS0, ENET_MAX_MTU_ALIGNED / 16);
1454 		break;
1455 	}
1456 
1457 	/* Enable MAL transmit and receive channels */
1458 #if defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR)
1459 	mtdcr (MAL0_TXCASR, (MAL_TXRX_CASR >> (hw_p->devnum*2)));
1460 #else
1461 	mtdcr (MAL0_TXCASR, (MAL_TXRX_CASR >> hw_p->devnum));
1462 #endif
1463 	mtdcr (MAL0_RXCASR, (MAL_TXRX_CASR >> hw_p->devnum));
1464 
1465 	/* set transmit enable & receive enable */
1466 	out_be32((void *)EMAC0_MR0 + hw_p->hw_addr, EMAC_MR0_TXE | EMAC_MR0_RXE);
1467 
1468 	mode_reg = in_be32((void *)EMAC0_MR1 + hw_p->hw_addr);
1469 
1470 	/* set rx-/tx-fifo size */
1471 	mode_reg = (mode_reg & ~EMAC_MR1_FIFO_MASK) | EMAC_MR1_FIFO_SIZE;
1472 
1473 	/* set speed */
1474 	if (speed == _1000BASET) {
1475 #if defined(CONFIG_440SP) || defined(CONFIG_440SPE)
1476 		unsigned long pfc1;
1477 
1478 		mfsdr (SDR0_PFC1, pfc1);
1479 		pfc1 |= SDR0_PFC1_EM_1000;
1480 		mtsdr (SDR0_PFC1, pfc1);
1481 #endif
1482 		mode_reg = mode_reg | EMAC_MR1_MF_1000MBPS | EMAC_MR1_IST;
1483 	} else if (speed == _100BASET)
1484 		mode_reg = mode_reg | EMAC_MR1_MF_100MBPS | EMAC_MR1_IST;
1485 	else
1486 		mode_reg = mode_reg & ~0x00C00000;	/* 10 MBPS */
1487 	if (duplex == FULL)
1488 		mode_reg = mode_reg | 0x80000000 | EMAC_MR1_IST;
1489 
1490 	out_be32((void *)EMAC0_MR1 + hw_p->hw_addr, mode_reg);
1491 
1492 	/* Enable broadcast and individual address */
1493 	/* TBS: enabling runts as some misbehaved nics will send runts */
1494 	out_be32((void *)EMAC0_RXM + hw_p->hw_addr, EMAC_RMR_BAE | EMAC_RMR_IAE);
1495 
1496 	/* we probably need to set the tx mode1 reg? maybe at tx time */
1497 
1498 	/* set transmit request threshold register */
1499 	out_be32((void *)EMAC0_TRTR + hw_p->hw_addr, 0x18000000);	/* 256 byte threshold */
1500 
1501 	/* set receive	low/high water mark register */
1502 #if defined(CONFIG_440)
1503 	/* 440s has a 64 byte burst length */
1504 	out_be32((void *)EMAC0_RX_HI_LO_WMARK + hw_p->hw_addr, 0x80009000);
1505 #else
1506 	/* 405s have a 16 byte burst length */
1507 	out_be32((void *)EMAC0_RX_HI_LO_WMARK + hw_p->hw_addr, 0x0f002000);
1508 #endif /* defined(CONFIG_440) */
1509 	out_be32((void *)EMAC0_TMR1 + hw_p->hw_addr, 0xf8640000);
1510 
1511 	/* Set fifo limit entry in tx mode 0 */
1512 	out_be32((void *)EMAC0_TMR0 + hw_p->hw_addr, 0x00000003);
1513 	/* Frame gap set */
1514 	out_be32((void *)EMAC0_I_FRAME_GAP_REG + hw_p->hw_addr, 0x00000008);
1515 
1516 	/* Set EMAC IER */
1517 	hw_p->emac_ier = EMAC_ISR_PTLE | EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
1518 	if (speed == _100BASET)
1519 		hw_p->emac_ier = hw_p->emac_ier | EMAC_ISR_SYE;
1520 
1521 	out_be32((void *)EMAC0_ISR + hw_p->hw_addr, 0xffffffff);	/* clear pending interrupts */
1522 	out_be32((void *)EMAC0_IER + hw_p->hw_addr, hw_p->emac_ier);
1523 
1524 	if (hw_p->first_init == 0) {
1525 		/*
1526 		 * Connect interrupt service routines
1527 		 */
1528 		irq_install_handler(ETH_IRQ_NUM(hw_p->devnum),
1529 				    (interrupt_handler_t *) enetInt, dev);
1530 	}
1531 
1532 	mtmsr (msr);		/* enable interrupts again */
1533 
1534 	hw_p->bis = bis;
1535 	hw_p->first_init = 1;
1536 
1537 	return 0;
1538 }
1539 
1540 
1541 static int ppc_4xx_eth_send(struct eth_device *dev, void *ptr, int len)
1542 {
1543 	struct enet_frame *ef_ptr;
1544 	ulong time_start, time_now;
1545 	unsigned long temp_txm0;
1546 	EMAC_4XX_HW_PST hw_p = dev->priv;
1547 
1548 	ef_ptr = (struct enet_frame *) ptr;
1549 
1550 	/*-----------------------------------------------------------------------+
1551 	 *  Copy in our address into the frame.
1552 	 *-----------------------------------------------------------------------*/
1553 	(void) memcpy (ef_ptr->source_addr, dev->enetaddr, ENET_ADDR_LENGTH);
1554 
1555 	/*-----------------------------------------------------------------------+
1556 	 * If the frame is too long, truncate it to the maximum MTU.
1557 	 *-----------------------------------------------------------------------*/
1558 	/* TBS: where does the fragment go???? */
1559 	if (len > ENET_MAX_MTU)
1560 		len = ENET_MAX_MTU;
1561 
1562 	/*   memcpy ((void *) &tx_buff[tx_slot], (const void *) ptr, len); */
1563 	memcpy ((void *) hw_p->txbuf_ptr, (const void *) ptr, len);
1564 	flush_dcache_range((u32)hw_p->txbuf_ptr, (u32)hw_p->txbuf_ptr + len);
1565 
1566 	/*-----------------------------------------------------------------------+
1567 	 * set TX Buffer busy, and send it
1568 	 *-----------------------------------------------------------------------*/
1569 	hw_p->tx[hw_p->tx_slot].ctrl = (MAL_TX_CTRL_LAST |
1570 					EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP) &
1571 		~(EMAC_TX_CTRL_ISA | EMAC_TX_CTRL_RSA);
1572 	if ((NUM_TX_BUFF - 1) == hw_p->tx_slot)
1573 		hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_WRAP;
1574 
1575 	hw_p->tx[hw_p->tx_slot].data_len = (short) len;
1576 	hw_p->tx[hw_p->tx_slot].ctrl |= MAL_TX_CTRL_READY;
1577 
1578 	sync();
1579 
1580 	out_be32((void *)EMAC0_TMR0 + hw_p->hw_addr,
1581 		 in_be32((void *)EMAC0_TMR0 + hw_p->hw_addr) | EMAC_TMR0_GNP0);
1582 #ifdef INFO_4XX_ENET
1583 	hw_p->stats.pkts_tx++;
1584 #endif
1585 
1586 	/*-----------------------------------------------------------------------+
1587 	 * poll until the packet is sent and then make sure it is OK
1588 	 *-----------------------------------------------------------------------*/
1589 	time_start = get_timer (0);
1590 	while (1) {
1591 		temp_txm0 = in_be32((void *)EMAC0_TMR0 + hw_p->hw_addr);
1592 		/* loop until GNP0 is cleared by the EMAC or 3 seconds elapse */
1593 		if ((temp_txm0 & EMAC_TMR0_GNP0) != 0) {
1594 			/* transmit is done, so now check for errors
1595 			 * If there is an error, an interrupt should
1596 			 * happen when we return
1597 			 */
1598 			time_now = get_timer (0);
1599 			if ((time_now - time_start) > 3000) {
1600 				return (-1);
1601 			}
1602 		} else {
1603 			return (len);
1604 		}
1605 	}
1606 }
1607 
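/*-----------------------------------------------------------------------------+
 *  enetInt() -- shared MAL/EMAC interrupt service routine
 *
 *  Reads the UIC status for the MAL, MAL-error and EMAC interrupt
 *  sources, dispatches mal_err()/emac_err() on error conditions,
 *  acknowledges TX EOB events and hands RX EOB events to enet_rcv().
 *  The loop repeats until nothing more was serviced; returns 0 if an
 *  interrupt was handled here, -1 otherwise.
 *-----------------------------------------------------------------------------*/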
1608 int enetInt (struct eth_device *dev)
1609 {
1610 	int serviced;
1611 	int rc = -1;		/* default to not us */
1612 	u32 mal_isr;
1613 	u32 emac_isr = 0;
1614 	u32 mal_eob;
1615 	u32 uic_mal;
1616 	u32 uic_mal_err;
1617 	u32 uic_emac;
1618 	u32 uic_emac_b;
1619 	EMAC_4XX_HW_PST hw_p;
1620 
1621 	/*
1622 	 * Because the mal is generic, we need to get the current
1623 	 * eth device
1624 	 */
1625 	dev = eth_get_dev();
1626 
1627 	hw_p = dev->priv;
1628 
1629 	/* enter loop that stays in interrupt code until nothing to service */
1630 	do {
1631 		serviced = 0;
1632 
1633 		uic_mal = mfdcr(UIC_BASE_MAL + UIC_MSR);
1634 		uic_mal_err = mfdcr(UIC_BASE_MAL_ERR + UIC_MSR);
1635 		uic_emac = mfdcr(UIC_BASE_EMAC + UIC_MSR);
1636 		uic_emac_b = mfdcr(UIC_BASE_EMAC_B + UIC_MSR);
1637 
1638 		if (!(uic_mal & (UIC_MAL_RXEOB | UIC_MAL_TXEOB))
1639 		    && !(uic_mal_err & (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE))
1640 		    && !(uic_emac & UIC_ETHx) && !(uic_emac_b & UIC_ETHxB)) {
1641 			/* not for us */
1642 			return (rc);
1643 		}
1644 
1645 		/* get and clear controller status interrupts */
1646 		/* look at MAL and EMAC error interrupts */
1647 		if (uic_mal_err & (UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE)) {
1648 			/* we have a MAL error interrupt */
1649 			mal_isr = mfdcr(MAL0_ESR);
1650 			mal_err(dev, mal_isr, uic_mal_err,
1651 				 MAL_UIC_DEF, MAL_UIC_ERR);
1652 
1653 			/* clear MAL error interrupt status bits */
1654 			mtdcr(UIC_BASE_MAL_ERR + UIC_SR,
1655 			      UIC_MAL_SERR | UIC_MAL_TXDE | UIC_MAL_RXDE);
1656 
1657 			return -1;
1658 		}
1659 
1660 		/* look for EMAC errors */
1661 		if ((uic_emac & UIC_ETHx) || (uic_emac_b & UIC_ETHxB)) {
1662 			emac_isr = in_be32((void *)EMAC0_ISR + hw_p->hw_addr);
1663 			emac_err(dev, emac_isr);
1664 
1665 			/* clear EMAC error interrupt status bits */
1666 			mtdcr(UIC_BASE_EMAC + UIC_SR, UIC_ETHx);
1667 			mtdcr(UIC_BASE_EMAC_B + UIC_SR, UIC_ETHxB);
1668 
1669 			return -1;
1670 		}
1671 
1672 		/* handle MAL TX EOB interrupt from a tx */
1673 		if (uic_mal & UIC_MAL_TXEOB) {
1674 			/* clear MAL interrupt status bits */
1675 			mal_eob = mfdcr(MAL0_TXEOBISR);
1676 			mtdcr(MAL0_TXEOBISR, mal_eob);
1677 			mtdcr(UIC_BASE_MAL + UIC_SR, UIC_MAL_TXEOB);
1678 
1679 			/* indicate that we serviced an interrupt */
1680 			serviced = 1;
1681 			rc = 0;
1682 		}
1683 
1684 		/* handle MAL RX EOB interrupt from a receive */
1685 		/* check for EOB on valid channels	     */
1686 		if (uic_mal & UIC_MAL_RXEOB) {
1687 			mal_eob = mfdcr(MAL0_RXEOBISR);
1688 			if (mal_eob &
1689 			    (0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL))) {
1690 				/* push packet to upper layer */
1691 				enet_rcv(dev, emac_isr);
1692 
1693 				/* clear MAL interrupt status bits */
1694 				mtdcr(UIC_BASE_MAL + UIC_SR, UIC_MAL_RXEOB);
1695 
1696 				/* indicate that we serviced an interrupt */
1697 				serviced = 1;
1698 				rc = 0;
1699 			}
1700 		}
1701 #if defined(CONFIG_405EZ)
1702 		/*
1703 		 * On 405EZ the RX-/TX-interrupts are coalesced into
1704 		 * one IRQ bit in the UIC. We need to acknowledge the
1705 		 * RX-/TX-interrupts in the SDR0_ICINTSTAT reg as well.
1706 		 */
1707 		mtsdr(SDR0_ICINTSTAT,
1708 		      SDR_ICRX_STAT | SDR_ICTX0_STAT | SDR_ICTX1_STAT);
1709 #endif  /* defined(CONFIG_405EZ) */
1710 	} while (serviced);
1711 
1712 	return (rc);
1713 }
1714 
1715 /*-----------------------------------------------------------------------------+
1716  *  MAL Error Routine
1717  *-----------------------------------------------------------------------------*/
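/*
 * Clears the MAL error status (ESR plus the TX/RX descriptor error
 * registers) and restarts the interface via eth_init().
 */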
1718 static void mal_err (struct eth_device *dev, unsigned long isr,
1719 		     unsigned long uic, unsigned long maldef,
1720 		     unsigned long mal_errr)
1721 {
1722 	mtdcr (MAL0_ESR, isr);	/* clear interrupt */
1723 
1724 	/* clear DE interrupt */
1725 	mtdcr (MAL0_TXDEIR, 0xC0000000);
1726 	mtdcr (MAL0_RXDEIR, 0x80000000);
1727 
1728 #ifdef INFO_4XX_ENET
1729 	printf("\nMAL error occured.... ISR = %lx UIC = = %lx	MAL_DEF = %lx  MAL_ERR= %lx\n",
1730 	       isr, uic, maldef, mal_errr);
1731 #endif
1732 
1733 	eth_init();	/* start again... */
1734 }
1735 
1736 /*-----------------------------------------------------------------------------+
1737  *  EMAC Error Routine
1738  *-----------------------------------------------------------------------------*/
1739 static void emac_err (struct eth_device *dev, unsigned long isr)
1740 {
1741 	EMAC_4XX_HW_PST hw_p = dev->priv;
1742 
1743 	printf ("EMAC%d error occured.... ISR = %lx\n", hw_p->devnum, isr);
1744 	out_be32((void *)EMAC0_ISR + hw_p->hw_addr, isr);
1745 }
1746 
1747 /*-----------------------------------------------------------------------------+
1748  *  enet_rcv() handles the ethernet receive data
1749  *-----------------------------------------------------------------------------*/
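/*
 * Walks the RX descriptor ring starting at rx_slot until an empty
 * descriptor is found (or NUM_RX_BUFF entries have been scanned).
 * Oversized or errored frames are dropped and logged in rx_err_log;
 * good buffer indices are queued in the rx_ready ring for
 * ppc_4xx_eth_rx() to consume.
 */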
1750 static void enet_rcv (struct eth_device *dev, unsigned long malisr)
1751 {
1752 	unsigned long data_len;
1753 	unsigned long rx_eob_isr;
1754 	EMAC_4XX_HW_PST hw_p = dev->priv;
1755 
1756 	int handled = 0;
1757 	int i;
1758 	int loop_count = 0;
1759 
1760 	rx_eob_isr = mfdcr (MAL0_RXEOBISR);
1761 	if ((0x80000000 >> (hw_p->devnum * MAL_RX_CHAN_MUL)) & rx_eob_isr) {
1762 		/* clear EOB */
1763 		mtdcr (MAL0_RXEOBISR, rx_eob_isr);
1764 
1765 		/* EMAC RX done */
1766 		while (1) {	/* do all */
1767 			i = hw_p->rx_slot;
1768 
1769 			if ((MAL_RX_CTRL_EMPTY & hw_p->rx[i].ctrl)
1770 			    || (loop_count >= NUM_RX_BUFF))
1771 				break;
1772 
1773 			loop_count++;
1774 			handled++;
1775 			data_len = (unsigned long) hw_p->rx[i].data_len & 0x0fff;	/* Get len */
1776 			if (data_len) {
1777 				if (data_len > ENET_MAX_MTU)	/* Check len */
1778 					data_len = 0;
1779 				else {
1780 					if (EMAC_RX_ERRORS & hw_p->rx[i].ctrl) {	/* Check Errors */
1781 						data_len = 0;
1782 						hw_p->stats.rx_err_log[hw_p->
1783 								       rx_err_index]
1784 							= hw_p->rx[i].ctrl;
1785 						hw_p->rx_err_index++;
1786 						if (hw_p->rx_err_index ==
1787 						    MAX_ERR_LOG)
1788 							hw_p->rx_err_index =
1789 								0;
1790 					}	/* emac_errors */
1791 				}	/* data_len < max mtu */
1792 			}	/* if data_len */
1793 			if (!data_len) {	/* no data */
1794 				hw_p->rx[i].ctrl |= MAL_RX_CTRL_EMPTY;	/* Free Recv Buffer */
1795 
1796 				hw_p->stats.data_len_err++;	/* Error at Rx */
1797 			}
1798 
1799 			/* !data_len */
1800 			/* AS.HARNOIS */
1801 			/* Check if user has already eaten buffer */
1802 			/* if not => ERROR */
1803 			else if (hw_p->rx_ready[hw_p->rx_i_index] != -1) {
1804 				if (hw_p->is_receiving)
1805 					printf ("ERROR : Receive buffers are full!\n");
1806 				break;
1807 			} else {
1808 				hw_p->stats.rx_frames++;
1809 				hw_p->stats.rx += data_len;
1810 #ifdef INFO_4XX_ENET
1811 				hw_p->stats.pkts_rx++;
1812 #endif
1813 				/* AS.HARNOIS
1814 				 * use ring buffer
1815 				 */
1816 				hw_p->rx_ready[hw_p->rx_i_index] = i;
1817 				hw_p->rx_i_index++;
1818 				if (NUM_RX_BUFF == hw_p->rx_i_index)
1819 					hw_p->rx_i_index = 0;
1820 
1821 				hw_p->rx_slot++;
1822 				if (NUM_RX_BUFF == hw_p->rx_slot)
1823 					hw_p->rx_slot = 0;
1824 
1825 				/*  AS.HARNOIS
1826 				 * free receive buffer only when
1827 				 * buffer has been handled (eth_rx)
1828 				 rx[i].ctrl |= MAL_RX_CTRL_EMPTY;
1829 				 */
1830 			}	/* if data_len */
1831 		}		/* while */
1832 	}			/* if EMACK_RXCHL */
1833 }
1834 
1835 
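/*-----------------------------------------------------------------------------+
 *  ppc_4xx_eth_rx() -- receive path (called by the network stack)
 *
 *  Pops buffer indices from the rx_ready ring, invalidates the data
 *  cache over each frame, passes the frame (minus the 4 byte FCS) to
 *  net_process_received_packet() and then marks the descriptor empty
 *  again.  External interrupts (MSR[EE]) are masked around each
 *  hand-off.
 *-----------------------------------------------------------------------------*/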
1836 static int ppc_4xx_eth_rx (struct eth_device *dev)
1837 {
1838 	int length;
1839 	int user_index;
1840 	unsigned long msr;
1841 	EMAC_4XX_HW_PST hw_p = dev->priv;
1842 
1843 	hw_p->is_receiving = 1; /* tell driver */
1844 
1845 	for (;;) {
1846 		/* AS.HARNOIS
1847 		 * use ring buffer and
1848 		 * get index from rx buffer descriptor queue
1849 		 */
1850 		user_index = hw_p->rx_ready[hw_p->rx_u_index];
1851 		if (user_index == -1) {
1852 			length = -1;
1853 			break;	/* nothing received - leave for() loop */
1854 		}
1855 
1856 		msr = mfmsr ();
1857 		mtmsr (msr & ~(MSR_EE));
1858 
1859 		length = hw_p->rx[user_index].data_len & 0x0fff;
1860 
1861 		/*
1862 		 * Pass the packet up to the protocol layers.
1863 		 * net_process_received_packet(net_rx_packets[rxIdx],
1864 		 *			       length - 4);
1865 		 * net_process_received_packet(net_rx_packets[i], length);
1866 		 */
1867 		invalidate_dcache_range((u32)hw_p->rx[user_index].data_ptr,
1868 					(u32)hw_p->rx[user_index].data_ptr +
1869 					length - 4);
1870 		net_process_received_packet(net_rx_packets[user_index],
1871 					    length - 4);
1872 		/* Free Recv Buffer */
1873 		hw_p->rx[user_index].ctrl |= MAL_RX_CTRL_EMPTY;
1874 		/* Free rx buffer descriptor queue */
1875 		hw_p->rx_ready[hw_p->rx_u_index] = -1;
1876 		hw_p->rx_u_index++;
1877 		if (NUM_RX_BUFF == hw_p->rx_u_index)
1878 			hw_p->rx_u_index = 0;
1879 
1880 #ifdef INFO_4XX_ENET
1881 		hw_p->stats.pkts_handled++;
1882 #endif
1883 
1884 		mtmsr (msr);	/* Enable IRQ's */
1885 	}
1886 
1887 	hw_p->is_receiving = 0; /* tell driver */
1888 
1889 	return length;
1890 }
1891 
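/*-----------------------------------------------------------------------------+
 *  ppc_4xx_eth_initialize() -- one-time driver registration
 *
 *  Reads the per-port MAC addresses from the environment, assigns the
 *  per-EMAC register offset (hw_addr), allocates an eth_device plus
 *  private data for every port that has a MAC address, registers it
 *  with the network core and the MII layer, and on the very first
 *  pass programs MAL0_IER and installs the shared MAL interrupt
 *  handlers.
 *-----------------------------------------------------------------------------*/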
1892 int ppc_4xx_eth_initialize (bd_t * bis)
1893 {
1894 	static int virgin = 0;
1895 	struct eth_device *dev;
1896 	int eth_num = 0;
1897 	EMAC_4XX_HW_PST hw = NULL;
1898 	u8 ethaddr[4 + CONFIG_EMAC_NR_START][6];
1899 	u32 hw_addr[4];
1900 	u32 mal_ier;
1901 
1902 #if defined(CONFIG_440GX)
1903 	unsigned long pfc1;
1904 
1905 	mfsdr (SDR0_PFC1, pfc1);
1906 	pfc1 &= ~(0x01e00000);
1907 	pfc1 |= 0x01200000;
1908 	mtsdr (SDR0_PFC1, pfc1);
1909 #endif
1910 
1911 	/* first clear all mac-addresses */
1912 	for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++)
1913 		memcpy(ethaddr[eth_num], "\0\0\0\0\0\0", 6);
1914 
1915 	for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1916 		int ethaddr_idx = eth_num + CONFIG_EMAC_NR_START;
1917 		switch (eth_num) {
1918 		default:		/* fall through */
1919 		case 0:
1920 			eth_getenv_enetaddr("ethaddr", ethaddr[ethaddr_idx]);
1921 			hw_addr[eth_num] = 0x0;
1922 			break;
1923 #ifdef CONFIG_HAS_ETH1
1924 		case 1:
1925 			eth_getenv_enetaddr("eth1addr", ethaddr[ethaddr_idx]);
1926 			hw_addr[eth_num] = 0x100;
1927 			break;
1928 #endif
1929 #ifdef CONFIG_HAS_ETH2
1930 		case 2:
1931 			eth_getenv_enetaddr("eth2addr", ethaddr[ethaddr_idx]);
1932 #if defined(CONFIG_460GT)
1933 			hw_addr[eth_num] = 0x300;
1934 #else
1935 			hw_addr[eth_num] = 0x400;
1936 #endif
1937 			break;
1938 #endif
1939 #ifdef CONFIG_HAS_ETH3
1940 		case 3:
1941 			eth_getenv_enetaddr("eth3addr", ethaddr[ethaddr_idx]);
1942 #if defined(CONFIG_460GT)
1943 			hw_addr[eth_num] = 0x400;
1944 #else
1945 			hw_addr[eth_num] = 0x600;
1946 #endif
1947 			break;
1948 #endif
1949 		}
1950 	}
1951 
1952 	/* set phy num and mode */
1953 	bis->bi_phynum[0] = CONFIG_PHY_ADDR;
1954 	bis->bi_phymode[0] = 0;
1955 
1956 #if defined(CONFIG_PHY1_ADDR)
1957 	bis->bi_phynum[1] = CONFIG_PHY1_ADDR;
1958 	bis->bi_phymode[1] = 0;
1959 #endif
1960 #if defined(CONFIG_440GX)
1961 	bis->bi_phynum[2] = CONFIG_PHY2_ADDR;
1962 	bis->bi_phynum[3] = CONFIG_PHY3_ADDR;
1963 	bis->bi_phymode[2] = 2;
1964 	bis->bi_phymode[3] = 2;
1965 #endif
1966 
1967 #if defined(CONFIG_440GX) || \
1968     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
1969     defined(CONFIG_405EX)
1970 	ppc_4xx_eth_setup_bridge(0, bis);
1971 #endif
1972 
1973 	for (eth_num = 0; eth_num < LAST_EMAC_NUM; eth_num++) {
1974 		/*
1975 		 * See if we can actually bring up the interface,
1976 		 * otherwise, skip it
1977 		 */
1978 		if (memcmp (ethaddr[eth_num], "\0\0\0\0\0\0", 6) == 0) {
1979 			bis->bi_phymode[eth_num] = BI_PHYMODE_NONE;
1980 			continue;
1981 		}
1982 
1983 		/* Allocate device structure */
1984 		dev = (struct eth_device *) malloc (sizeof (*dev));
1985 		if (dev == NULL) {
1986 			printf ("ppc_4xx_eth_initialize: "
1987 				"Cannot allocate eth_device %d\n", eth_num);
1988 			return (-1);
1989 		}
1990 		memset(dev, 0, sizeof(*dev));
1991 
1992 		/* Allocate our private use data */
1993 		hw = (EMAC_4XX_HW_PST) malloc (sizeof (*hw));
1994 		if (hw == NULL) {
1995 			printf ("ppc_4xx_eth_initialize: "
1996 				"Cannot allocate private hw data for eth_device %d",
1997 				eth_num);
1998 			free (dev);
1999 			return (-1);
2000 		}
2001 		memset(hw, 0, sizeof(*hw));
2002 
2003 		hw->hw_addr = hw_addr[eth_num];
2004 		memcpy (dev->enetaddr, ethaddr[eth_num], 6);
2005 		hw->devnum = eth_num;
2006 		hw->print_speed = 1;
2007 
2008 		sprintf (dev->name, "ppc_4xx_eth%d", eth_num - CONFIG_EMAC_NR_START);
2009 		dev->priv = (void *) hw;
2010 		dev->init = ppc_4xx_eth_init;
2011 		dev->halt = ppc_4xx_eth_halt;
2012 		dev->send = ppc_4xx_eth_send;
2013 		dev->recv = ppc_4xx_eth_rx;
2014 
2015 		eth_register(dev);
2016 
2017 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
2018 		miiphy_register(dev->name,
2019 				emac4xx_miiphy_read, emac4xx_miiphy_write);
2020 #endif
2021 
2022 		if (0 == virgin) {
2023 			/* set the MAL IER ??? names may change with new spec ??? */
2024 #if defined(CONFIG_440SPE) || \
2025     defined(CONFIG_440EPX) || defined(CONFIG_440GRX) || \
2026     defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
2027     defined(CONFIG_405EX)
2028 			mal_ier =
2029 				MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE |
2030 				MAL_IER_DE | MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE ;
2031 #else
2032 			mal_ier =
2033 				MAL_IER_DE | MAL_IER_NE | MAL_IER_TE |
2034 				MAL_IER_OPBE | MAL_IER_PLBE;
2035 #endif
2036 			mtdcr (MAL0_ESR, 0xffffffff);	/* clear pending interrupts */
2037 			mtdcr (MAL0_TXDEIR, 0xffffffff);	/* clear pending interrupts */
2038 			mtdcr (MAL0_RXDEIR, 0xffffffff);	/* clear pending interrupts */
2039 			mtdcr (MAL0_IER, mal_ier);
2040 
2041 			/* install MAL interrupt handler */
2042 			irq_install_handler (VECNUM_MAL_SERR,
2043 					     (interrupt_handler_t *) enetInt,
2044 					     dev);
2045 			irq_install_handler (VECNUM_MAL_TXEOB,
2046 					     (interrupt_handler_t *) enetInt,
2047 					     dev);
2048 			irq_install_handler (VECNUM_MAL_RXEOB,
2049 					     (interrupt_handler_t *) enetInt,
2050 					     dev);
2051 			irq_install_handler (VECNUM_MAL_TXDE,
2052 					     (interrupt_handler_t *) enetInt,
2053 					     dev);
2054 			irq_install_handler (VECNUM_MAL_RXDE,
2055 					     (interrupt_handler_t *) enetInt,
2056 					     dev);
2057 			virgin = 1;
2058 		}
2059 	}			/* end for each supported device */
2060 
2061 	return 0;
2062 }
2063