1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2006-2011 Freescale Semiconductor, Inc.
4 *
5 * Dave Liu <daveliu@freescale.com>
6 */
7
8 #include <common.h>
9 #include <log.h>
10 #include <net.h>
11 #include <malloc.h>
12 #include <linux/delay.h>
13 #include <linux/errno.h>
14 #include <asm/io.h>
15 #include <linux/immap_qe.h>
16 #include "uccf.h"
17 #include "uec.h"
18 #include "uec_phy.h"
19 #include "miiphy.h"
20 #include <fsl_qe.h>
21 #include <phy.h>
22
23 #if !defined(CONFIG_DM_ETH)
24 /* Default UTBIPAR SMI address */
25 #ifndef CONFIG_UTBIPAR_INIT_TBIPA
26 #define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
27 #endif
28
/*
 * Per-controller configuration table.  One STD_UEC_INFO() entry is
 * compiled in for each UCC Ethernet controller the board config
 * enables, so the array index does NOT necessarily equal the UCC
 * number - it only covers the enabled controllers, in order.
 */
static struct uec_inf uec_info[] = {
#ifdef CONFIG_UEC_ETH1
	STD_UEC_INFO(1),	/* UEC1 */
#endif
#ifdef CONFIG_UEC_ETH2
	STD_UEC_INFO(2),	/* UEC2 */
#endif
#ifdef CONFIG_UEC_ETH3
	STD_UEC_INFO(3),	/* UEC3 */
#endif
#ifdef CONFIG_UEC_ETH4
	STD_UEC_INFO(4),	/* UEC4 */
#endif
#ifdef CONFIG_UEC_ETH5
	STD_UEC_INFO(5),	/* UEC5 */
#endif
#ifdef CONFIG_UEC_ETH6
	STD_UEC_INFO(6),	/* UEC6 */
#endif
#ifdef CONFIG_UEC_ETH7
	STD_UEC_INFO(7),	/* UEC7 */
#endif
#ifdef CONFIG_UEC_ETH8
	STD_UEC_INFO(8),	/* UEC8 */
#endif
};
55
/* Upper bound on UEC controllers this driver can track */
#define MAXCONTROLLERS	(8)

/*
 * Registered ethernet devices, indexed by controller slot.  Used by
 * the miiphy glue below to map a bus/device name back to its
 * eth_device.  Slots for controllers that were never registered may
 * be NULL.
 */
static struct eth_device *devlist[MAXCONTROLLERS];
59
/*
 * Enable the MAC transmitter and/or receiver via MACCFG1.
 *
 * @uec:  driver private state (must be initialized)
 * @mode: COMM_DIR_TX, COMM_DIR_RX or both
 * Return: 0 on success, -EINVAL if @uec is NULL.
 */
static int uec_mac_enable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *regs;
	u32 cfg1;

	if (!uec) {
		printf("%s: uec not initial\n", __func__);
		return -EINVAL;
	}

	regs = uec->uec_regs;
	cfg1 = in_be32(&regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		cfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&regs->maccfg1, cfg1);
		uec->mac_tx_enabled = 1;
	}

	if (mode & COMM_DIR_RX) {
		cfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&regs->maccfg1, cfg1);
		uec->mac_rx_enabled = 1;
	}

	return 0;
}
87
/*
 * Disable the MAC transmitter and/or receiver via MACCFG1.
 *
 * @uec:  driver private state (must be initialized)
 * @mode: COMM_DIR_TX, COMM_DIR_RX or both
 * Return: 0 on success, -EINVAL if @uec is NULL.
 */
static int uec_mac_disable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *regs;
	u32 cfg1;

	if (!uec) {
		printf("%s: uec not initial\n", __func__);
		return -EINVAL;
	}

	regs = uec->uec_regs;
	cfg1 = in_be32(&regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		cfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&regs->maccfg1, cfg1);
		uec->mac_tx_enabled = 0;
	}

	if (mode & COMM_DIR_RX) {
		cfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&regs->maccfg1, cfg1);
		uec->mac_rx_enabled = 0;
	}

	return 0;
}
115
uec_graceful_stop_tx(struct uec_priv * uec)116 static int uec_graceful_stop_tx(struct uec_priv *uec)
117 {
118 ucc_fast_t *uf_regs;
119 u32 cecr_subblock;
120 u32 ucce;
121
122 if (!uec || !uec->uccf) {
123 printf("%s: No handle passed.\n", __func__);
124 return -EINVAL;
125 }
126
127 uf_regs = uec->uccf->uf_regs;
128
129 /* Clear the grace stop event */
130 out_be32(&uf_regs->ucce, UCCE_GRA);
131
132 /* Issue host command */
133 cecr_subblock =
134 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
135 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
136 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
137
138 /* Wait for command to complete */
139 do {
140 ucce = in_be32(&uf_regs->ucce);
141 } while (!(ucce & UCCE_GRA));
142
143 uec->grace_stopped_tx = 1;
144
145 return 0;
146 }
147
uec_graceful_stop_rx(struct uec_priv * uec)148 static int uec_graceful_stop_rx(struct uec_priv *uec)
149 {
150 u32 cecr_subblock;
151 u8 ack;
152
153 if (!uec) {
154 printf("%s: No handle passed.\n", __func__);
155 return -EINVAL;
156 }
157
158 if (!uec->p_rx_glbl_pram) {
159 printf("%s: No init rx global parameter\n", __func__);
160 return -EINVAL;
161 }
162
163 /* Clear acknowledge bit */
164 ack = uec->p_rx_glbl_pram->rxgstpack;
165 ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
166 uec->p_rx_glbl_pram->rxgstpack = ack;
167
168 /* Keep issuing cmd and checking ack bit until it is asserted */
169 do {
170 /* Issue host command */
171 cecr_subblock =
172 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
173 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
174 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
175 ack = uec->p_rx_glbl_pram->rxgstpack;
176 } while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));
177
178 uec->grace_stopped_rx = 1;
179
180 return 0;
181 }
182
uec_restart_tx(struct uec_priv * uec)183 static int uec_restart_tx(struct uec_priv *uec)
184 {
185 u32 cecr_subblock;
186
187 if (!uec || !uec->uec_info) {
188 printf("%s: No handle passed.\n", __func__);
189 return -EINVAL;
190 }
191
192 cecr_subblock =
193 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
194 qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
195 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
196
197 uec->grace_stopped_tx = 0;
198
199 return 0;
200 }
201
uec_restart_rx(struct uec_priv * uec)202 static int uec_restart_rx(struct uec_priv *uec)
203 {
204 u32 cecr_subblock;
205
206 if (!uec || !uec->uec_info) {
207 printf("%s: No handle passed.\n", __func__);
208 return -EINVAL;
209 }
210
211 cecr_subblock =
212 ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
213 qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
214 (u8)QE_CR_PROTOCOL_ETHERNET, 0);
215
216 uec->grace_stopped_rx = 0;
217
218 return 0;
219 }
220
/*
 * Bring the controller up for the given direction(s): enable the MAC,
 * enable the UCC fast block, and restart any RISC threads that were
 * gracefully stopped.
 */
static int uec_open(struct uec_priv *uec, comm_dir_e mode)
{
	struct ucc_fast_priv *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* Reject out-of-range UCC numbers */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}

	/* MAC first, then the UCC fast block */
	uec_mac_enable(uec, mode);
	ucc_fast_enable(uccf, mode);

	/* Kick the RISC microcode back into action if it was stopped */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx)
		uec_restart_tx(uec);
	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx)
		uec_restart_rx(uec);

	return 0;
}
251
/*
 * Quiesce the controller for the given direction(s): gracefully stop
 * the RISC threads, then disable the UCC fast block and the MAC.
 */
static int uec_stop(struct uec_priv *uec, comm_dir_e mode)
{
	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	/* Reject out-of-range UCC numbers */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}

	/* Gracefully halt transmit and/or receive if still running */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx)
		uec_graceful_stop_tx(uec);
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx)
		uec_graceful_stop_rx(uec);

	/* Then shut down the UCC fast block and the MAC */
	ucc_fast_disable(uec->uccf, mode);
	uec_mac_disable(uec, mode);

	return 0;
}
280
uec_set_mac_duplex(struct uec_priv * uec,int duplex)281 static int uec_set_mac_duplex(struct uec_priv *uec, int duplex)
282 {
283 uec_t *uec_regs;
284 u32 maccfg2;
285
286 if (!uec) {
287 printf("%s: uec not initial\n", __func__);
288 return -EINVAL;
289 }
290 uec_regs = uec->uec_regs;
291
292 if (duplex == DUPLEX_HALF) {
293 maccfg2 = in_be32(&uec_regs->maccfg2);
294 maccfg2 &= ~MACCFG2_FDX;
295 out_be32(&uec_regs->maccfg2, maccfg2);
296 }
297
298 if (duplex == DUPLEX_FULL) {
299 maccfg2 = in_be32(&uec_regs->maccfg2);
300 maccfg2 |= MACCFG2_FDX;
301 out_be32(&uec_regs->maccfg2, maccfg2);
302 }
303
304 return 0;
305 }
306
/*
 * Program MACCFG2 and UPSMR for the given PHY interface type and link
 * speed.
 *
 * @uec:     driver private state (must be initialized)
 * @if_mode: PHY interface (MII/RMII/RGMII*/GMII/TBI/RTBI/SGMII)
 * @speed:   SPEED_10, SPEED_100 or SPEED_1000
 *
 * Return: 0 on success, -EINVAL for an unsupported speed/interface
 * combination (in which case neither register is written).
 */
static int uec_set_mac_if_mode(struct uec_priv *uec,
			       phy_interface_t if_mode, int speed)
{
	phy_interface_t enet_if_mode;
	uec_t *uec_regs;
	u32 upsmr;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initial\n", __func__);
		return -EINVAL;
	}

	uec_regs = uec->uec_regs;
	enet_if_mode = if_mode;

	/* Read-modify-write: clear only the interface mode bits */
	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	/* Likewise clear the reduced/TBI/10M/RMII mode bits in UPSMR */
	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

	switch (speed) {
	case SPEED_10:
		/* 10 Mbps uses the nibble-wide (MII-style) data path */
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= (UPSMR_RPM | UPSMR_R10M);
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= (UPSMR_R10M | UPSMR_RMM);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_100:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= UPSMR_RMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_1000:
		/* Gigabit uses the byte-wide (GMII/TBI-style) data path */
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_GMII:
			break;
		case PHY_INTERFACE_MODE_TBI:
			upsmr |= UPSMR_TBIM;
			break;
		case PHY_INTERFACE_MODE_RTBI:
			upsmr |= (UPSMR_RPM | UPSMR_TBIM);
			break;
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_SGMII:
			upsmr |= UPSMR_SGMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	/* Commit both registers only once a valid combination was found */
	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}
393
/*
 * Configure the MII management interface clock divider and wait for
 * the management bus to become idle.
 *
 * Fixes two defects in the previous version:
 *  - @timeout was unsigned, so after the post-decrement in the loop
 *    condition wrapped it to UINT_MAX the "timeout <= 0" check could
 *    never fire and a stuck bus was silently ignored;
 *  - the busy poll read MIIMCFG, but MIIMIND_BUSY is a bit of the
 *    MIIMIND status register, so the loop tested the wrong register.
 *
 * Return: 0 on success, -ETIMEDOUT if the bus never goes idle.
 */
static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
{
	int timeout = 0x1000;
	u32 miimcfg = 0;

	miimcfg = in_be32(&uec_mii_regs->miimcfg);
	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
	out_be32(&uec_mii_regs->miimcfg, miimcfg);

	/* Wait until the bus is free; poll the BUSY bit in MIIMIND */
	while ((in_be32(&uec_mii_regs->miimind) & MIIMIND_BUSY) && timeout--)
		;
	if (timeout <= 0) {
		printf("%s: The MII Bus is stuck!", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}
413
init_phy(struct eth_device * dev)414 static int init_phy(struct eth_device *dev)
415 {
416 struct uec_priv *uec;
417 uec_mii_t *umii_regs;
418 struct uec_mii_info *mii_info;
419 struct phy_info *curphy;
420 int err;
421
422 uec = (struct uec_priv *)dev->priv;
423 umii_regs = uec->uec_mii_regs;
424
425 uec->oldlink = 0;
426 uec->oldspeed = 0;
427 uec->oldduplex = -1;
428
429 mii_info = malloc(sizeof(*mii_info));
430 if (!mii_info) {
431 printf("%s: Could not allocate mii_info", dev->name);
432 return -ENOMEM;
433 }
434 memset(mii_info, 0, sizeof(*mii_info));
435
436 if (uec->uec_info->uf_info.eth_type == GIGA_ETH)
437 mii_info->speed = SPEED_1000;
438 else
439 mii_info->speed = SPEED_100;
440
441 mii_info->duplex = DUPLEX_FULL;
442 mii_info->pause = 0;
443 mii_info->link = 1;
444
445 mii_info->advertising = (ADVERTISED_10baseT_Half |
446 ADVERTISED_10baseT_Full |
447 ADVERTISED_100baseT_Half |
448 ADVERTISED_100baseT_Full |
449 ADVERTISED_1000baseT_Full);
450 mii_info->autoneg = 1;
451 mii_info->mii_id = uec->uec_info->phy_address;
452 mii_info->dev = dev;
453
454 mii_info->mdio_read = &uec_read_phy_reg;
455 mii_info->mdio_write = &uec_write_phy_reg;
456
457 uec->mii_info = mii_info;
458
459 qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);
460
461 if (init_mii_management_configuration(umii_regs)) {
462 printf("%s: The MII Bus is stuck!", dev->name);
463 err = -1;
464 goto bus_fail;
465 }
466
467 /* get info for this PHY */
468 curphy = uec_get_phy_info(uec->mii_info);
469 if (!curphy) {
470 printf("%s: No PHY found", dev->name);
471 err = -1;
472 goto no_phy;
473 }
474
475 mii_info->phyinfo = curphy;
476
477 /* Run the commands which initialize the PHY */
478 if (curphy->init) {
479 err = curphy->init(uec->mii_info);
480 if (err)
481 goto phy_init_fail;
482 }
483
484 return 0;
485
486 phy_init_fail:
487 no_phy:
488 bus_fail:
489 free(mii_info);
490 return err;
491 }
492
adjust_link(struct eth_device * dev)493 static void adjust_link(struct eth_device *dev)
494 {
495 struct uec_priv *uec = (struct uec_priv *)dev->priv;
496 struct uec_mii_info *mii_info = uec->mii_info;
497
498 if (mii_info->link) {
499 /*
500 * Now we make sure that we can be in full duplex mode.
501 * If not, we operate in half-duplex mode.
502 */
503 if (mii_info->duplex != uec->oldduplex) {
504 if (!(mii_info->duplex)) {
505 uec_set_mac_duplex(uec, DUPLEX_HALF);
506 printf("%s: Half Duplex\n", dev->name);
507 } else {
508 uec_set_mac_duplex(uec, DUPLEX_FULL);
509 printf("%s: Full Duplex\n", dev->name);
510 }
511 uec->oldduplex = mii_info->duplex;
512 }
513
514 if (mii_info->speed != uec->oldspeed) {
515 phy_interface_t mode =
516 uec->uec_info->enet_interface_type;
517 if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
518 switch (mii_info->speed) {
519 case SPEED_1000:
520 break;
521 case SPEED_100:
522 printf("switching to rgmii 100\n");
523 mode = PHY_INTERFACE_MODE_RGMII;
524 break;
525 case SPEED_10:
526 printf("switching to rgmii 10\n");
527 mode = PHY_INTERFACE_MODE_RGMII;
528 break;
529 default:
530 printf("%s: Ack,Speed(%d)is illegal\n",
531 dev->name, mii_info->speed);
532 break;
533 }
534 }
535
536 /* change phy */
537 change_phy_interface_mode(dev, mode, mii_info->speed);
538 /* change the MAC interface mode */
539 uec_set_mac_if_mode(uec, mode, mii_info->speed);
540
541 printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
542 uec->oldspeed = mii_info->speed;
543 }
544
545 if (!uec->oldlink) {
546 printf("%s: Link is up\n", dev->name);
547 uec->oldlink = 1;
548 }
549
550 } else { /* if (mii_info->link) */
551 if (uec->oldlink) {
552 printf("%s: Link is down\n", dev->name);
553 uec->oldlink = 0;
554 uec->oldspeed = 0;
555 uec->oldduplex = -1;
556 }
557 }
558 }
559
/*
 * Poll the PHY for status changes and re-program the MAC/PHY
 * accordingly.  On P1021/P1025 the QE MII management pins are muxed,
 * so they are temporarily claimed around the PHY access.
 */
static void phy_change(struct eth_device *dev)
{
	struct uec_priv *uec = (struct uec_priv *)dev->priv;

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* QE9 and QE12 need to be set for enabling QE MII management signals */
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Update the link, speed, duplex via the PHY driver's status hook */
	uec->mii_info->phyinfo->read_status(uec->mii_info);

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	/*
	 * QE12 is muxed with LBCTL, it needs to be released for enabling
	 * LBCTL signal for LBC usage.
	 */
	clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Adjust the MAC interface according to the new speed/duplex */
	adjust_link(dev);
}
586
587 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
588
589 /*
590 * Find a device index from the devlist by name
591 *
592 * Returns:
593 * The index where the device is located, -1 on error
594 */
uec_miiphy_find_dev_by_name(const char * devname)595 static int uec_miiphy_find_dev_by_name(const char *devname)
596 {
597 int i;
598
599 for (i = 0; i < MAXCONTROLLERS; i++) {
600 if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0)
601 break;
602 }
603
604 /* If device cannot be found, returns -1 */
605 if (i == MAXCONTROLLERS) {
606 debug("%s: device %s not found in devlist\n", __func__,
607 devname);
608 i = -1;
609 }
610
611 return i;
612 }
613
614 /*
615 * Read a MII PHY register.
616 *
617 * Returns:
618 * 0 on success
619 */
uec_miiphy_read(struct mii_dev * bus,int addr,int devad,int reg)620 static int uec_miiphy_read(struct mii_dev *bus, int addr, int devad, int reg)
621 {
622 unsigned short value = 0;
623 int devindex = 0;
624
625 if (!bus->name) {
626 debug("%s: NULL pointer given\n", __func__);
627 } else {
628 devindex = uec_miiphy_find_dev_by_name(bus->name);
629 if (devindex >= 0)
630 value = uec_read_phy_reg(devlist[devindex], addr, reg);
631 }
632 return value;
633 }
634
635 /*
636 * Write a MII PHY register.
637 *
638 * Returns:
639 * 0 on success
640 */
uec_miiphy_write(struct mii_dev * bus,int addr,int devad,int reg,u16 value)641 static int uec_miiphy_write(struct mii_dev *bus, int addr, int devad, int reg,
642 u16 value)
643 {
644 int devindex = 0;
645
646 if (!bus->name) {
647 debug("%s: NULL pointer given\n", __func__);
648 } else {
649 devindex = uec_miiphy_find_dev_by_name(bus->name);
650 if (devindex >= 0)
651 uec_write_phy_reg(devlist[devindex], addr, reg, value);
652 }
653 return 0;
654 }
655 #endif
656
/*
 * Program the station MAC address into MACSTNADDR1/2.
 *
 * The hardware expects the address byte-reversed: for a station
 * address of 0x12345678ABCD, MACSTNADDR1 gets 0xCDAB7856 and
 * MACSTNADDR2 gets 0x34120000.
 */
static int uec_set_mac_address(struct uec_priv *uec, u8 *mac_addr)
{
	u32 addr_low;
	u32 addr_high;
	uec_t *regs;

	if (!uec) {
		printf("%s: uec not initial\n", __func__);
		return -EINVAL;
	}

	regs = uec->uec_regs;

	addr_low = (mac_addr[5] << 24) | (mac_addr[4] << 16) |
		   (mac_addr[3] << 8) | (mac_addr[2]);
	out_be32(&regs->macstnaddr1, addr_low);

	/* Only the top half of MACSTNADDR2 carries address bytes */
	addr_high = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
	out_be32(&regs->macstnaddr2, addr_high);

	return 0;
}
685
uec_convert_threads_num(enum uec_num_of_threads threads_num,int * threads_num_ret)686 static int uec_convert_threads_num(enum uec_num_of_threads threads_num,
687 int *threads_num_ret)
688 {
689 int num_threads_numerica;
690
691 switch (threads_num) {
692 case UEC_NUM_OF_THREADS_1:
693 num_threads_numerica = 1;
694 break;
695 case UEC_NUM_OF_THREADS_2:
696 num_threads_numerica = 2;
697 break;
698 case UEC_NUM_OF_THREADS_4:
699 num_threads_numerica = 4;
700 break;
701 case UEC_NUM_OF_THREADS_6:
702 num_threads_numerica = 6;
703 break;
704 case UEC_NUM_OF_THREADS_8:
705 num_threads_numerica = 8;
706 break;
707 default:
708 printf("%s: Bad number of threads value.",
709 __func__);
710 return -EINVAL;
711 }
712
713 *threads_num_ret = num_threads_numerica;
714
715 return 0;
716 }
717
/*
 * Allocate and initialize the Tx global parameter RAM in MURAM:
 * send-queue descriptor, BD ring pointers, scheduler/RMON pointers
 * (disabled), and the per-thread Tx data area.
 *
 * NOTE(review): the qe_muram_alloc() results are not checked for
 * failure here - confirm allocation cannot fail at this point.
 */
static void uec_init_tx_parameter(struct uec_priv *uec, int num_threads_tx)
{
	struct uec_inf *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Alloc global Tx parameter RAM page */
	uec->tx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_tx_global_pram),
			       UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (struct uec_tx_global_pram *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(struct uec_tx_global_pram));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR: allocate and point to the single send-queue descriptor */
	uec->send_q_mem_reg_offset =
		qe_muram_alloc(sizeof(struct uec_send_queue_qd),
			       UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (struct uec_send_queue_mem_region *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Setup the table with TxBDs ring: base and last BD addresses */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
		 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
		 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
		 end_bd);

	/* Scheduler Base Pointer, we have only one Tx queue, no need it */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer, TxRMON disable, we don't need it */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset: IP header offsets unused, clear them all */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++)
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);

	/* VTAG table: VLAN tag insertion unused, clear it */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++)
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);

	/*
	 * TQPTR: per-thread Tx data area; a single thread gets 32
	 * extra bytes (the "+ 32 * (num_threads_tx == 1)" term).
	 */
	uec->thread_dat_tx_offset =
		qe_muram_alloc(num_threads_tx *
			       sizeof(struct uec_thread_data_tx) +
			       32 * (num_threads_tx == 1),
			       UEC_THREAD_DATA_ALIGNMENT);

	uec->p_thread_data_tx = (struct uec_thread_data_tx *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}
787
/*
 * Allocate and initialize the Rx global parameter RAM in MURAM:
 * per-thread Rx data area, BD queue table, frame-length limits and
 * the (cleared) address filtering tables.
 *
 * NOTE(review): the qe_muram_alloc() results are not checked for
 * failure here - confirm allocation cannot fail at this point.
 */
static void uec_init_rx_parameter(struct uec_priv *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	struct uec_82xx_add_filtering_pram *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_rx_global_pram),
			       UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (struct uec_rx_global_pram *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero Global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(struct uec_rx_global_pram));

	/* Init global Rx parameter RAM */
	/*
	 * REMODER, Extended feature mode disable, VLAN disable,
	 * LossLess flow control disable, Receive firmware statisic disable,
	 * Extended address parsing mode disable, One Rx queues,
	 * Dynamic maximum/minimum frame length disable, IP checksum check
	 * disable, IP address alignment disable
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR: per-thread Rx data area */
	uec->thread_dat_rx_offset =
		qe_muram_alloc(num_threads_rx *
			       sizeof(struct uec_thread_data_rx),
			       UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (struct uec_thread_data_rx *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR: maximum Rx buffer length */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR: Rx BD queue table plus prefetched-BD area */
	uec->rx_bd_qs_tbl_offset =
		qe_muram_alloc(sizeof(struct uec_rx_bd_queues_entry) +
			       sizeof(struct uec_rx_pref_bds),
			       UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (struct uec_rx_bd_queues_entry *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(struct uec_rx_bd_queues_entry) +
	       sizeof(struct uec_rx_pref_bds));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR: maximum frame length */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR: minimum frame length */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++)
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);

	/* VLAN_TYPE: standard 802.1Q TPID */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (struct uec_82xx_add_filtering_pram *)
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}
884
/*
 * Build the INIT TX AND RX PARAMETERS command parameter RAM and issue
 * the QE_INIT_TX_RX host command: fills in the magic reserved words,
 * the Rx/Tx global parameter pointers and one snum/parameter-RAM
 * entry per RISC thread.
 *
 * Return: 0 on success, -ENOMEM if a serial number (snum) cannot be
 * obtained for a thread.
 */
static int uec_issue_init_enet_rxtx_cmd(struct uec_priv *uec,
					int thread_tx, int thread_rx)
{
	struct uec_init_cmd_pram *p_init_enet_param;
	u32 init_enet_param_offset;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	int i;
	int snum;
	u32 off;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Allocate init enet command parameter */
	uec->init_enet_param_offset =
		qe_muram_alloc(sizeof(struct uec_init_cmd_pram), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (struct uec_init_cmd_pram *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero init enet command struct */
	memset((void *)uec->p_init_enet_param, 0,
	       sizeof(struct uec_init_cmd_pram));

	/* Init the command struct: magic values the microcode expects */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	/* Encode the configured Rx/Tx thread counts */
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					 << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					 << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
					 (u32)uec_info->risc_rx;

	/*
	 * Init Rx threads.  NOTE(review): iterates thread_rx + 1 times
	 * (entry 0 gets offset 0), unlike the Tx loop below - presumably
	 * intentional per the QE init-enet layout; confirm against the
	 * QE reference manual.
	 */
	for (i = 0; i < (thread_rx + 1); i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s can not get snum\n", __func__);
			return -ENOMEM;
		}

		if (i == 0) {
			off = 0;
		} else {
			off = qe_muram_alloc(sizeof(struct uec_thread_rx_pram),
					     UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
				      (u32)uec_info->risc_tx;

	/* Init Tx threads: one snum + thread parameter RAM per thread */
	for (i = 0; i < thread_tx; i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s can not get snum\n", __func__);
			return -ENOMEM;
		}

		off = qe_muram_alloc(sizeof(struct uec_thread_tx_pram),
				     UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	/* Make sure all parameter RAM writes complete before the command */
	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
		     init_enet_param_offset);

	return 0;
}
981
/*
 * One-time hardware bring-up for a UEC controller: initialize the UCC
 * fast block, program the MAC registers, set up the MII management
 * interface, allocate and link the Tx/Rx BD rings and buffers, build
 * the global parameter RAM, and issue the QE init-enet command.
 *
 * Return: 0 on success, -EINVAL for bad configuration, -ENOMEM on
 * allocation/command failure.
 *
 * NOTE(review): resources acquired earlier (ucc_fast_init, malloc'd
 * rings) are not released on the later error paths - confirm whether
 * that matters for the boot flow.
 */
static int uec_startup(struct uec_priv *uec)
{
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	struct ucc_fast_priv *uccf;
	ucc_fast_t *uf_regs;
	uec_t *uec_regs;
	int num_threads_tx;
	int num_threads_rx;
	u32 utbipar;
	u32 length;
	u32 align;
	struct buffer_descriptor *bd;
	u8 *buf;
	int i;

	if (!uec || !uec->uec_info) {
		printf("%s: uec or uec_info not initial\n", __func__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Check if Rx BD ring len is illegal */
	if (uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN ||
	    (uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
		printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if Tx BD ring len is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if MRBLR is illegal */
	if (MAX_RXBUF_LEN == 0 || MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT) {
		printf("%s: max rx buffer length must be mutliple of 128.\n",
		       __func__);
		return -EINVAL;
	}

	/* Both Rx and Tx are stopped */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	/* Init UCC fast */
	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __func__);
		return -ENOMEM;
	}

	/* Save uccf */
	uec->uccf = uccf;

	/* Convert the Tx threads number (enum -> integer) */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
				    &num_threads_tx)) {
		return -EINVAL;
	}

	/* Convert the Rx threads number (enum -> integer) */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
				    &num_threads_rx)) {
		return -EINVAL;
	}

	uf_regs = uccf->uf_regs;

	/* UEC register is following UCC fast registers */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the UEC register pointer to UEC private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR, enable hardware statistics (UCC) */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1, flow control disable, disable Tx and Rx */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2, length check, MAC PAD and CRC enable */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Setup MAC interface mode for the configured interface/speed */
	uec_set_mac_if_mode(uec, uec_info->enet_interface_type,
			    uec_info->speed);

	/* Setup MII management base */
#ifndef CONFIG_eTSEC_MDIO_BUS
	/* MII registers live right after MACCFG in the UEC block */
	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
#else
	/* Board uses a dedicated (eTSEC) MDIO bus at a fixed address */
	uec->uec_mii_regs = (uec_mii_t *)CONFIG_MIIM_ADDRESS;
#endif

	/* Setup MII master clock source */
	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);

	/* Setup UTBIPAR */
	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;

	/* Initialize UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL UEC.
	 * This frees up the remaining SMI addresses for use.
	 */
	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&uec_regs->utbipar, utbipar);

	/*
	 * Configure the TBI for SGMII operation.
	 * NOTE(review): uec_regs->utbipar is passed directly (not via
	 * in_be32) as the TBI PHY address argument - confirm this is
	 * the intended access pattern.
	 */
	if (uec->uec_info->enet_interface_type == PHY_INTERFACE_MODE_SGMII &&
	    uec->uec_info->speed == SPEED_1000) {
		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_ANA, TBIANA_SETTINGS);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_CR, TBICR_SETTINGS);
	}

	/* Allocate Tx BDs: round the ring size up to the memory alignment */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		  UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
	    UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	}

	/* Over-allocate so the ring can be aligned manually */
	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (uec->tx_bd_ring_offset != 0) {
		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
					   & ~(align - 1));
	}

	/*
	 * Zero all of Tx BDs.
	 * NOTE(review): executed even if malloc returned NULL above -
	 * that would be a NULL-page write; confirm OOM cannot happen here.
	 */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate Rx BDs (same manual-alignment scheme) */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (uec->rx_bd_ring_offset != 0) {
		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
					   & ~(align - 1));
	}

	/* Zero all of Rx BDs (see NOTE above about unchecked malloc) */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate Rx buffer, one MAX_RXBUF_LEN buffer per Rx BD */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (uec->rx_buf_offset != 0) {
		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
				       & ~(align - 1));
	}

	/* Zero all of the Rx buffer (see NOTE above about unchecked malloc) */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init TxBD ring: all BDs empty, last one carries the WRAP bit */
	bd = (struct buffer_descriptor *)uec->p_tx_bd_ring;
	uec->tx_bd = bd;

	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_DATA_CLEAR(bd);
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);
		bd++;
	}
	BD_STATUS_SET((--bd), TX_BD_WRAP);

	/* Init RxBD ring: attach a buffer to each BD and mark it EMPTY */
	bd = (struct buffer_descriptor *)uec->p_rx_bd_ring;
	uec->rx_bd = bd;
	buf = uec->p_rx_buf;
	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RX_BD_EMPTY);
		buf += MAX_RXBUF_LEN;
		bd++;
	}
	BD_STATUS_SET((--bd), RX_BD_WRAP | RX_BD_EMPTY);

	/* Init global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s issue init enet cmd failed\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
1190
/*
 * uec_init() - eth_device .init hook: bring the interface up.
 *
 * On the first invocation only: configure the PHY, start autonegotiation
 * and poll for link (50 x 100 ms, ~5 seconds), then program the MAC to
 * match the negotiated link parameters via adjust_link().  Every
 * invocation validates and programs the station address and enables the
 * MAC Rx/Tx paths.
 *
 * Return: 0 when the device is enabled and link is up, non-zero on error
 * or when no link was established.
 */
static int uec_init(struct eth_device *dev, struct bd_info *bd)
{
	struct uec_priv *uec;
	int err, i;
	struct phy_info *curphy;
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif

	uec = (struct uec_priv *)dev->priv;

	/* PHY bring-up is performed only once, on the first init call */
	if (!uec->the_first_run) {
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
		/*
		 * QE9 and QE12 need to be set for enabling QE MII
		 * management signals
		 */
		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

		err = init_phy(dev);
		if (err) {
			printf("%s: Cannot initialize PHY, aborting.\n",
			       dev->name);
			return err;
		}

		curphy = uec->mii_info->phyinfo;

		/* Kick off autonegotiation if the PHY driver supports it */
		if (curphy->config_aneg) {
			err = curphy->config_aneg(uec->mii_info);
			if (err) {
				printf("%s: Can't negotiate PHY\n", dev->name);
				return err;
			}
		}

		/* Give PHYs up to 5 sec to report a link */
		i = 50;
		do {
			err = curphy->read_status(uec->mii_info);
			/*
			 * Exit when the status read failed, link came up,
			 * or the retry budget (i) is exhausted; note i is
			 * decremented as a side effect of the test.
			 */
			if (!(((i-- > 0) && !uec->mii_info->link) || err))
				break;
			mdelay(100);
		} while (1);

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
		/* QE12 needs to be released for enabling LBCTL signal*/
		clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

		if (err || i <= 0)
			printf("warning: %s: timeout on PHY link\n", dev->name);

		/* Program the MAC to the negotiated speed/duplex */
		adjust_link(dev);
		uec->the_first_run = 1;
	}

	/* Set up the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		/* Bit 0 of the first octet set marks a multicast address */
		printf("%s: MacAddress is multcast address\n",
		       __func__);
		return -1;
	}
	uec_set_mac_address(uec, dev->enetaddr);

	err = uec_open(uec, COMM_DIR_RX_AND_TX);
	if (err) {
		printf("%s: cannot enable UEC device\n", dev->name);
		return -1;
	}

	phy_change(dev);

	/* Success only if the PHY actually reported link */
	return uec->mii_info->link ? 0 : -1;
}
1268
uec_halt(struct eth_device * dev)1269 static void uec_halt(struct eth_device *dev)
1270 {
1271 struct uec_priv *uec = (struct uec_priv *)dev->priv;
1272
1273 uec_stop(uec, COMM_DIR_RX_AND_TX);
1274 }
1275
uec_send(struct eth_device * dev,void * buf,int len)1276 static int uec_send(struct eth_device *dev, void *buf, int len)
1277 {
1278 struct uec_priv *uec;
1279 struct ucc_fast_priv *uccf;
1280 struct buffer_descriptor *bd;
1281 u16 status;
1282 int i;
1283 int result = 0;
1284
1285 uec = (struct uec_priv *)dev->priv;
1286 uccf = uec->uccf;
1287 bd = uec->tx_bd;
1288
1289 /* Find an empty TxBD */
1290 for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
1291 if (i > 0x100000) {
1292 printf("%s: tx buffer not ready\n", dev->name);
1293 return result;
1294 }
1295 }
1296
1297 /* Init TxBD */
1298 BD_DATA_SET(bd, buf);
1299 BD_LENGTH_SET(bd, len);
1300 status = BD_STATUS(bd);
1301 status &= BD_WRAP;
1302 status |= (TX_BD_READY | TX_BD_LAST);
1303 BD_STATUS_SET(bd, status);
1304
1305 /* Tell UCC to transmit the buffer */
1306 ucc_fast_transmit_on_demand(uccf);
1307
1308 /* Wait for buffer to be transmitted */
1309 for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
1310 if (i > 0x100000) {
1311 printf("%s: tx error\n", dev->name);
1312 return result;
1313 }
1314 }
1315
1316 /* Ok, the buffer be transimitted */
1317 BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
1318 uec->tx_bd = bd;
1319 result = 1;
1320
1321 return result;
1322 }
1323
uec_recv(struct eth_device * dev)1324 static int uec_recv(struct eth_device *dev)
1325 {
1326 struct uec_priv *uec = dev->priv;
1327 struct buffer_descriptor *bd;
1328 u16 status;
1329 u16 len;
1330 u8 *data;
1331
1332 bd = uec->rx_bd;
1333 status = BD_STATUS(bd);
1334
1335 while (!(status & RX_BD_EMPTY)) {
1336 if (!(status & RX_BD_ERROR)) {
1337 data = BD_DATA(bd);
1338 len = BD_LENGTH(bd);
1339 net_process_received_packet(data, len);
1340 } else {
1341 printf("%s: Rx error\n", dev->name);
1342 }
1343 status &= BD_CLEAN;
1344 BD_LENGTH_SET(bd, 0);
1345 BD_STATUS_SET(bd, status | RX_BD_EMPTY);
1346 BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
1347 status = BD_STATUS(bd);
1348 }
1349 uec->rx_bd = bd;
1350
1351 return 1;
1352 }
1353
uec_initialize(struct bd_info * bis,struct uec_inf * uec_info)1354 int uec_initialize(struct bd_info *bis, struct uec_inf *uec_info)
1355 {
1356 struct eth_device *dev;
1357 int i;
1358 struct uec_priv *uec;
1359 int err;
1360
1361 dev = (struct eth_device *)malloc(sizeof(struct eth_device));
1362 if (!dev)
1363 return 0;
1364 memset(dev, 0, sizeof(struct eth_device));
1365
1366 /* Allocate the UEC private struct */
1367 uec = (struct uec_priv *)malloc(sizeof(struct uec_priv));
1368 if (!uec)
1369 return -ENOMEM;
1370
1371 memset(uec, 0, sizeof(struct uec_priv));
1372
1373 /* Adjust uec_info */
1374 #if (MAX_QE_RISC == 4)
1375 uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
1376 uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
1377 #endif
1378
1379 devlist[uec_info->uf_info.ucc_num] = dev;
1380
1381 uec->uec_info = uec_info;
1382 uec->dev = dev;
1383
1384 sprintf(dev->name, "UEC%d", uec_info->uf_info.ucc_num);
1385 dev->iobase = 0;
1386 dev->priv = (void *)uec;
1387 dev->init = uec_init;
1388 dev->halt = uec_halt;
1389 dev->send = uec_send;
1390 dev->recv = uec_recv;
1391
1392 /* Clear the ethnet address */
1393 for (i = 0; i < 6; i++)
1394 dev->enetaddr[i] = 0;
1395
1396 eth_register(dev);
1397
1398 err = uec_startup(uec);
1399 if (err) {
1400 printf("%s: Cannot configure net device, aborting.", dev->name);
1401 return err;
1402 }
1403
1404 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
1405 int retval;
1406 struct mii_dev *mdiodev = mdio_alloc();
1407
1408 if (!mdiodev)
1409 return -ENOMEM;
1410 strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
1411 mdiodev->read = uec_miiphy_read;
1412 mdiodev->write = uec_miiphy_write;
1413
1414 retval = mdio_register(mdiodev);
1415 if (retval < 0)
1416 return retval;
1417 #endif
1418
1419 return 1;
1420 }
1421
uec_eth_init(struct bd_info * bis,struct uec_inf * uecs,int num)1422 int uec_eth_init(struct bd_info *bis, struct uec_inf *uecs, int num)
1423 {
1424 int i;
1425
1426 for (i = 0; i < num; i++)
1427 uec_initialize(bis, &uecs[i]);
1428
1429 return 0;
1430 }
1431
uec_standard_init(struct bd_info * bis)1432 int uec_standard_init(struct bd_info *bis)
1433 {
1434 return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));
1435 }
1436 #endif
1437